// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 * <http://rt2x00.serialmonkey.com>
 */

/*
 * Module: rt2500usb
 * Abstract: rt2500usb device specific routines.
 * Supported chipsets: RT2570.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include "rt2x00.h"
#include "rt2x00usb.h"
#include "rt2500usb.h"

/*
 * Allow hardware encryption to be disabled.
 */
static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

/*
 * Register access.
 * All access to the CSR registers will go through the methods
 * rt2500usb_register_read and rt2500usb_register_write.
 * BBP and RF registers require indirect register access,
 * and use the CSR registers BBPCSR and RFCSR to achieve this.
 * These indirect registers work with busy bits,
 * and we will try maximal REGISTER_USB_BUSY_COUNT times to access
 * the register while taking a REGISTER_BUSY_DELAY us delay
 * between each attempt. When the busy bit is still set at that time,
 * the access attempt is considered to have failed,
 * and we will print an error.
 * If the csr_mutex is already held then the _lock variants must
 * be used instead.
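 * (The regular accessors take the csr_mutex internally, while the
 * _lock variants expect the caller to already hold it; calling a
 * regular accessor with the mutex held would deadlock.)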
 */
static u16 rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
				   const unsigned int offset)
{
	__le16 reg;

	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
				      USB_VENDOR_REQUEST_IN, offset,
				      &reg, sizeof(reg));

	return le16_to_cpu(reg);
}

static u16 rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
					const unsigned int offset)
{
	__le16 reg;

	rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ,
				       USB_VENDOR_REQUEST_IN, offset,
				       &reg, sizeof(reg), REGISTER_TIMEOUT);

	return le16_to_cpu(reg);
}

static void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
				     const unsigned int offset,
				     u16 value)
{
	__le16 reg = cpu_to_le16(value);

	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
				      USB_VENDOR_REQUEST_OUT, offset,
				      &reg, sizeof(reg));
}

static void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
					  const unsigned int offset,
					  u16 value)
{
	__le16 reg = cpu_to_le16(value);

	rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_WRITE,
				       USB_VENDOR_REQUEST_OUT, offset,
				       &reg, sizeof(reg), REGISTER_TIMEOUT);
}

static void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
					  const unsigned int offset,
					  void *value, const u16 length)
{
	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
				      USB_VENDOR_REQUEST_OUT, offset,
				      value, length);
}

static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
				  const unsigned int offset,
				  struct rt2x00_field16 field,
				  u16 *reg)
{
	unsigned int i;

	for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
		*reg = rt2500usb_register_read_lock(rt2x00dev, offset);
		if (!rt2x00_get_field16(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n",
		   offset, *reg);
	*reg = ~0;

	return 0;
}

#define WAIT_FOR_BBP(__dev, __reg) \
	rt2500usb_regbusy_read((__dev), PHY_CSR8, PHY_CSR8_BUSY, (__reg))
#define WAIT_FOR_RF(__dev, __reg) \
	rt2500usb_regbusy_read((__dev), PHY_CSR10, PHY_CSR10_RF_BUSY, (__reg))

static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev,
				const unsigned int word, const u8 value)
{
	u16 reg;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the BBP becomes available, afterwards we
	 * can safely write the new data into the register.
	 */
	if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
		reg = 0;
		rt2x00_set_field16(&reg, PHY_CSR7_DATA, value);
		rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word);
		rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 0);

		rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg);
	}

	mutex_unlock(&rt2x00dev->csr_mutex);
}

static u8 rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
			     const unsigned int word)
{
	u16 reg;
	u8 value;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the BBP becomes available, afterwards we
	 * can safely write the read request into the register.
	 * After the data has been written, we wait until hardware
	 * returns the correct value, if at any time the register
	 * doesn't become available in time, reg will be 0xffff
	 * which means we return 0xff to the caller.
	 */
	if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
		reg = 0;
		rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word);
		rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 1);

		rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg);

		if (WAIT_FOR_BBP(rt2x00dev, &reg))
			reg = rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7);
	}

	value = rt2x00_get_field16(reg, PHY_CSR7_DATA);

	mutex_unlock(&rt2x00dev->csr_mutex);

	return value;
}

static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev,
			       const unsigned int word, const u32 value)
{
	u16 reg;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the RF becomes available, afterwards we
	 * can safely write the new data into the register.
	 */
	if (WAIT_FOR_RF(rt2x00dev, &reg)) {
		reg = 0;
		rt2x00_set_field16(&reg, PHY_CSR9_RF_VALUE, value);
		rt2500usb_register_write_lock(rt2x00dev, PHY_CSR9, reg);

		reg = 0;
		rt2x00_set_field16(&reg, PHY_CSR10_RF_VALUE, value >> 16);
		rt2x00_set_field16(&reg, PHY_CSR10_RF_NUMBER_OF_BITS, 20);
		rt2x00_set_field16(&reg, PHY_CSR10_RF_IF_SELECT, 0);
		rt2x00_set_field16(&reg, PHY_CSR10_RF_BUSY, 1);
		rt2500usb_register_write_lock(rt2x00dev, PHY_CSR10, reg);

		rt2x00_rf_write(rt2x00dev, word, value);
	}

	mutex_unlock(&rt2x00dev->csr_mutex);
}

#ifdef CONFIG_RT2X00_LIB_DEBUGFS
static u32 _rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
				    const unsigned int offset)
{
	return rt2500usb_register_read(rt2x00dev, offset);
}

static void _rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
				      const unsigned int offset,
				      u32 value)
{
	rt2500usb_register_write(rt2x00dev, offset, value);
}

static const struct rt2x00debug rt2500usb_rt2x00debug = {
	.owner	= THIS_MODULE,
	.csr	= {
		.read		= _rt2500usb_register_read,
		.write		= _rt2500usb_register_write,
		.flags		= RT2X00DEBUGFS_OFFSET,
		.word_base	= CSR_REG_BASE,
		.word_size	= sizeof(u16),
		.word_count	= CSR_REG_SIZE / sizeof(u16),
	},
	.eeprom	= {
		.read		= rt2x00_eeprom_read,
		.write		= rt2x00_eeprom_write,
		.word_base	= EEPROM_BASE,
		.word_size	= sizeof(u16),
		.word_count	= EEPROM_SIZE / sizeof(u16),
	},
	.bbp	= {
		.read		= rt2500usb_bbp_read,
		.write		= rt2500usb_bbp_write,
		.word_base	= BBP_BASE,
		.word_size	= sizeof(u8),
		.word_count	= BBP_SIZE / sizeof(u8),
	},
	.rf	= {
		.read		= rt2x00_rf_read,
		.write		= rt2500usb_rf_write,
		.word_base	= RF_BASE,
		.word_size	= sizeof(u32),
		.word_count	= RF_SIZE / sizeof(u32),
	},
};
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */

static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
{
	u16 reg;

	reg = rt2500usb_register_read(rt2x00dev, MAC_CSR19);
	return rt2x00_get_field16(reg, MAC_CSR19_VAL7);
}

#ifdef CONFIG_RT2X00_LIB_LEDS
static void rt2500usb_brightness_set(struct led_classdev *led_cdev,
				     enum led_brightness brightness)
{
	struct rt2x00_led *led =
	    container_of(led_cdev, struct rt2x00_led, led_dev);
	unsigned int enabled = brightness != LED_OFF;
	u16 reg;

	reg = rt2500usb_register_read(led->rt2x00dev, MAC_CSR20);

	if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC)
		rt2x00_set_field16(&reg, MAC_CSR20_LINK, enabled);
	else if (led->type == LED_TYPE_ACTIVITY)
		rt2x00_set_field16(&reg, MAC_CSR20_ACTIVITY, enabled);

	rt2500usb_register_write(led->rt2x00dev, MAC_CSR20, reg);
}

static int rt2500usb_blink_set(struct led_classdev *led_cdev,
			       unsigned long *delay_on,
			       unsigned long *delay_off)
{
	struct rt2x00_led *led =
	    container_of(led_cdev, struct rt2x00_led, led_dev);
	u16 reg;

	reg = rt2500usb_register_read(led->rt2x00dev, MAC_CSR21);
	rt2x00_set_field16(&reg, MAC_CSR21_ON_PERIOD, *delay_on);
	rt2x00_set_field16(&reg, MAC_CSR21_OFF_PERIOD, *delay_off);
	rt2500usb_register_write(led->rt2x00dev, MAC_CSR21, reg);

	return 0;
}

static void
rt2500usb_init_led(struct rt2x00_dev *rt2x00dev,
		   struct rt2x00_led *led, enum led_type type)
{
	led->rt2x00dev = rt2x00dev;
	led->type = type;
	led->led_dev.brightness_set = rt2500usb_brightness_set;
	led->led_dev.blink_set = rt2500usb_blink_set;
	led->flags = LED_INITIALIZED;
}
#endif /* CONFIG_RT2X00_LIB_LEDS */

/*
 * Configuration handlers.
 */

/*
 * rt2500usb does not differentiate between shared and pairwise
 * keys, so we should use the same function for both key types.
 */
static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
				struct rt2x00lib_crypto *crypto,
				struct ieee80211_key_conf *key)
{
	u32 mask;
	u16 reg;
	enum cipher curr_cipher;

	if (crypto->cmd == SET_KEY) {
		/*
		 * Disallow setting a WEP key with an index other than 0;
		 * that is known not to work on at least some hardware.
		 * SW crypto will be used in that case.
		 */
		if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
		     key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
		    key->keyidx != 0)
			return -EOPNOTSUPP;

		/*
		 * Pairwise key will always be entry 0, but this
		 * could collide with a shared key on the same
		 * position...
		 */
		mask = TXRX_CSR0_KEY_ID.bit_mask;

		reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR0);
		curr_cipher = rt2x00_get_field16(reg, TXRX_CSR0_ALGORITHM);
		reg &= mask;

		if (reg && reg == mask)
			return -ENOSPC;

		reg = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID);

		key->hw_key_idx += reg ? ffz(reg) : 0;
		/*
		 * Hardware requires that all keys use the same cipher
		 * (e.g. TKIP-only, AES-only, but not TKIP+AES).
		 * If this is not the first key, compare the cipher with the
		 * first one and fall back to SW crypto if not the same.
		 */
		if (key->hw_key_idx > 0 && crypto->cipher != curr_cipher)
			return -EOPNOTSUPP;

		rt2500usb_register_multiwrite(rt2x00dev,
					      KEY_ENTRY(key->hw_key_idx),
					      crypto->key, sizeof(crypto->key));

		/*
		 * The driver does not support the IV/EIV generation
		 * in hardware. However it demands the data to be provided
		 * both separately as well as inside the frame.
		 * We already provided the CONFIG_CRYPTO_COPY_IV to rt2x00lib
		 * to ensure rt2x00lib will not strip the data from the
		 * frame after the copy, now we must tell mac80211
		 * to generate the IV/EIV data.
		 */
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
	}

	/*
	 * TXRX_CSR0_KEY_ID contains only single-bit fields to indicate
	 * a particular key is valid.
	 */
	reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR0);
	rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, crypto->cipher);
	rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER);

	mask = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID);
	if (crypto->cmd == SET_KEY)
		mask |= 1 << key->hw_key_idx;
	else if (crypto->cmd == DISABLE_KEY)
		mask &= ~(1 << key->hw_key_idx);
	rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, mask);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg);

	return 0;
}

static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev,
				    const unsigned int filter_flags)
{
	u16 reg;

	/*
	 * Start configuration steps.
	 * Note that the version error will always be dropped
	 * and broadcast frames will always be accepted since
	 * there is no filter for it at this time.
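	 * Each TXRX_CSR2_DROP_* bit below is set when the corresponding
	 * FIF_* pass-flag is absent, i.e. the hardware filter logic is
	 * inverted with respect to the mac80211 filter flags.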
	 */
	reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2);
	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CRC,
			   !(filter_flags & FIF_FCSFAIL));
	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_PHYSICAL,
			   !(filter_flags & FIF_PLCPFAIL));
	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME,
			   !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS,
			   !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
			   !rt2x00dev->intf_ap_count);
	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1);
	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST,
			   !(filter_flags & FIF_ALLMULTI));
	rt2x00_set_field16(&reg, TXRX_CSR2_DROP_BROADCAST, 0);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
}

static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
				  struct rt2x00_intf *intf,
				  struct rt2x00intf_conf *conf,
				  const unsigned int flags)
{
	unsigned int bcn_preload;
	u16 reg;

	if (flags & CONFIG_UPDATE_TYPE) {
		/*
		 * Enable beacon config
		 */
		bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20);
		reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR20);
		rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6);
		rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW,
				   2 * (conf->type != NL80211_IFTYPE_STATION));
		rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg);

		/*
		 * Enable synchronisation.
		 */
		reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR18);
		rt2x00_set_field16(&reg, TXRX_CSR18_OFFSET, 0);
		rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);

		reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19);
		rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync);
		rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
	}

	if (flags & CONFIG_UPDATE_MAC)
		rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR2, conf->mac,
					      (3 * sizeof(__le16)));

	if (flags & CONFIG_UPDATE_BSSID)
		rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR5, conf->bssid,
					      (3 * sizeof(__le16)));
}

static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev,
				 struct rt2x00lib_erp *erp,
				 u32 changed)
{
	u16 reg;

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR10);
		rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE,
				   !!erp->short_preamble);
		rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg);
	}

	if (changed & BSS_CHANGED_BASIC_RATES)
		rt2500usb_register_write(rt2x00dev, TXRX_CSR11,
					 erp->basic_rates);

	if (changed & BSS_CHANGED_BEACON_INT) {
		reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR18);
		rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL,
				   erp->beacon_int * 4);
		rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		rt2500usb_register_write(rt2x00dev, MAC_CSR10, erp->slot_time);
		rt2500usb_register_write(rt2x00dev, MAC_CSR11, erp->sifs);
		rt2500usb_register_write(rt2x00dev, MAC_CSR12, erp->eifs);
	}
}

static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
				 struct antenna_setup *ant)
{
	u8 r2;
	u8 r14;
	u16 csr5;
	u16 csr6;

	/*
	 * We should never come here because rt2x00lib is supposed
	 * to catch this and send us the correct antenna explicitly.
	 */
	BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
	       ant->tx == ANTENNA_SW_DIVERSITY);

	r2 = rt2500usb_bbp_read(rt2x00dev, 2);
	r14 = rt2500usb_bbp_read(rt2x00dev, 14);
	csr5 = rt2500usb_register_read(rt2x00dev, PHY_CSR5);
	csr6 = rt2500usb_register_read(rt2x00dev, PHY_CSR6);

	/*
	 * Configure the TX antenna.
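	 * The BBP/CSR antenna fields share one encoding: 0 selects
	 * antenna A, 1 selects hardware diversity and 2 selects
	 * antenna B, as the switch statements below show.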
	 */
	switch (ant->tx) {
	case ANTENNA_HW_DIVERSITY:
		rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 1);
		rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 1);
		rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 1);
		break;
	case ANTENNA_A:
		rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0);
		rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 0);
		rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 0);
		break;
	case ANTENNA_B:
	default:
		rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2);
		rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 2);
		rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 2);
		break;
	}

	/*
	 * Configure the RX antenna.
	 */
	switch (ant->rx) {
	case ANTENNA_HW_DIVERSITY:
		rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 1);
		break;
	case ANTENNA_A:
		rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0);
		break;
	case ANTENNA_B:
	default:
		rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2);
		break;
	}

	/*
	 * RT2525E and RT5222 need to flip TX I/Q
	 */
	if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
		rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
		rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1);
		rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1);

		/*
		 * RT2525E does not need RX I/Q Flip.
		 */
		if (rt2x00_rf(rt2x00dev, RF2525E))
			rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
	} else {
		rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0);
		rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 0);
	}

	rt2500usb_bbp_write(rt2x00dev, 2, r2);
	rt2500usb_bbp_write(rt2x00dev, 14, r14);
	rt2500usb_register_write(rt2x00dev, PHY_CSR5, csr5);
	rt2500usb_register_write(rt2x00dev, PHY_CSR6, csr6);
}

static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev,
				     struct rf_channel *rf,
				     const int txpower)
{
	/*
	 * Set TXpower.
	 */
	rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));

	/*
	 * For RT2525E we should first set the channel to half band higher.
	 */
	if (rt2x00_rf(rt2x00dev, RF2525E)) {
		static const u32 vals[] = {
			0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2,
			0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba,
			0x000008ba, 0x000008be, 0x000008b7, 0x00000902,
			0x00000902, 0x00000906
		};

		rt2500usb_rf_write(rt2x00dev, 2, vals[rf->channel - 1]);
		if (rf->rf4)
			rt2500usb_rf_write(rt2x00dev, 4, rf->rf4);
	}

	rt2500usb_rf_write(rt2x00dev, 1, rf->rf1);
	rt2500usb_rf_write(rt2x00dev, 2, rf->rf2);
	rt2500usb_rf_write(rt2x00dev, 3, rf->rf3);
	if (rf->rf4)
		rt2500usb_rf_write(rt2x00dev, 4, rf->rf4);
}

static void rt2500usb_config_txpower(struct rt2x00_dev *rt2x00dev,
				     const int txpower)
{
	u32 rf3;

	rf3 = rt2x00_rf_read(rt2x00dev, 3);
	rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
	rt2500usb_rf_write(rt2x00dev, 3, rf3);
}

static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev,
				struct rt2x00lib_conf *libconf)
{
	enum dev_state state = (libconf->conf->flags & IEEE80211_CONF_PS) ?
		STATE_SLEEP : STATE_AWAKE;
	u16 reg;

	if (state == STATE_SLEEP) {
		reg = rt2500usb_register_read(rt2x00dev, MAC_CSR18);
		rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON,
				   rt2x00dev->beacon_int - 20);
		rt2x00_set_field16(&reg, MAC_CSR18_BEACONS_BEFORE_WAKEUP,
				   libconf->conf->listen_interval - 1);

		/* We must first disable autowake before it can be enabled */
		rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0);
		rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);

		rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1);
		rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
	} else {
		reg = rt2500usb_register_read(rt2x00dev, MAC_CSR18);
		rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0);
		rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
	}

	rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
}

static void rt2500usb_config(struct rt2x00_dev *rt2x00dev,
			     struct rt2x00lib_conf *libconf,
			     const unsigned int flags)
{
	if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
		rt2500usb_config_channel(rt2x00dev, &libconf->rf,
					 libconf->conf->power_level);
	if ((flags & IEEE80211_CONF_CHANGE_POWER) &&
	    !(flags & IEEE80211_CONF_CHANGE_CHANNEL))
		rt2500usb_config_txpower(rt2x00dev,
					 libconf->conf->power_level);
	if (flags & IEEE80211_CONF_CHANGE_PS)
		rt2500usb_config_ps(rt2x00dev, libconf);
}

/*
 * Link tuning
 */
static void rt2500usb_link_stats(struct rt2x00_dev *rt2x00dev,
				 struct link_qual *qual)
{
	u16 reg;

	/*
	 * Update FCS error count from register.
	 */
	reg = rt2500usb_register_read(rt2x00dev, STA_CSR0);
	qual->rx_failed = rt2x00_get_field16(reg, STA_CSR0_FCS_ERROR);

	/*
	 * Update False CCA count from register.
	 */
	reg = rt2500usb_register_read(rt2x00dev, STA_CSR3);
	qual->false_cca = rt2x00_get_field16(reg, STA_CSR3_FALSE_CCA_ERROR);
}

static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev,
				  struct link_qual *qual)
{
	u16 eeprom;
	u16 value;

	eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24);
	value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R24_LOW);
	rt2500usb_bbp_write(rt2x00dev, 24, value);

	eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25);
	value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R25_LOW);
	rt2500usb_bbp_write(rt2x00dev, 25, value);

	eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61);
	value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R61_LOW);
	rt2500usb_bbp_write(rt2x00dev, 61, value);

	eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC);
	value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_VGCUPPER);
	rt2500usb_bbp_write(rt2x00dev, 17, value);

	qual->vgc_level = value;
}

/*
 * Queue handlers.
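 * Only the RX and beacon queues are started and stopped via device
 * registers below; the TX data queues are handled by rt2x00usb
 * through the generic kick/flush callbacks.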
 */
static void rt2500usb_start_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u16 reg;

	switch (queue->qid) {
	case QID_RX:
		reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2);
		rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 0);
		rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
		break;
	case QID_BEACON:
		reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19);
		rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
		rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
		rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
		rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
		break;
	default:
		break;
	}
}

static void rt2500usb_stop_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u16 reg;

	switch (queue->qid) {
	case QID_RX:
		reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2);
		rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1);
		rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
		break;
	case QID_BEACON:
		reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19);
		rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0);
		rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0);
		rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
		rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
		break;
	default:
		break;
	}
}

/*
 * Initialization functions.
 */
static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
{
	u16 reg;

	rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0x0001,
				    USB_MODE_TEST, REGISTER_TIMEOUT);
	rt2x00usb_vendor_request_sw(rt2x00dev, USB_SINGLE_WRITE, 0x0308,
				    0x00f0, REGISTER_TIMEOUT);

	reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2);
	rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);

	rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x1111);
	rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x1e11);

	reg = rt2500usb_register_read(rt2x00dev, MAC_CSR1);
	rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 1);
	rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 1);
	rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 0);
	rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);

	reg = rt2500usb_register_read(rt2x00dev, MAC_CSR1);
	rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 0);
	rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 0);
	rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 0);
	rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);

	reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR5);
	rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID0, 13);
	rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID0_VALID, 1);
	rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID1, 12);
	rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID1_VALID, 1);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR5, reg);

	reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR6);
	rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID0, 10);
	rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID0_VALID, 1);
	rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID1, 11);
	rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID1_VALID, 1);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR6, reg);

	reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR7);
	rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID0, 7);
	rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID0_VALID, 1);
	rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID1, 6);
	rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID1_VALID, 1);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR7, reg);

	reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR8);
	rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID0, 5);
	rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID0_VALID, 1);
	rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1, 0);
	rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1_VALID, 0);
	rt2500usb_register_write(rt2x00dev,
				 TXRX_CSR8, reg);

	reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19);
	rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0);
	rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, 0);
	rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0);
	rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);

	rt2500usb_register_write(rt2x00dev, TXRX_CSR21, 0xe78f);
	rt2500usb_register_write(rt2x00dev, MAC_CSR9, 0xff1d);

	if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
		return -EBUSY;

	reg = rt2500usb_register_read(rt2x00dev, MAC_CSR1);
	rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 0);
	rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 0);
	rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1);
	rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);

	if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) {
		reg = rt2500usb_register_read(rt2x00dev, PHY_CSR2);
		rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0);
	} else {
		reg = 0;
		rt2x00_set_field16(&reg, PHY_CSR2_LNA, 1);
		rt2x00_set_field16(&reg, PHY_CSR2_LNA_MODE, 3);
	}
	rt2500usb_register_write(rt2x00dev, PHY_CSR2, reg);

	rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x0002);
	rt2500usb_register_write(rt2x00dev, MAC_CSR22, 0x0053);
	rt2500usb_register_write(rt2x00dev, MAC_CSR15, 0x01ee);
	rt2500usb_register_write(rt2x00dev, MAC_CSR16, 0x0000);

	reg = rt2500usb_register_read(rt2x00dev, MAC_CSR8);
	rt2x00_set_field16(&reg, MAC_CSR8_MAX_FRAME_UNIT,
			   rt2x00dev->rx->data_size);
	rt2500usb_register_write(rt2x00dev, MAC_CSR8, reg);

	reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR0);
	rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, CIPHER_NONE);
	rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER);
	rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, 0);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg);

	reg = rt2500usb_register_read(rt2x00dev, MAC_CSR18);
	rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON, 90);
	rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);

	reg = rt2500usb_register_read(rt2x00dev, PHY_CSR4);
	rt2x00_set_field16(&reg, PHY_CSR4_LOW_RF_LE, 1);
	rt2500usb_register_write(rt2x00dev, PHY_CSR4, reg);

	reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR1);
	rt2x00_set_field16(&reg, TXRX_CSR1_AUTO_SEQUENCE, 1);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg);

	return 0;
}

static int rt2500usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i;
	u8 value;

	for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
		value = rt2500usb_bbp_read(rt2x00dev, 0);
		if ((value != 0xff) && (value != 0x00))
			return 0;
		udelay(REGISTER_BUSY_DELAY);
	}

	rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
	return -EACCES;
}

static int rt2500usb_init_bbp(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i;
	u16 eeprom;
	u8 value;
	u8 reg_id;

	if (unlikely(rt2500usb_wait_bbp_ready(rt2x00dev)))
		return -EACCES;

	rt2500usb_bbp_write(rt2x00dev, 3, 0x02);
	rt2500usb_bbp_write(rt2x00dev, 4, 0x19);
	rt2500usb_bbp_write(rt2x00dev, 14, 0x1c);
	rt2500usb_bbp_write(rt2x00dev, 15, 0x30);
	rt2500usb_bbp_write(rt2x00dev, 16, 0xac);
	rt2500usb_bbp_write(rt2x00dev, 18, 0x18);
	rt2500usb_bbp_write(rt2x00dev, 19, 0xff);
	rt2500usb_bbp_write(rt2x00dev, 20, 0x1e);
	rt2500usb_bbp_write(rt2x00dev, 21, 0x08);
	rt2500usb_bbp_write(rt2x00dev, 22, 0x08);
	rt2500usb_bbp_write(rt2x00dev, 23, 0x08);
	rt2500usb_bbp_write(rt2x00dev, 24, 0x80);
	rt2500usb_bbp_write(rt2x00dev, 25, 0x50);
	rt2500usb_bbp_write(rt2x00dev, 26, 0x08);
	rt2500usb_bbp_write(rt2x00dev, 27, 0x23);
	rt2500usb_bbp_write(rt2x00dev, 30, 0x10);
	rt2500usb_bbp_write(rt2x00dev, 31, 0x2b);
	rt2500usb_bbp_write(rt2x00dev, 32, 0xb9);
	rt2500usb_bbp_write(rt2x00dev, 34, 0x12);
	rt2500usb_bbp_write(rt2x00dev, 35, 0x50);
	rt2500usb_bbp_write(rt2x00dev, 39, 0xc4);
	rt2500usb_bbp_write(rt2x00dev, 40, 0x02);
	rt2500usb_bbp_write(rt2x00dev, 41, 0x60);
	rt2500usb_bbp_write(rt2x00dev, 53, 0x10);
	rt2500usb_bbp_write(rt2x00dev, 54, 0x18);
	rt2500usb_bbp_write(rt2x00dev, 56, 0x08);
	rt2500usb_bbp_write(rt2x00dev, 57, 0x10);
	rt2500usb_bbp_write(rt2x00dev, 58, 0x08);
	rt2500usb_bbp_write(rt2x00dev, 61, 0x60);
	rt2500usb_bbp_write(rt2x00dev, 62, 0x10);
	rt2500usb_bbp_write(rt2x00dev, 75, 0xff);

	for (i = 0; i < EEPROM_BBP_SIZE; i++) {
		eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i);

		if (eeprom != 0xffff && eeprom != 0x0000) {
			reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
			value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
			rt2500usb_bbp_write(rt2x00dev, reg_id, value);
		}
	}

	return 0;
}

/*
 * Device state switch handlers.
 */
static int rt2500usb_enable_radio(struct rt2x00_dev *rt2x00dev)
{
	/*
	 * Initialize all registers.
	 */
	if (unlikely(rt2500usb_init_registers(rt2x00dev) ||
		     rt2500usb_init_bbp(rt2x00dev)))
		return -EIO;

	return 0;
}

static void rt2500usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x2121);
	rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x2121);

	/*
	 * Disable synchronisation.
	 */
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0);

	rt2x00usb_disable_radio(rt2x00dev);
}

static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev,
			       enum dev_state state)
{
	u16 reg;
	u16 reg2;
	unsigned int i;
	bool put_to_sleep;
	u8 bbp_state;
	u8 rf_state;

	put_to_sleep = (state != STATE_AWAKE);

	reg = 0;
	rt2x00_set_field16(&reg, MAC_CSR17_BBP_DESIRE_STATE, state);
	rt2x00_set_field16(&reg, MAC_CSR17_RF_DESIRE_STATE, state);
	rt2x00_set_field16(&reg, MAC_CSR17_PUT_TO_SLEEP, put_to_sleep);
	rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg);
	rt2x00_set_field16(&reg, MAC_CSR17_SET_STATE, 1);
	rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg);

	/*
	 * Device is not guaranteed to be in the requested state yet.
	 * We must wait until the register indicates that the
	 * device has entered the correct state.
	 */
	for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
		reg2 = rt2500usb_register_read(rt2x00dev, MAC_CSR17);
		bbp_state = rt2x00_get_field16(reg2, MAC_CSR17_BBP_CURR_STATE);
		rf_state = rt2x00_get_field16(reg2, MAC_CSR17_RF_CURR_STATE);
		if (bbp_state == state && rf_state == state)
			return 0;
		rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg);
		msleep(30);
	}

	return -EBUSY;
}

static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
				      enum dev_state state)
{
	int retval = 0;

	switch (state) {
	case STATE_RADIO_ON:
		retval = rt2500usb_enable_radio(rt2x00dev);
		break;
	case STATE_RADIO_OFF:
		rt2500usb_disable_radio(rt2x00dev);
		break;
	case STATE_RADIO_IRQ_ON:
	case STATE_RADIO_IRQ_OFF:
		/* No support, but no error either */
		break;
	case STATE_DEEP_SLEEP:
	case STATE_SLEEP:
	case STATE_STANDBY:
	case STATE_AWAKE:
		retval = rt2500usb_set_state(rt2x00dev, state);
		break;
	default:
		retval = -ENOTSUPP;
		break;
	}

	if (unlikely(retval))
		rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
			   state, retval);

	return retval;
}

/*
 * TX descriptor initialization
 */
static void rt2500usb_write_tx_desc(struct queue_entry *entry,
				    struct txentry_desc *txdesc)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	__le32 *txd = (__le32 *) entry->skb->data;
	u32 word;

	/*
	 * Start writing the descriptor words.
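	 * Note that the TX descriptor is prepended to the frame data
	 * inside the skb itself (txd points at skb->data) rather than
	 * living in a separate descriptor ring, as it would on a PCI
	 * device.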
	 */
	word = rt2x00_desc_read(txd, 0);
	rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit);
	rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
			   test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_ACK,
			   test_bit(ENTRY_TXD_ACK, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
			   test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_OFDM,
			   (txdesc->rate_mode == RATE_MODE_OFDM));
	rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
			   test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
	rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
	rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
	rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
	rt2x00_desc_write(txd, 0, word);

	word = rt2x00_desc_read(txd, 1);
	rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
	rt2x00_set_field32(&word, TXD_W1_AIFS, entry->queue->aifs);
	rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
	rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
	rt2x00_desc_write(txd, 1, word);

	word = rt2x00_desc_read(txd, 2);
	rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
	rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
	rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
			   txdesc->u.plcp.length_low);
	rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
			   txdesc->u.plcp.length_high);
	rt2x00_desc_write(txd, 2, word);

	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
		_rt2x00_desc_write(txd, 3, skbdesc->iv[0]);
		_rt2x00_desc_write(txd, 4, skbdesc->iv[1]);
	}

	/*
	 * Register descriptor details in skb frame descriptor.
	 */
	skbdesc->flags |= SKBDESC_DESC_IN_SKB;
	skbdesc->desc = txd;
	skbdesc->desc_len = TXD_DESC_SIZE;
}

/*
 * TX data initialization
 */
static void rt2500usb_beacondone(struct urb *urb);

static void rt2500usb_write_beacon(struct queue_entry *entry,
				   struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
	int pipe = usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint);
	int length;
	u16 reg, reg0;

	/*
	 * Disable beaconing while we are reloading the beacon data,
	 * otherwise we might be sending out invalid data.
	 */
	reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19);
	rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);

	/*
	 * Add space for the descriptor in front of the skb.
	 */
	skb_push(entry->skb, TXD_DESC_SIZE);
	memset(entry->skb->data, 0, TXD_DESC_SIZE);

	/*
	 * Write the TX descriptor for the beacon.
	 */
	rt2500usb_write_tx_desc(entry, txdesc);

	/*
	 * Dump beacon to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);

	/*
	 * USB devices cannot blindly pass the skb->len as the
	 * length of the data to usb_fill_bulk_urb. Pass the skb
	 * to the driver to determine what the length should be.
	 */
	length = rt2x00dev->ops->lib->get_tx_data_len(entry);

	usb_fill_bulk_urb(bcn_priv->urb, usb_dev, pipe,
			  entry->skb->data, length, rt2500usb_beacondone,
			  entry);

	/*
	 * Second, we need to create the guardian byte.
	 * We only need a single byte, so let's recycle
	 * the 'flags' field we are not using for beacons.
	 */
	bcn_priv->guardian_data = 0;
	usb_fill_bulk_urb(bcn_priv->guardian_urb, usb_dev, pipe,
			  &bcn_priv->guardian_data, 1, rt2500usb_beacondone,
			  entry);

	/*
	 * Send out the guardian byte.
	 */
	usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC);

	/*
	 * Enable beaconing again.
	 */
	rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
	rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
	reg0 = reg;
	rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
	/*
	 * Beacon generation will fail initially.
	 * To prevent this we need to change the TXRX_CSR19
	 * register several times (reg0 is the same as reg
	 * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0
	 * and 1 in reg).
	 */
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
}

static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
{
	int length;

	/*
	 * The length _must_ be a multiple of 2,
	 * but it must _not_ be a multiple of the USB packet size.
	 */
	length = roundup(entry->skb->len, 2);
	length += (2 * !(length % entry->queue->usb_maxpacket));

	return length;
}

/*
 * RX control handlers
 */
static void rt2500usb_fill_rxdone(struct queue_entry *entry,
				  struct rxdone_entry_desc *rxdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	__le32 *rxd =
	    (__le32 *)(entry->skb->data +
		       (entry_priv->urb->actual_length -
			entry->queue->desc_size));
	u32 word0;
	u32 word1;

	/*
	 * Copy descriptor to the skbdesc->desc buffer, making it safe from
	 * moving of frame data in rt2x00usb.
	 */
	memcpy(skbdesc->desc, rxd, skbdesc->desc_len);
	rxd = (__le32 *)skbdesc->desc;

	/*
	 * It is now safe to read the descriptor on all architectures.
	 */
	word0 = rt2x00_desc_read(rxd, 0);
	word1 = rt2x00_desc_read(rxd, 1);

	if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
	if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;

	rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER);
	if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR))
		rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY;

	if (rxdesc->cipher != CIPHER_NONE) {
		rxdesc->iv[0] = _rt2x00_desc_read(rxd, 2);
		rxdesc->iv[1] = _rt2x00_desc_read(rxd, 3);
		rxdesc->dev_flags |= RXDONE_CRYPTO_IV;

		/* ICV is located at the end of frame */

		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;

		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
			rxdesc->flags |= RX_FLAG_DECRYPTED;
		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
	}

	/*
	 * Obtain the status about this packet.
	 * When the frame was received with an OFDM bitrate,
	 * the signal is the PLCP value. If it was received with
	 * a CCK bitrate the signal is the rate in 100kbit/s.
	 */
	rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
	rxdesc->rssi =
	    rt2x00_get_field32(word1, RXD_W1_RSSI) - rt2x00dev->rssi_offset;
	rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);

	if (rt2x00_get_field32(word0, RXD_W0_OFDM))
		rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
	else
		rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
	if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
		rxdesc->dev_flags |= RXDONE_MY_BSS;

	/*
	 * Adjust the skb memory window to the frame boundaries.
	 */
	skb_trim(entry->skb, rxdesc->size);
}

/*
 * Interrupt functions.
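 * rt2500usb_beacondone() runs in URB-completion context and is shared
 * by the guardian URB and the real beacon URB; it tells the two apart
 * by comparing the urb pointer (see below).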
 */
static void rt2500usb_beacondone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;

	if (!test_bit(DEVICE_STATE_ENABLED_RADIO,
		      &entry->queue->rt2x00dev->flags))
		return;

	/*
	 * Check if this was the guardian beacon;
	 * if that was the case we need to send the real beacon now.
	 * Otherwise we should free the sk_buff, as the device
	 * should be doing the rest of the work now.
	 */
	if (bcn_priv->guardian_urb == urb) {
		usb_submit_urb(bcn_priv->urb, GFP_ATOMIC);
	} else if (bcn_priv->urb == urb) {
		dev_kfree_skb(entry->skb);
		entry->skb = NULL;
	}
}

/*
 * Device probe functions.
 */
static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
{
	u16 word;
	u8 *mac;
	u8 bbp;

	rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE);

	/*
	 * Start validation of the data that has been read.
	 */
	mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
	rt2x00lib_set_mac_address(rt2x00dev, mac);

	word = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT,
				   ANTENNA_SW_DIVERSITY);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT,
				   ANTENNA_SW_DIVERSITY);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_LED_MODE,
				   LED_MODE_DEFAULT);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
		rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
	}

	word = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
		rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
	}

	word = rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI,
				   DEFAULT_RSSI_OFFSET);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word);
		rt2x00_eeprom_dbg(rt2x00dev, "Calibrate offset: 0x%04x\n",
				  word);
	}

	word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_THRESHOLD, 45);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE, word);
		rt2x00_eeprom_dbg(rt2x00dev, "BBPtune: 0x%04x\n", word);
	}

	/*
	 * Switch lower vgc bound to current BBP R17 value,
	 * lower the value a bit for better quality.
	 */
	bbp = rt2500usb_bbp_read(rt2x00dev, 17);
	bbp -= 6;

	word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCUPPER, 0x40);
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
		rt2x00_eeprom_dbg(rt2x00dev, "BBPtune vgc: 0x%04x\n", word);
	} else {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
	}

	word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_LOW, 0x48);
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word);
		rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r17: 0x%04x\n", word);
	}

	word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_LOW, 0x40);
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_HIGH, 0x80);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R24, word);
		rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r24: 0x%04x\n", word);
	}

	word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_LOW, 0x40);
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_HIGH, 0x50);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R25, word);
		rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r25: 0x%04x\n", word);
	}

	word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_LOW, 0x60);
		rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_HIGH, 0x6d);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R61, word);
		rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r61: 0x%04x\n", word);
	}

	return 0;
}

static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
{
	u16 reg;
	u16 value;
	u16 eeprom;

	/*
	 * Read EEPROM word for configuration.
	 */
	eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA);

	/*
	 * Identify RF chipset.
	 */
	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
	reg = rt2500usb_register_read(rt2x00dev, MAC_CSR0);
	rt2x00_set_chip(rt2x00dev, RT2570, value, reg);

	if (((reg & 0xfff0) != 0) || ((reg & 0x0000000f) == 0)) {
		rt2x00_err(rt2x00dev, "Invalid RT chipset detected\n");
		return -ENODEV;
	}

	if (!rt2x00_rf(rt2x00dev, RF2522) &&
	    !rt2x00_rf(rt2x00dev, RF2523) &&
	    !rt2x00_rf(rt2x00dev, RF2524) &&
	    !rt2x00_rf(rt2x00dev, RF2525) &&
	    !rt2x00_rf(rt2x00dev, RF2525E) &&
	    !rt2x00_rf(rt2x00dev, RF5222)) {
		rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n");
		return -ENODEV;
	}

	/*
	 * Identify default antenna configuration.
	 */
	rt2x00dev->default_ant.tx =
	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT);
	rt2x00dev->default_ant.rx =
	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT);

	/*
	 * When the eeprom indicates SW_DIVERSITY use HW_DIVERSITY instead.
	 * I am not 100% sure about this, but the legacy drivers do not
	 * indicate antenna swapping in software is required when
	 * diversity is enabled.
	 */
	if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY)
		rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY;
	if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY)
		rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY;

	/*
	 * Store led mode, for correct led behaviour.
	 */
#ifdef CONFIG_RT2X00_LIB_LEDS
	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);

	rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
	if (value == LED_MODE_TXRX_ACTIVITY ||
	    value == LED_MODE_DEFAULT ||
	    value == LED_MODE_ASUS)
		rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_qual,
				   LED_TYPE_ACTIVITY);
#endif /* CONFIG_RT2X00_LIB_LEDS */

	/*
	 * Detect if this device has a hardware controlled radio.
	 */
	if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
		__set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);

	/*
	 * Read the RSSI <-> dBm offset information.
	 */
	eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET);
	rt2x00dev->rssi_offset =
	    rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI);

	return 0;
}

/*
 * RF value list for RF2522
 * Supports: 2.4 GHz
 */
static const struct rf_channel rf_vals_bg_2522[] = {
	{ 1,  0x00002050, 0x000c1fda, 0x00000101, 0 },
	{ 2,  0x00002050, 0x000c1fee, 0x00000101, 0 },
	{ 3,  0x00002050, 0x000c2002, 0x00000101, 0 },
	{ 4,  0x00002050, 0x000c2016, 0x00000101, 0 },
	{ 5,  0x00002050, 0x000c202a, 0x00000101, 0 },
	{ 6,  0x00002050, 0x000c203e, 0x00000101, 0 },
	{ 7,  0x00002050, 0x000c2052, 0x00000101, 0 },
	{ 8,  0x00002050, 0x000c2066, 0x00000101, 0 },
	{ 9,  0x00002050, 0x000c207a, 0x00000101, 0 },
	{ 10, 0x00002050, 0x000c208e, 0x00000101, 0 },
	{ 11, 0x00002050, 0x000c20a2, 0x00000101, 0 },
	{ 12, 0x00002050, 0x000c20b6, 0x00000101, 0 },
	{ 13, 0x00002050, 0x000c20ca, 0x00000101, 0 },
	{ 14, 0x00002050, 0x000c20fa, 0x00000101, 0 },
};

/*
 * RF value list for RF2523
 * Supports: 2.4 GHz
 */
static const struct rf_channel rf_vals_bg_2523[] = {
	{ 1,  0x00022010, 0x00000c9e, 0x000e0111, 0x00000a1b },
	{ 2,  0x00022010, 0x00000ca2, 0x000e0111, 0x00000a1b },
	{ 3,  0x00022010, 0x00000ca6, 0x000e0111, 0x00000a1b },
	{ 4,  0x00022010, 0x00000caa, 0x000e0111, 0x00000a1b },
	{ 5,  0x00022010, 0x00000cae, 0x000e0111, 0x00000a1b },
	{ 6,  0x00022010, 0x00000cb2, 0x000e0111, 0x00000a1b },
	{ 7,  0x00022010, 0x00000cb6, 0x000e0111, 0x00000a1b },
	{ 8,  0x00022010, 0x00000cba, 0x000e0111, 0x00000a1b },
	{ 9,  0x00022010, 0x00000cbe, 0x000e0111, 0x00000a1b },
	{ 10, 0x00022010, 0x00000d02, 0x000e0111, 0x00000a1b },
	{ 11, 0x00022010, 0x00000d06, 0x000e0111, 0x00000a1b },
	{ 12, 0x00022010, 0x00000d0a, 0x000e0111, 0x00000a1b },
	{ 13, 0x00022010, 0x00000d0e, 0x000e0111, 0x00000a1b },
	{ 14, 0x00022010, 0x00000d1a, 0x000e0111, 0x00000a03 },
};

/*
 * RF value list for RF2524
 * Supports: 2.4 GHz
 */
static const struct rf_channel rf_vals_bg_2524[] = {
	{ 1,  0x00032020, 0x00000c9e, 0x00000101, 0x00000a1b },
	{ 2,  0x00032020, 0x00000ca2, 0x00000101, 0x00000a1b },
	{ 3,  0x00032020, 0x00000ca6, 0x00000101, 0x00000a1b },
	{ 4,  0x00032020, 0x00000caa, 0x00000101, 0x00000a1b },
	{ 5,  0x00032020, 0x00000cae, 0x00000101, 0x00000a1b },
	{ 6,  0x00032020, 0x00000cb2, 0x00000101, 0x00000a1b },
	{ 7,  0x00032020, 0x00000cb6, 0x00000101, 0x00000a1b },
	{ 8,  0x00032020, 0x00000cba, 0x00000101, 0x00000a1b },
	{ 9,  0x00032020, 0x00000cbe, 0x00000101, 0x00000a1b },
	{ 10, 0x00032020, 0x00000d02, 0x00000101, 0x00000a1b },
	{ 11, 0x00032020, 0x00000d06, 0x00000101, 0x00000a1b },
	{ 12, 0x00032020, 0x00000d0a, 0x00000101, 0x00000a1b },
	{ 13, 0x00032020, 0x00000d0e, 0x00000101, 0x00000a1b },
	{ 14, 0x00032020, 0x00000d1a, 0x00000101, 0x00000a03 },
};

/*
 * RF value list for RF2525
 * Supports: 2.4 GHz
 */
static const struct rf_channel rf_vals_bg_2525[] = {
	{ 1,  0x00022020, 0x00080c9e, 0x00060111, 0x00000a1b },
	{ 2,  0x00022020, 0x00080ca2, 0x00060111, 0x00000a1b },
	{ 3,  0x00022020, 0x00080ca6, 0x00060111, 0x00000a1b },
	{ 4,  0x00022020, 0x00080caa, 0x00060111, 0x00000a1b },
	{ 5,  0x00022020, 0x00080cae, 0x00060111, 0x00000a1b },
	{ 6,  0x00022020, 0x00080cb2, 0x00060111, 0x00000a1b },
	{ 7,  0x00022020, 0x00080cb6, 0x00060111, 0x00000a1b },
	{ 8,  0x00022020, 0x00080cba, 0x00060111, 0x00000a1b },
	{ 9,  0x00022020, 0x00080cbe, 0x00060111, 0x00000a1b },
	{ 10, 0x00022020, 0x00080d02, 0x00060111, 0x00000a1b },
	{ 11, 0x00022020, 0x00080d06, 0x00060111, 0x00000a1b },
	{ 12, 0x00022020, 0x00080d0a, 0x00060111, 0x00000a1b },
	{ 13, 0x00022020, 0x00080d0e, 0x00060111, 0x00000a1b },
	{ 14, 0x00022020, 0x00080d1a, 0x00060111, 0x00000a03 },
};

/*
 * RF value list for RF2525e
 * Supports: 2.4 GHz
 */
static const struct rf_channel rf_vals_bg_2525e[] = {
	{ 1,  0x00022010, 0x0000089a, 0x00060111, 0x00000e1b },
	{ 2,  0x00022010, 0x0000089e, 0x00060111, 0x00000e07 },
	{ 3,  0x00022010, 0x0000089e, 0x00060111, 0x00000e1b },
	{ 4,  0x00022010, 0x000008a2, 0x00060111, 0x00000e07 },
	{ 5,  0x00022010, 0x000008a2, 0x00060111, 0x00000e1b },
	{ 6,  0x00022010, 0x000008a6, 0x00060111, 0x00000e07 },
	{ 7,  0x00022010, 0x000008a6, 0x00060111, 0x00000e1b },
	{ 8,  0x00022010, 0x000008aa, 0x00060111, 0x00000e07 },
	{ 9,  0x00022010, 0x000008aa, 0x00060111, 0x00000e1b },
	{ 10, 0x00022010, 0x000008ae, 0x00060111, 0x00000e07 },
	{ 11, 0x00022010, 0x000008ae, 0x00060111, 0x00000e1b },
	{ 12, 0x00022010, 0x000008b2, 0x00060111, 0x00000e07 },
	{ 13, 0x00022010, 0x000008b2, 0x00060111, 0x00000e1b },
	{ 14, 0x00022010, 0x000008b6, 0x00060111, 0x00000e23 },
};

/*
 * RF value list for RF5222
 * Supports: 2.4 GHz & 5.2 GHz
 */
static const struct rf_channel rf_vals_5222[] = {
	{ 1,  0x00022020, 0x00001136, 0x00000101, 0x00000a0b },
	{ 2,  0x00022020, 0x0000113a, 0x00000101, 0x00000a0b },
	{ 3,  0x00022020, 0x0000113e, 0x00000101, 0x00000a0b },
	{ 4,  0x00022020, 0x00001182, 0x00000101, 0x00000a0b },
	{ 5,  0x00022020, 0x00001186, 0x00000101, 0x00000a0b },
	{ 6,  0x00022020, 0x0000118a, 0x00000101, 0x00000a0b },
	{ 7,  0x00022020, 0x0000118e, 0x00000101, 0x00000a0b },
	{ 8,  0x00022020, 0x00001192, 0x00000101, 0x00000a0b },
	{ 9,  0x00022020, 0x00001196, 0x00000101, 0x00000a0b },
	{ 10, 0x00022020, 0x0000119a, 0x00000101, 0x00000a0b },
	{ 11, 0x00022020, 0x0000119e, 0x00000101, 0x00000a0b },
	{ 12, 0x00022020, 0x000011a2, 0x00000101, 0x00000a0b },
	{ 13, 0x00022020, 0x000011a6, 0x00000101, 0x00000a0b },
	{ 14, 0x00022020, 0x000011ae, 0x00000101, 0x00000a1b },

	/* 802.11 UNI / HyperLan 2 */
	{ 36, 0x00022010, 0x00018896, 0x00000101, 0x00000a1f },
	{ 40, 0x00022010, 0x0001889a, 0x00000101, 0x00000a1f },
	{ 44, 0x00022010, 0x0001889e, 0x00000101, 0x00000a1f },
	{ 48, 0x00022010, 0x000188a2, 0x00000101, 0x00000a1f },
	{ 52, 0x00022010, 0x000188a6, 0x00000101, 0x00000a1f },
	{ 56, 0x00022010, 0x000188aa, 0x00000101, 0x00000a1f },
	{ 60, 0x00022010, 0x000188ae, 0x00000101, 0x00000a1f },
	{ 64, 0x00022010, 0x000188b2, 0x00000101, 0x00000a1f },

	/* 802.11 HyperLan 2 */
	{ 100, 0x00022010, 0x00008802, 0x00000101, 0x00000a0f },
	{ 104, 0x00022010, 0x00008806, 0x00000101, 0x00000a0f },
	{ 108, 0x00022010, 0x0000880a, 0x00000101, 0x00000a0f },
	{ 112, 0x00022010, 0x0000880e, 0x00000101, 0x00000a0f },
	{ 116, 0x00022010, 0x00008812, 0x00000101, 0x00000a0f },
	{ 120, 0x00022010, 0x00008816, 0x00000101, 0x00000a0f },
	{ 124, 0x00022010, 0x0000881a, 0x00000101, 0x00000a0f },
	{ 128, 0x00022010, 0x0000881e, 0x00000101, 0x00000a0f },
	{ 132, 0x00022010, 0x00008822, 0x00000101, 0x00000a0f },
	{ 136, 0x00022010, 0x00008826, 0x00000101, 0x00000a0f },

	/* 802.11 UNII */
	{ 140, 0x00022010, 0x0000882a, 0x00000101, 0x00000a0f },
	{ 149, 0x00022020,
static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
	struct hw_mode_spec *spec = &rt2x00dev->spec;
	struct channel_info *info;
	u8 *tx_power;
	unsigned int i;

	/*
	 * Initialize all hw fields.
	 *
	 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING unless we are
	 * capable of sending the buffered frames out after the DTIM
	 * transmission using rt2x00lib_beacondone. This will send out
	 * multicast and broadcast traffic immediately instead of buffering it
	 * infinitely and thus dropping it after some time.
	 */
	ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
	ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
	ieee80211_hw_set(rt2x00dev->hw, RX_INCLUDES_FCS);
	ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);

	/*
	 * Disable power saving by default.
	 */
	rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
	SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
				rt2x00_eeprom_addr(rt2x00dev,
						   EEPROM_MAC_ADDR_0));

	/*
	 * Initialize hw_mode information.
	 */
	spec->supported_bands = SUPPORT_BAND_2GHZ;
	spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;

	if (rt2x00_rf(rt2x00dev, RF2522)) {
		spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
		spec->channels = rf_vals_bg_2522;
	} else if (rt2x00_rf(rt2x00dev, RF2523)) {
		spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
		spec->channels = rf_vals_bg_2523;
	} else if (rt2x00_rf(rt2x00dev, RF2524)) {
		spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
		spec->channels = rf_vals_bg_2524;
	} else if (rt2x00_rf(rt2x00dev, RF2525)) {
		spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
		spec->channels = rf_vals_bg_2525;
	} else if (rt2x00_rf(rt2x00dev, RF2525E)) {
		spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
		spec->channels = rf_vals_bg_2525e;
	} else if (rt2x00_rf(rt2x00dev, RF5222)) {
		spec->supported_bands |= SUPPORT_BAND_5GHZ;
		spec->num_channels = ARRAY_SIZE(rf_vals_5222);
		spec->channels = rf_vals_5222;
	}

	/*
	 * Create channel information array
	 */
	info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	spec->channels_info = info;

	tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
	for (i = 0; i < 14; i++) {
		info[i].max_power = MAX_TXPOWER;
		info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
	}

	if (spec->num_channels > 14) {
		for (i = 14; i < spec->num_channels; i++) {
			info[i].max_power = MAX_TXPOWER;
			info[i].default_power1 = DEFAULT_TXPOWER;
		}
	}

	return 0;
}
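/*
 * Illustrative sketch of the per-channel TX power setup above: channels
 * 1-14 take a calibrated byte from the EEPROM (sanity-checked through
 * TXPOWER_FROM_DEV()), while 5 GHz channels have no EEPROM slot and get
 * a fixed default. The constants and the exact clamping policy below are
 * assumptions for the example, not the driver's actual macro definitions.
 */
#include <stdint.h>

#define EX_MAX_TXPOWER		31	/* assumed hardware limit */
#define EX_DEFAULT_TXPOWER	24	/* assumed fallback value */

static uint8_t ex_txpower_from_dev(uint8_t raw)
{
	/* out-of-range EEPROM data falls back to the default */
	return raw > EX_MAX_TXPOWER ? EX_DEFAULT_TXPOWER : raw;
}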
static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
{
	int retval;
	u16 reg;

	/*
	 * Allocate eeprom data.
	 */
	retval = rt2500usb_validate_eeprom(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2500usb_init_eeprom(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * Enable rfkill polling by setting GPIO direction of the
	 * rfkill switch GPIO pin correctly.
	 */
	reg = rt2500usb_register_read(rt2x00dev, MAC_CSR19);
	rt2x00_set_field16(&reg, MAC_CSR19_DIR0, 0);
	rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);

	/*
	 * Initialize hw specifications.
	 */
	retval = rt2500usb_probe_hw_mode(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * This device requires the atim queue
	 */
	__set_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
	__set_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags);
	if (!modparam_nohwcrypt) {
		__set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
		__set_bit(REQUIRE_COPY_IV, &rt2x00dev->cap_flags);
	}
	__set_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags);
	__set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags);

	/*
	 * Set the rssi offset.
	 */
	rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;

	return 0;
}

static const struct ieee80211_ops rt2500usb_mac80211_ops = {
	.add_chanctx = ieee80211_emulate_add_chanctx,
	.remove_chanctx = ieee80211_emulate_remove_chanctx,
	.change_chanctx = ieee80211_emulate_change_chanctx,
	.switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
	.tx = rt2x00mac_tx,
	.wake_tx_queue = ieee80211_handle_wake_tx_queue,
	.start = rt2x00mac_start,
	.stop = rt2x00mac_stop,
	.add_interface = rt2x00mac_add_interface,
	.remove_interface = rt2x00mac_remove_interface,
	.config = rt2x00mac_config,
	.configure_filter = rt2x00mac_configure_filter,
	.set_tim = rt2x00mac_set_tim,
	.set_key = rt2x00mac_set_key,
	.sw_scan_start = rt2x00mac_sw_scan_start,
	.sw_scan_complete = rt2x00mac_sw_scan_complete,
	.get_stats = rt2x00mac_get_stats,
	.bss_info_changed = rt2x00mac_bss_info_changed,
	.conf_tx = rt2x00mac_conf_tx,
	.rfkill_poll = rt2x00mac_rfkill_poll,
	.flush = rt2x00mac_flush,
	.set_antenna = rt2x00mac_set_antenna,
	.get_antenna = rt2x00mac_get_antenna,
	.get_ringparam = rt2x00mac_get_ringparam,
	.tx_frames_pending = rt2x00mac_tx_frames_pending,
};

static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
	.probe_hw = rt2500usb_probe_hw,
	.initialize = rt2x00usb_initialize,
	.uninitialize = rt2x00usb_uninitialize,
	.clear_entry = rt2x00usb_clear_entry,
	.set_device_state = rt2500usb_set_device_state,
	.rfkill_poll = rt2500usb_rfkill_poll,
	.link_stats = rt2500usb_link_stats,
	.reset_tuner = rt2500usb_reset_tuner,
	.watchdog = rt2x00usb_watchdog,
	.start_queue = rt2500usb_start_queue,
	.kick_queue = rt2x00usb_kick_queue,
	.stop_queue = rt2500usb_stop_queue,
	.flush_queue = rt2x00usb_flush_queue,
	.write_tx_desc = rt2500usb_write_tx_desc,
	.write_beacon = rt2500usb_write_beacon,
	.get_tx_data_len = rt2500usb_get_tx_data_len,
	.fill_rxdone = rt2500usb_fill_rxdone,
	.config_shared_key = rt2500usb_config_key,
	.config_pairwise_key = rt2500usb_config_key,
	.config_filter = rt2500usb_config_filter,
	.config_intf = rt2500usb_config_intf,
	.config_erp = rt2500usb_config_erp,
	.config_ant = rt2500usb_config_ant,
	.config = rt2500usb_config,
};

static void rt2500usb_queue_init(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_RX:
		queue->limit = 32;
		queue->data_size = DATA_FRAME_SIZE;
		queue->desc_size = RXD_DESC_SIZE;
		queue->priv_size = sizeof(struct queue_entry_priv_usb);
		break;

	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		queue->limit = 32;
		queue->data_size = DATA_FRAME_SIZE;
		queue->desc_size = TXD_DESC_SIZE;
		queue->priv_size = sizeof(struct queue_entry_priv_usb);
		break;

	case QID_BEACON:
		queue->limit = 1;
		queue->data_size = MGMT_FRAME_SIZE;
		queue->desc_size = TXD_DESC_SIZE;
		queue->priv_size = sizeof(struct queue_entry_priv_usb_bcn);
		break;

	case QID_ATIM:
		queue->limit = 8;
		queue->data_size = DATA_FRAME_SIZE;
		queue->desc_size = TXD_DESC_SIZE;
		queue->priv_size = sizeof(struct queue_entry_priv_usb);
		break;

	default:
		BUG();
		break;
	}
}

static const struct rt2x00_ops rt2500usb_ops = {
	.name = KBUILD_MODNAME,
	.max_ap_intf = 1,
	.eeprom_size = EEPROM_SIZE,
	.rf_size = RF_SIZE,
	.tx_queues = NUM_TX_QUEUES,
	.queue_init = rt2500usb_queue_init,
	.lib = &rt2500usb_rt2x00_ops,
	.hw = &rt2500usb_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
	.debugfs = &rt2500usb_rt2x00debug,
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
};
/*
 * rt2500usb module information.
 */
static const struct usb_device_id rt2500usb_device_table[] = {
	/* ASUS */
	{ USB_DEVICE(0x0b05, 0x1706) },
	{ USB_DEVICE(0x0b05, 0x1707) },
	/* Belkin */
	{ USB_DEVICE(0x050d, 0x7050) },	/* FCC ID: K7SF5D7050A ver. 2.x */
	{ USB_DEVICE(0x050d, 0x7051) },
	/* Cisco Systems */
	{ USB_DEVICE(0x13b1, 0x000d) },
	{ USB_DEVICE(0x13b1, 0x0011) },
	{ USB_DEVICE(0x13b1, 0x001a) },
	/* Conceptronic */
	{ USB_DEVICE(0x14b2, 0x3c02) },
	/* D-LINK */
	{ USB_DEVICE(0x2001, 0x3c00) },
	/* Gigabyte */
	{ USB_DEVICE(0x1044, 0x8001) },
	{ USB_DEVICE(0x1044, 0x8007) },
	/* Hercules */
	{ USB_DEVICE(0x06f8, 0xe000) },
	/* Melco */
	{ USB_DEVICE(0x0411, 0x005e) },
	{ USB_DEVICE(0x0411, 0x0066) },
	{ USB_DEVICE(0x0411, 0x0067) },
	{ USB_DEVICE(0x0411, 0x008b) },
	{ USB_DEVICE(0x0411, 0x0097) },
	/* MSI */
	{ USB_DEVICE(0x0db0, 0x6861) },
	{ USB_DEVICE(0x0db0, 0x6865) },
	{ USB_DEVICE(0x0db0, 0x6869) },
	/* Ralink */
	{ USB_DEVICE(0x148f, 0x1706) },
	{ USB_DEVICE(0x148f, 0x2570) },
	{ USB_DEVICE(0x148f, 0x9020) },
	/* Sagem */
	{ USB_DEVICE(0x079b, 0x004b) },
	/* Siemens */
	{ USB_DEVICE(0x0681, 0x3c06) },
	/* SMC */
	{ USB_DEVICE(0x0707, 0xee13) },
	/* Spairon */
	{ USB_DEVICE(0x114b, 0x0110) },
	/* SURECOM */
	{ USB_DEVICE(0x0769, 0x11f3) },
	/* Trust */
	{ USB_DEVICE(0x0eb0, 0x9020) },
	/* VTech */
	{ USB_DEVICE(0x0f88, 0x3012) },
	/* Zinwell */
	{ USB_DEVICE(0x5a57, 0x0260) },
	{ 0, }
};

MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT2500 USB Wireless LAN driver.");
MODULE_DEVICE_TABLE(usb, rt2500usb_device_table);
MODULE_LICENSE("GPL");

static int rt2500usb_probe(struct usb_interface *usb_intf,
			   const struct usb_device_id *id)
{
	return rt2x00usb_probe(usb_intf, &rt2500usb_ops);
}

static struct usb_driver rt2500usb_driver = {
	.name = KBUILD_MODNAME,
	.id_table = rt2500usb_device_table,
	.probe = rt2500usb_probe,
	.disconnect = rt2x00usb_disconnect,
	.suspend = rt2x00usb_suspend,
	.resume = rt2x00usb_resume,
	.reset_resume = rt2x00usb_resume,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(rt2500usb_driver);
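/*
 * Illustrative user-space sketch: the match the USB core performs against
 * a plain VID/PID table like rt2500usb_device_table above. The first
 * entry whose vendor and product IDs both match wins; a zeroed entry
 * terminates the table. The ex_ names are stand-ins for this example.
 */
#include <stdint.h>
#include <stdio.h>

struct ex_usb_id {
	uint16_t vendor, product;
};

static const struct ex_usb_id ex_table[] = {
	{ 0x0b05, 0x1706 },	/* ASUS */
	{ 0x148f, 0x2570 },	/* Ralink */
	{ 0, 0 },		/* terminator */
};

static int ex_match(uint16_t vendor, uint16_t product)
{
	const struct ex_usb_id *id;

	for (id = ex_table; id->vendor; id++)
		if (id->vendor == vendor && id->product == product)
			return 1;
	return 0;
}

int main(void)
{
	printf("%d %d\n", ex_match(0x148f, 0x2570), ex_match(0x148f, 0x2571));
	return 0;	/* prints "1 0" */
}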
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * USB CDC EEM network interface driver
 * Copyright (C) 2009 Oberthur Technologies
 * by Omar Laazimani, Olivier Condemine
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>

/*
 * This driver is an implementation of the CDC "Ethernet Emulation
 * Model" (EEM) specification, which encapsulates Ethernet frames
 * for transport over USB using a simpler USB device model than the
 * previous CDC "Ethernet Control Model" (ECM, or "CDC Ethernet").
 *
 * For details, see https://usb.org/sites/default/files/CDC_EEM10.pdf
 *
 * This version has been tested with a GIGAntIC WuaoW SIM Smart Card on
 * the 2.6.24, 2.6.27 and 2.6.30rc2 kernels.
 * It has also been validated on Openmoko Om 2008.12 (based on a 2.6.24
 * kernel). Built on 23-April-2009.
 */

#define EEM_HEAD	2	/* 2 byte header */

/*-------------------------------------------------------------------------*/

static void eem_linkcmd_complete(struct urb *urb)
{
	dev_kfree_skb(urb->context);
	usb_free_urb(urb);
}

static void eem_linkcmd(struct usbnet *dev, struct sk_buff *skb)
{
	struct urb *urb;
	int status;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto fail;

	usb_fill_bulk_urb(urb, dev->udev, dev->out,
			  skb->data, skb->len, eem_linkcmd_complete, skb);

	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		usb_free_urb(urb);
fail:
		dev_kfree_skb(skb);
		netdev_warn(dev->net, "link cmd failure\n");
		return;
	}
}

static int eem_bind(struct usbnet *dev, struct usb_interface *intf)
{
	int status = 0;

	status = usbnet_get_endpoints(dev, intf);
	if (status < 0)
		return status;

	/* no jumbogram (16K) support for now */
	dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN + VLAN_HLEN;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	return 0;
}
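/*
 * Illustrative user-space sketch: packing and unpacking the 2-byte EEM
 * data packet header that EEM_HEAD accounts for - bits 0..13 carry the
 * Ethernet frame length, bit 14 is bmCRC, bit 15 is bmType (0 = data),
 * exactly as the fixup helpers below spell out.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t ex_eem_data_header(uint16_t len, int crc_valid)
{
	return (uint16_t)((len & 0x3fff) | (crc_valid ? 1u << 14 : 0));
}

int main(void)
{
	unsigned int h = ex_eem_data_header(1514, 1);

	printf("header 0x%04x: type=%u crc=%u len=%u\n",
	       h, h >> 15, (h >> 14) & 1, h & 0x3fff);
	return 0;
}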
/*
 * EEM permits packing multiple Ethernet frames into USB transfers
 * (a "bundle"), but for TX we don't try to do that.
 */
static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
				    gfp_t flags)
{
	struct sk_buff	*skb2 = NULL;
	u16		len = skb->len;
	u32		crc = 0;
	int		padlen = 0;

	/* When ((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket) is
	 * zero, stick two bytes of zero length EEM packet on the end.
	 * Else the framework would add invalid single byte padding,
	 * since it can't know whether ZLPs will be handled right by
	 * all the relevant hardware and software.
	 */
	if (!((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket))
		padlen += 2;

	if (!skb_cloned(skb)) {
		int	headroom = skb_headroom(skb);
		int	tailroom = skb_tailroom(skb);

		if ((tailroom >= ETH_FCS_LEN + padlen) &&
		    (headroom >= EEM_HEAD))
			goto done;

		if ((headroom + tailroom)
				> (EEM_HEAD + ETH_FCS_LEN + padlen)) {
			skb->data = memmove(skb->head + EEM_HEAD,
					    skb->data, skb->len);
			skb_set_tail_pointer(skb, len);
			goto done;
		}
	}

	skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
	dev_kfree_skb_any(skb);
	if (!skb2)
		return NULL;

	skb = skb2;

done:
	/* we don't use the "no Ethernet CRC" option */
	crc = crc32_le(~0, skb->data, skb->len);
	crc = ~crc;
	put_unaligned_le32(crc, skb_put(skb, 4));

	/* EEM packet header format:
	 * b0..13:	length of ethernet frame
	 * b14:		bmCRC (1 == valid Ethernet CRC)
	 * b15:		bmType (0 == data)
	 */
	len = skb->len;
	put_unaligned_le16(BIT(14) | len, skb_push(skb, 2));

	/* Bundle a zero length EEM packet if needed */
	if (padlen)
		put_unaligned_le16(0, skb_put(skb, 2));

	return skb;
}
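/*
 * The padding rule from eem_tx_fixup() in isolation (user-space sketch):
 * when header + frame + CRC would end exactly on a maxpacket boundary,
 * a 2-byte zero-length EEM packet is bundled instead of relying on a
 * zero-length USB packet.
 */
#include <stdio.h>

#define EX_EEM_HEAD	2
#define EX_ETH_FCS_LEN	4

static int ex_needs_pad(unsigned int len, unsigned int maxpacket)
{
	return ((len + EX_EEM_HEAD + EX_ETH_FCS_LEN) % maxpacket) == 0;
}

int main(void)
{
	/* 58 + 2 + 4 = 64: exactly one full 64-byte bulk packet, so pad */
	printf("%d %d\n", ex_needs_pad(58, 64), ex_needs_pad(60, 64));
	return 0;	/* prints "1 0" */
}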
static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	/*
	 * Our task here is to strip off framing, leaving skb with one
	 * data frame for the usbnet framework code to process. But we
	 * may have received multiple EEM payloads, or command payloads.
	 * So we must process _everything_ as if it's a header, except
	 * maybe the last data payload.
	 *
	 * REVISIT the framework needs updating so that when we consume
	 * all payloads (the last or only message was a command, or a
	 * zero length EEM packet) that is not accounted as an rx_error.
	 */
	do {
		struct sk_buff	*skb2 = NULL;
		u16		header;
		u16		len = 0;

		/* incomplete EEM header? */
		if (skb->len < EEM_HEAD)
			return 0;

		/*
		 * EEM packet header format:
		 * b0..14:	EEM type dependent (Data or Command)
		 * b15:		bmType
		 */
		header = get_unaligned_le16(skb->data);
		skb_pull(skb, EEM_HEAD);

		/*
		 * The bmType bit helps to denote when EEM
		 * packet is data or command:
		 *	bmType = 0	: EEM data payload
		 *	bmType = 1	: EEM (link) command
		 */
		if (header & BIT(15)) {
			u16	bmEEMCmd;

			/*
			 * EEM (link) command packet:
			 * b0..10:	bmEEMCmdParam
			 * b11..13:	bmEEMCmd
			 * b14:		bmReserved (must be 0)
			 * b15:		1 (EEM command)
			 */
			if (header & BIT(14)) {
				netdev_dbg(dev->net, "reserved command %04x\n",
					   header);
				continue;
			}

			bmEEMCmd = (header >> 11) & 0x7;
			switch (bmEEMCmd) {

			/* Responding to echo requests is mandatory. */
			case 0:		/* Echo command */
				len = header & 0x7FF;

				/* bogus command? */
				if (skb->len < len)
					return 0;

				skb2 = skb_clone(skb, GFP_ATOMIC);
				if (unlikely(!skb2))
					goto next;
				skb_trim(skb2, len);
				put_unaligned_le16(BIT(15) | BIT(11) | len,
						   skb_push(skb2, 2));
				eem_linkcmd(dev, skb2);
				break;

			/*
			 * Host may choose to ignore hints.
			 *  - suspend: peripheral ready to suspend
			 *  - response: suggest N millisec polling
			 *  - response complete: suggest N sec polling
			 *
			 * Suspend is reported and maybe heeded.
			 */
			case 2:		/* Suspend hint */
				usbnet_device_suggests_idle(dev);
				continue;
			case 3:		/* Response hint */
			case 4:		/* Response complete hint */
				continue;

			/*
			 * Hosts should never receive host-to-peripheral
			 * or reserved command codes; or responses to an
			 * echo command we didn't send.
			 */
			case 1:		/* Echo response */
			case 5:		/* Tickle */
			default:	/* reserved */
				netdev_warn(dev->net,
					    "unexpected link command %d\n",
					    bmEEMCmd);
				continue;
			}

		} else {
			u32	crc, crc2;
			int	is_last;

			/* zero length EEM packet? */
			if (header == 0)
				continue;

			/*
			 * EEM data packet header:
			 * b0..13:	length of ethernet frame
			 * b14:		bmCRC
			 * b15:		0 (EEM data)
			 */
			len = header & 0x3FFF;

			/* bogus EEM payload? */
			if (skb->len < len)
				return 0;

			/* bogus ethernet frame? */
			if (len < (ETH_HLEN + ETH_FCS_LEN))
				goto next;

			/*
			 * Treat the last payload differently: framework
			 * code expects our "fixup" to have stripped off
			 * headers, so "skb" is a data packet (or error).
			 * Else if it's not the last payload, keep "skb"
			 * for further processing.
			 */
			is_last = (len == skb->len);
			if (is_last)
				skb2 = skb;
			else {
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if (unlikely(!skb2))
					return 0;
			}

			/*
			 * The bmCRC helps to denote when the CRC field in
			 * the Ethernet frame contains a calculated CRC:
			 *	bmCRC = 1	: CRC is calculated
			 *	bmCRC = 0	: CRC = 0xDEADBEEF
			 */
			if (header & BIT(14)) {
				crc = get_unaligned_le32(skb2->data
						+ len - ETH_FCS_LEN);
				crc2 = ~crc32_le(~0, skb2->data, skb2->len
						- ETH_FCS_LEN);
			} else {
				crc = get_unaligned_be32(skb2->data
						+ len - ETH_FCS_LEN);
				crc2 = 0xdeadbeef;
			}
			skb_trim(skb2, len - ETH_FCS_LEN);

			if (is_last)
				return crc == crc2;

			if (unlikely(crc != crc2)) {
				dev->net->stats.rx_errors++;
				dev_kfree_skb_any(skb2);
			} else
				usbnet_skb_return(dev, skb2);
		}

next:
		skb_pull(skb, len);
	} while (skb->len);

	return 1;
}

static const struct driver_info eem_info = {
	.description =	"CDC EEM Device",
	.flags =	FLAG_ETHER | FLAG_POINTTOPOINT,
	.bind =		eem_bind,
	.rx_fixup =	eem_rx_fixup,
	.tx_fixup =	eem_tx_fixup,
};

/*-------------------------------------------------------------------------*/

static const struct usb_device_id products[] = {
{
	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_EEM,
			   USB_CDC_PROTO_EEM),
	.driver_info = (unsigned long) &eem_info,
},
{
	/* EMPTY == end of list */
},
};
MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver eem_driver = {
	.name =		"cdc_eem",
	.id_table =	products,
	.probe =	usbnet_probe,
	.disconnect =	usbnet_disconnect,
	.suspend =	usbnet_suspend,
	.resume =	usbnet_resume,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(eem_driver);

MODULE_AUTHOR("Omar Laazimani <omar.oberthur@gmail.com>");
MODULE_DESCRIPTION("USB CDC EEM");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0+
/*
 * LED & force feedback support for BigBen Interactive
 *
 * 0x146b:0x0902 "Bigben Interactive Bigben Game Pad"
 * "Kid-friendly Wired Controller" PS3OFMINIPAD SONY
 * sold for use with the PS3
 *
 * Copyright (c) 2018 Hanno Zulla <kontakt@hanno.de>
 */

#include <linux/input.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/leds.h>
#include <linux/hid.h>

#include "hid-ids.h"

/*
 * The original descriptor for 0x146b:0x0902
 *
 * 0x05, 0x01,        // Usage Page (Generic Desktop Ctrls)
 * 0x09, 0x05,        // Usage (Game Pad)
 * 0xA1, 0x01,        // Collection (Application)
 * 0x15, 0x00,        //   Logical Minimum (0)
 * 0x25, 0x01,        //   Logical Maximum (1)
 * 0x35, 0x00,        //   Physical Minimum (0)
 * 0x45, 0x01,        //   Physical Maximum (1)
 * 0x75, 0x01,        //   Report Size (1)
 * 0x95, 0x0D,        //   Report Count (13)
 * 0x05, 0x09,        //   Usage Page (Button)
 * 0x19, 0x01,        //   Usage Minimum (0x01)
 * 0x29, 0x0D,        //   Usage Maximum (0x0D)
 * 0x81, 0x02,        //   Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
 * 0x95, 0x03,        //   Report Count (3)
 * 0x81, 0x01,        //   Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position)
 * 0x05, 0x01,        //   Usage Page (Generic Desktop Ctrls)
 * 0x25, 0x07,        //   Logical Maximum (7)
 * 0x46, 0x3B, 0x01,  //   Physical Maximum (315)
 * 0x75, 0x04,        //   Report Size (4)
 * 0x95, 0x01,        //   Report Count (1)
 * 0x65, 0x14,        //   Unit (System: English Rotation, Length: Centimeter)
 * 0x09, 0x39,        //   Usage (Hat switch)
 * 0x81, 0x42,        //   Input (Data,Var,Abs,No Wrap,Linear,Preferred State,Null State)
 * 0x65, 0x00,        //   Unit (None)
 * 0x95, 0x01,        //   Report Count (1)
 * 0x81, 0x01,        //   Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position)
 * 0x26, 0xFF, 0x00,  //   Logical Maximum (255)
 * 0x46, 0xFF, 0x00,  //   Physical Maximum (255)
 * 0x09, 0x30,        //   Usage (X)
 * 0x09, 0x31,        //   Usage (Y)
 * 0x09, 0x32,        //   Usage (Z)
 * 0x09, 0x35,        //   Usage (Rz)
 * 0x75, 0x08,        //   Report Size (8)
 * 0x95, 0x04,        //   Report Count (4)
 * 0x81, 0x02,        //   Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
 * 0x06, 0x00, 0xFF,  //   Usage Page (Vendor Defined 0xFF00)
 * 0x09, 0x20,        //   Usage (0x20)
 * 0x09, 0x21,        //   Usage (0x21)
 * 0x09, 0x22,        //   Usage (0x22)
 * 0x09, 0x23,        //   Usage (0x23)
 * 0x09, 0x24,        //   Usage (0x24)
 * 0x09, 0x25,        //   Usage (0x25)
 * 0x09, 0x26,        //   Usage (0x26)
 * 0x09, 0x27,        //   Usage (0x27)
 * 0x09, 0x28,        //   Usage (0x28)
 * 0x09, 0x29,        //   Usage (0x29)
 * 0x09, 0x2A,        //   Usage (0x2A)
 * 0x09, 0x2B,        //   Usage (0x2B)
 * 0x95, 0x0C,        //   Report Count (12)
 * 0x81, 0x02,        //   Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
 * 0x0A, 0x21, 0x26,  //   Usage (0x2621)
 * 0x95, 0x08,        //   Report Count (8)
 * 0xB1, 0x02,        //   Feature (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile)
 * 0x0A, 0x21, 0x26,  //   Usage (0x2621)
 * 0x91, 0x02,        //   Output (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile)
 * 0x26, 0xFF, 0x03,  //   Logical Maximum (1023)
 * 0x46, 0xFF, 0x03,  //   Physical Maximum (1023)
 * 0x09, 0x2C,        //   Usage (0x2C)
 * 0x09, 0x2D,        //   Usage (0x2D)
 * 0x09, 0x2E,        //   Usage (0x2E)
 * 0x09, 0x2F,        //   Usage (0x2F)
 * 0x75, 0x10,        //   Report Size (16)
 * 0x95, 0x04,        //   Report Count (4)
 * 0x81, 0x02,        //   Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
 * 0xC0,              // End Collection
 */

#define PID0902_RDESC_ORIG_SIZE	137
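/*
 * Illustrative user-space sketch: decoding one HID "short item" from the
 * descriptor quoted above. The prefix byte packs the data size in bits
 * 0-1, the item type in bits 2-3 and the tag in bits 4-7, so the first
 * two bytes 0x05 0x01 decode as a 1-byte Global item, tag 0 = Usage
 * Page, with data 0x01 (Generic Desktop).
 */
#include <stdio.h>

int main(void)
{
	unsigned char prefix = 0x05;

	printf("size=%u type=%u tag=0x%x\n",
	       prefix & 0x3,		/* 1 data byte follows */
	       (prefix >> 2) & 0x3,	/* 1 = Global item */
	       prefix >> 4);		/* 0x0 = Usage Page */
	return 0;
}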
/*
 * The fixed descriptor for 0x146b:0x0902
 *
 * - map buttons according to gamepad.rst
 * - assign right stick from Z/Rz to Rx/Ry
 * - map previously unused analog trigger data to Z/RZ
 * - simplify feature and output descriptor
 */
static const __u8 pid0902_rdesc_fixed[] = {
	0x05, 0x01,        /* Usage Page (Generic Desktop Ctrls) */
	0x09, 0x05,        /* Usage (Game Pad) */
	0xA1, 0x01,        /* Collection (Application) */
	0x15, 0x00,        /*   Logical Minimum (0) */
	0x25, 0x01,        /*   Logical Maximum (1) */
	0x35, 0x00,        /*   Physical Minimum (0) */
	0x45, 0x01,        /*   Physical Maximum (1) */
	0x75, 0x01,        /*   Report Size (1) */
	0x95, 0x0D,        /*   Report Count (13) */
	0x05, 0x09,        /*   Usage Page (Button) */
	0x09, 0x05,        /*   Usage (BTN_WEST) */
	0x09, 0x01,        /*   Usage (BTN_SOUTH) */
	0x09, 0x02,        /*   Usage (BTN_EAST) */
	0x09, 0x04,        /*   Usage (BTN_NORTH) */
	0x09, 0x07,        /*   Usage (BTN_TL) */
	0x09, 0x08,        /*   Usage (BTN_TR) */
	0x09, 0x09,        /*   Usage (BTN_TL2) */
	0x09, 0x0A,        /*   Usage (BTN_TR2) */
	0x09, 0x0B,        /*   Usage (BTN_SELECT) */
	0x09, 0x0C,        /*   Usage (BTN_START) */
	0x09, 0x0E,        /*   Usage (BTN_THUMBL) */
	0x09, 0x0F,        /*   Usage (BTN_THUMBR) */
	0x09, 0x0D,        /*   Usage (BTN_MODE) */
	0x81, 0x02,        /*   Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
	0x75, 0x01,        /*   Report Size (1) */
	0x95, 0x03,        /*   Report Count (3) */
	0x81, 0x01,        /*   Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
	0x05, 0x01,        /*   Usage Page (Generic Desktop Ctrls) */
	0x25, 0x07,        /*   Logical Maximum (7) */
	0x46, 0x3B, 0x01,  /*   Physical Maximum (315) */
	0x75, 0x04,        /*   Report Size (4) */
	0x95, 0x01,        /*   Report Count (1) */
	0x65, 0x14,        /*   Unit (System: English Rotation, Length: Centimeter) */
	0x09, 0x39,        /*   Usage (Hat switch) */
	0x81, 0x42,        /*   Input (Data,Var,Abs,No Wrap,Linear,Preferred State,Null State) */
	0x65, 0x00,        /*   Unit (None) */
	0x95, 0x01,        /*   Report Count (1) */
	0x81, 0x01,        /*   Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
	0x26, 0xFF, 0x00,  /*   Logical Maximum (255) */
	0x46, 0xFF, 0x00,  /*   Physical Maximum (255) */
	0x09, 0x30,        /*   Usage (X) */
	0x09, 0x31,        /*   Usage (Y) */
	0x09, 0x33,        /*   Usage (Rx) */
	0x09, 0x34,        /*   Usage (Ry) */
	0x75, 0x08,        /*   Report Size (8) */
	0x95, 0x04,        /*   Report Count (4) */
	0x81, 0x02,        /*   Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
	0x95, 0x0A,        /*   Report Count (10) */
	0x81, 0x01,        /*   Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
	0x05, 0x01,        /*   Usage Page (Generic Desktop Ctrls) */
	0x26, 0xFF, 0x00,  /*   Logical Maximum (255) */
	0x46, 0xFF, 0x00,  /*   Physical Maximum (255) */
	0x09, 0x32,        /*   Usage (Z) */
	0x09, 0x35,        /*   Usage (Rz) */
	0x95, 0x02,        /*   Report Count (2) */
	0x81, 0x02,        /*   Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
	0x95, 0x08,        /*   Report Count (8) */
	0x81, 0x01,        /*   Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
	0x06, 0x00, 0xFF,  /*   Usage Page (Vendor Defined 0xFF00) */
	0xB1, 0x02,        /*   Feature (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile) */
	0x0A, 0x21, 0x26,  /*   Usage (0x2621) */
	0x95, 0x08,        /*   Report Count (8) */
	0x91, 0x02,        /*   Output (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile) */
	0x0A, 0x21, 0x26,  /*   Usage (0x2621) */
	0x95, 0x08,        /*   Report Count (8) */
	0x81, 0x02,        /*   Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
	0xC0,              /* End Collection */
};
#define NUM_LEDS 4

struct bigben_device {
	struct hid_device *hid;
	struct hid_report *report;
	spinlock_t lock;
	bool removed;
	u8 led_state;         /* LED1 = 1 .. LED4 = 8 */
	u8 right_motor_on;    /* right motor off/on 0/1 */
	u8 left_motor_force;  /* left motor force 0-255 */
	struct led_classdev *leds[NUM_LEDS];
	bool work_led;
	bool work_ff;
	struct work_struct worker;
};

static inline void bigben_schedule_work(struct bigben_device *bigben)
{
	unsigned long flags;

	spin_lock_irqsave(&bigben->lock, flags);
	if (!bigben->removed)
		schedule_work(&bigben->worker);
	spin_unlock_irqrestore(&bigben->lock, flags);
}

static void bigben_worker(struct work_struct *work)
{
	struct bigben_device *bigben = container_of(work,
		struct bigben_device, worker);
	struct hid_field *report_field = bigben->report->field[0];
	bool do_work_led = false;
	bool do_work_ff = false;
	u8 *buf;
	u32 len;
	unsigned long flags;

	buf = hid_alloc_report_buf(bigben->report, GFP_KERNEL);
	if (!buf)
		return;

	len = hid_report_len(bigben->report);

	/* LED work */
	spin_lock_irqsave(&bigben->lock, flags);

	if (bigben->work_led) {
		bigben->work_led = false;
		do_work_led = true;
		report_field->value[0] = 0x01; /* 1 = led message */
		report_field->value[1] = 0x08; /* reserved value, always 8 */
		report_field->value[2] = bigben->led_state;
		report_field->value[3] = 0x00; /* padding */
		report_field->value[4] = 0x00; /* padding */
		report_field->value[5] = 0x00; /* padding */
		report_field->value[6] = 0x00; /* padding */
		report_field->value[7] = 0x00; /* padding */
		hid_output_report(bigben->report, buf);
	}

	spin_unlock_irqrestore(&bigben->lock, flags);

	if (do_work_led) {
		hid_hw_raw_request(bigben->hid, bigben->report->id, buf, len,
				   bigben->report->type, HID_REQ_SET_REPORT);
	}

	/* FF work */
	spin_lock_irqsave(&bigben->lock, flags);

	if (bigben->work_ff) {
		bigben->work_ff = false;
		do_work_ff = true;
		report_field->value[0] = 0x02; /* 2 = rumble effect message */
		report_field->value[1] = 0x08; /* reserved value, always 8 */
		report_field->value[2] = bigben->right_motor_on;
		report_field->value[3] = bigben->left_motor_force;
		report_field->value[4] = 0xff; /* duration 0-254 (255 = nonstop) */
		report_field->value[5] = 0x00; /* padding */
		report_field->value[6] = 0x00; /* padding */
		report_field->value[7] = 0x00; /* padding */
		hid_output_report(bigben->report, buf);
	}

	spin_unlock_irqrestore(&bigben->lock, flags);

	if (do_work_ff) {
		hid_hw_raw_request(bigben->hid, bigben->report->id, buf, len,
				   bigben->report->type, HID_REQ_SET_REPORT);
	}

	kfree(buf);
}
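/*
 * The two 8-byte output messages bigben_worker() assembles, shown as
 * plain byte arrays (user-space sketch; values mirror the
 * report_field->value[] assignments above; ex_ names are stand-ins).
 */
#include <stdint.h>

static void ex_bigben_led_msg(uint8_t buf[8], uint8_t led_state)
{
	buf[0] = 0x01;		/* 1 = led message */
	buf[1] = 0x08;		/* reserved value, always 8 */
	buf[2] = led_state;	/* LED1 = 1 .. LED4 = 8, OR-ed together */
	for (int i = 3; i < 8; i++)
		buf[i] = 0x00;	/* padding */
}

static void ex_bigben_rumble_msg(uint8_t buf[8], uint8_t right_motor_on,
				 uint8_t left_motor_force)
{
	buf[0] = 0x02;			/* 2 = rumble effect message */
	buf[1] = 0x08;			/* reserved value, always 8 */
	buf[2] = right_motor_on;	/* 0/1 */
	buf[3] = left_motor_force;	/* 0-255 */
	buf[4] = 0xff;			/* duration: 255 = nonstop */
	for (int i = 5; i < 8; i++)
		buf[i] = 0x00;		/* padding */
}

int main(void)
{
	uint8_t msg[8];

	ex_bigben_led_msg(msg, 0x01 | 0x08);	/* LED1 and LED4 on */
	return msg[2] == 0x09 ? 0 : 1;
}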
static int hid_bigben_play_effect(struct input_dev *dev, void *data,
			 struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct bigben_device *bigben = hid_get_drvdata(hid);
	u8 right_motor_on;
	u8 left_motor_force;
	unsigned long flags;

	if (!bigben) {
		hid_err(hid, "no device data\n");
		return 0;
	}

	if (effect->type != FF_RUMBLE)
		return 0;

	right_motor_on   = effect->u.rumble.weak_magnitude ? 1 : 0;
	left_motor_force = effect->u.rumble.strong_magnitude / 256;

	if (right_motor_on != bigben->right_motor_on ||
			left_motor_force != bigben->left_motor_force) {
		spin_lock_irqsave(&bigben->lock, flags);
		bigben->right_motor_on   = right_motor_on;
		bigben->left_motor_force = left_motor_force;
		bigben->work_ff = true;
		spin_unlock_irqrestore(&bigben->lock, flags);

		bigben_schedule_work(bigben);
	}

	return 0;
}

static void bigben_set_led(struct led_classdev *led,
	enum led_brightness value)
{
	struct device *dev = led->dev->parent;
	struct hid_device *hid = to_hid_device(dev);
	struct bigben_device *bigben = hid_get_drvdata(hid);
	int n;
	bool work;
	unsigned long flags;

	if (!bigben) {
		hid_err(hid, "no device data\n");
		return;
	}

	for (n = 0; n < NUM_LEDS; n++) {
		if (led == bigben->leds[n]) {
			spin_lock_irqsave(&bigben->lock, flags);
			if (value == LED_OFF) {
				work = (bigben->led_state & BIT(n));
				bigben->led_state &= ~BIT(n);
			} else {
				work = !(bigben->led_state & BIT(n));
				bigben->led_state |= BIT(n);
			}
			spin_unlock_irqrestore(&bigben->lock, flags);

			if (work) {
				bigben->work_led = true;
				bigben_schedule_work(bigben);
			}
			return;
		}
	}
}

static enum led_brightness bigben_get_led(struct led_classdev *led)
{
	struct device *dev = led->dev->parent;
	struct hid_device *hid = to_hid_device(dev);
	struct bigben_device *bigben = hid_get_drvdata(hid);
	int n;

	if (!bigben) {
		hid_err(hid, "no device data\n");
		return LED_OFF;
	}

	for (n = 0; n < NUM_LEDS; n++) {
		if (led == bigben->leds[n])
			return (bigben->led_state & BIT(n)) ?
				LED_ON : LED_OFF;
	}

	return LED_OFF;
}
static void bigben_remove(struct hid_device *hid)
{
	struct bigben_device *bigben = hid_get_drvdata(hid);
	unsigned long flags;

	spin_lock_irqsave(&bigben->lock, flags);
	bigben->removed = true;
	spin_unlock_irqrestore(&bigben->lock, flags);

	cancel_work_sync(&bigben->worker);
	hid_hw_stop(hid);
}

static int bigben_probe(struct hid_device *hid,
	const struct hid_device_id *id)
{
	struct bigben_device *bigben;
	struct hid_input *hidinput;
	struct led_classdev *led;
	char *name;
	size_t name_sz;
	int n, error;

	bigben = devm_kzalloc(&hid->dev, sizeof(*bigben), GFP_KERNEL);
	if (!bigben)
		return -ENOMEM;
	hid_set_drvdata(hid, bigben);
	bigben->hid = hid;
	bigben->removed = false;

	error = hid_parse(hid);
	if (error) {
		hid_err(hid, "parse failed\n");
		return error;
	}

	error = hid_hw_start(hid, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (error) {
		hid_err(hid, "hw start failed\n");
		return error;
	}

	bigben->report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 8);
	if (!bigben->report) {
		hid_err(hid, "no output report found\n");
		error = -ENODEV;
		goto error_hw_stop;
	}

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		error = -ENODEV;
		goto error_hw_stop;
	}

	hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
	set_bit(FF_RUMBLE, hidinput->input->ffbit);

	INIT_WORK(&bigben->worker, bigben_worker);
	spin_lock_init(&bigben->lock);

	error = input_ff_create_memless(hidinput->input, NULL,
		hid_bigben_play_effect);
	if (error)
		goto error_hw_stop;

	name_sz = strlen(dev_name(&hid->dev)) + strlen(":red:bigben#") + 1;

	for (n = 0; n < NUM_LEDS; n++) {
		led = devm_kzalloc(
			&hid->dev,
			sizeof(struct led_classdev) + name_sz,
			GFP_KERNEL
		);
		if (!led) {
			error = -ENOMEM;
			goto error_hw_stop;
		}
		name = (void *)(&led[1]);
		snprintf(name, name_sz,
			"%s:red:bigben%d",
			dev_name(&hid->dev), n + 1
		);
		led->name = name;
		led->brightness = (n == 0) ? LED_ON : LED_OFF;
		led->max_brightness = 1;
		led->brightness_get = bigben_get_led;
		led->brightness_set = bigben_set_led;
		bigben->leds[n] = led;
		error = devm_led_classdev_register(&hid->dev, led);
		if (error)
			goto error_hw_stop;
	}

	/* initial state: LED1 is on, no rumble effect */
	bigben->led_state = BIT(0);
	bigben->right_motor_on = 0;
	bigben->left_motor_force = 0;
	bigben->work_led = true;
	bigben->work_ff = true;
	bigben_schedule_work(bigben);

	hid_info(hid, "LED and force feedback support for BigBen gamepad\n");

	return 0;

error_hw_stop:
	hid_hw_stop(hid);
	return error;
}

static const __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc,
				       unsigned int *rsize)
{
	if (*rsize == PID0902_RDESC_ORIG_SIZE) {
		*rsize = sizeof(pid0902_rdesc_fixed);
		return pid0902_rdesc_fixed;
	} else
		hid_warn(hid, "unexpected rdesc, please submit for review\n");
	return rdesc;
}

static const struct hid_device_id bigben_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_BIGBEN,
		USB_DEVICE_ID_BIGBEN_PS3OFMINIPAD) },
	{ }
};
MODULE_DEVICE_TABLE(hid, bigben_devices);

static struct hid_driver bigben_driver = {
	.name = "bigben",
	.id_table = bigben_devices,
	.probe = bigben_probe,
	.report_fixup = bigben_report_fixup,
	.remove = bigben_remove,
};
module_hid_driver(bigben_driver);

MODULE_DESCRIPTION("LED & force feedback support for BigBen Interactive");
MODULE_LICENSE("GPL");
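/*
 * User-space sketch: driving one of the four LEDs registered above via
 * the led_classdev sysfs interface. The device-name prefix below is
 * hypothetical (it depends on the HID bus path); the ":red:bigben2"
 * suffix follows the snprintf() format in bigben_probe().
 */
#include <stdio.h>

int main(void)
{
	/* list /sys/class/leds/ to find the real name on a given system */
	FILE *f = fopen("/sys/class/leds/0003:146B:0902.0001:red:bigben2/brightness",
			"w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* max_brightness is 1, so 1 = full on */
	return fclose(f) ? 1 : 0;
}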
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NODEMASK_H
#define __LINUX_NODEMASK_H

/*
 * Nodemasks provide a bitmap suitable for representing the
 * set of Nodes in a system, one bit position per Node number.
 *
 * See detailed comments in the file linux/bitmap.h describing the
 * data type on which these nodemasks are based.
 *
 * For details of nodemask_parse_user(), see bitmap_parse_user() in
 * lib/bitmap.c.  For details of nodelist_parse(), see bitmap_parselist(),
 * also in bitmap.c.  For details of node_remap(), see bitmap_bitremap in
 * lib/bitmap.c.  For details of nodes_remap(), see bitmap_remap in
 * lib/bitmap.c.  For details of nodes_onto(), see bitmap_onto in
 * lib/bitmap.c.  For details of nodes_fold(), see bitmap_fold in
 * lib/bitmap.c.
 *
 * The available nodemask operations are:
 *
 * void node_set(node, mask)		turn on bit 'node' in mask
 * void node_clear(node, mask)		turn off bit 'node' in mask
 * void nodes_setall(mask)		set all bits
 * void nodes_clear(mask)		clear all bits
 * int node_isset(node, mask)		true iff bit 'node' set in mask
 * int node_test_and_set(node, mask)	test and set bit 'node' in mask
 *
 * void nodes_and(dst, src1, src2)	dst = src1 & src2  [intersection]
 * void nodes_or(dst, src1, src2)	dst = src1 | src2  [union]
 * void nodes_xor(dst, src1, src2)	dst = src1 ^ src2
 * void nodes_andnot(dst, src1, src2)	dst = src1 & ~src2
 * void nodes_complement(dst, src)	dst = ~src
 *
 * int nodes_equal(mask1, mask2)	Does mask1 == mask2?
 * int nodes_intersects(mask1, mask2)	Do mask1 and mask2 intersect?
 * int nodes_subset(mask1, mask2)	Is mask1 a subset of mask2?
 * int nodes_empty(mask)		Is mask empty (no bits set)?
 * int nodes_full(mask)			Is mask full (all bits set)?
 * int nodes_weight(mask)		Hamming weight - number of set bits
 *
 * void nodes_shift_right(dst, src, n)	Shift right
 * void nodes_shift_left(dst, src, n)	Shift left
 *
 * unsigned int first_node(mask)	Number lowest set bit, or MAX_NUMNODES
 * unsigned int next_node(node, mask)	Next node past 'node', or MAX_NUMNODES
 * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
 *					or MAX_NUMNODES
 * unsigned int first_unset_node(mask)	First node not set in mask, or
 *					MAX_NUMNODES
 *
 * nodemask_t nodemask_of_node(node)	Return nodemask with bit 'node' set
 * NODE_MASK_ALL			Initializer - all bits set
 * NODE_MASK_NONE			Initializer - no bits set
 * unsigned long *nodes_addr(mask)	Array of unsigned long's in mask
 *
 * int nodemask_parse_user(ubuf, ulen, mask)	Parse ascii string as nodemask
 * int nodelist_parse(buf, map)		Parse ascii string as nodelist
 * int node_remap(oldbit, old, new)	newbit = map(old, new)(oldbit)
 * void nodes_remap(dst, src, old, new)	*dst = map(old, new)(src)
 * void nodes_onto(dst, orig, relmap)	*dst = orig relative to relmap
 * void nodes_fold(dst, orig, sz)	dst bits = orig bits mod sz
 *
 * for_each_node_mask(node, mask)	for-loop node over mask
 *
 * int num_online_nodes()		Number of online Nodes
 * int num_possible_nodes()		Number of all possible Nodes
 *
 * int node_random(mask)		Random node with set bit in mask
 *
 * int node_online(node)		Is some node online?
 * int node_possible(node)		Is some node possible?
 *
 * node_set_online(node)		set bit 'node' in node_online_map
 * node_set_offline(node)		clear bit 'node' in node_online_map
 *
 * for_each_node(node)			for-loop node over node_possible_map
 * for_each_online_node(node)		for-loop node over node_online_map
 *
 * Subtlety:
 * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway)
 *    to generate slightly worse code.  So use a simple one-line #define
 *    for node_isset(), instead of wrapping an inline inside a macro, the
 *    way we do the other calls.
 *
 * NODEMASK_SCRATCH
 * When doing above logical AND, OR, XOR, Remap operations the callers tend to
 * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large,
 * nodemask_t's consume too much stack space.  NODEMASK_SCRATCH is a helper
 * for such situations. See below and CPUMASK_ALLOC also.
 */
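/*
 * Kernel-context sketch using a few of the operations listed above
 * (illustrative only, not part of this header): build a candidate mask,
 * intersect it with the online map and walk the result.
 */
#include <linux/nodemask.h>
#include <linux/printk.h>

static void ex_walk_wanted_nodes(void)
{
	nodemask_t wanted = NODE_MASK_NONE;
	nodemask_t usable;
	int node;

	node_set(0, wanted);
	node_set(2, wanted);
	nodes_and(usable, wanted, node_online_map);

	for_each_node_mask(node, usable)
		pr_info("node %d is wanted and online\n", node);
}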
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/minmax.h>
#include <linux/nodemask_types.h>
#include <linux/random.h>

extern nodemask_t _unused_nodemask_arg_;

/**
 * nodemask_pr_args - printf args to output a nodemask
 * @maskp: nodemask to be printed
 *
 * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
 */
#define nodemask_pr_args(maskp)	__nodemask_pr_numnodes(maskp), \
				__nodemask_pr_bits(maskp)
static __always_inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
{
	return m ? MAX_NUMNODES : 0;
}
static __always_inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
{
	return m ? m->bits : NULL;
}

/*
 * The inline keyword gives the compiler room to decide to inline, or
 * not inline a function as it sees best.  However, as these functions
 * are called in both __init and non-__init functions, if they are not
 * inlined we will end up with a section mismatch error (of the type of
 * freeable items not being freed).  So we must use __always_inline here
 * to fix the problem.  If other functions in the future also end up in
 * this situation they will also need to be annotated as __always_inline
 */

#define node_set(node, dst) __node_set((node), &(dst))
static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
{
	set_bit(node, dstp->bits);
}

#define node_clear(node, dst) __node_clear((node), &(dst))
static __always_inline void __node_clear(int node, volatile nodemask_t *dstp)
{
	clear_bit(node, dstp->bits);
}

#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
static __always_inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
	bitmap_fill(dstp->bits, nbits);
}

#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
static __always_inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
	bitmap_zero(dstp->bits, nbits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define node_isset(node, nodemask) test_bit((node), (nodemask).bits)

#define node_test_and_set(node, nodemask) \
			__node_test_and_set((node), &(nodemask))
static __always_inline bool __node_test_and_set(int node, nodemask_t *addr)
{
	return test_and_set_bit(node, addr->bits);
}

#define nodes_and(dst, src1, src2) \
			__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
static __always_inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_or(dst, src1, src2) \
			__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
static __always_inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_xor(dst, src1, src2) \
			__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
static __always_inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_andnot(dst, src1, src2) \
			__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
static __always_inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_copy(dst, src) __nodes_copy(&(dst), &(src), MAX_NUMNODES)
static __always_inline void __nodes_copy(nodemask_t *dstp,
					const nodemask_t *srcp, unsigned int nbits)
{
	bitmap_copy(dstp->bits, srcp->bits, nbits);
}

#define nodes_complement(dst, src) \
			__nodes_complement(&(dst), &(src), MAX_NUMNODES)
static __always_inline void __nodes_complement(nodemask_t *dstp,
					const nodemask_t *srcp, unsigned int nbits)
{
	bitmap_complement(dstp->bits, srcp->bits, nbits);
}
#define nodes_equal(src1, src2) \
			__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
static __always_inline bool __nodes_equal(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define nodes_intersects(src1, src2) \
			__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
static __always_inline bool __nodes_intersects(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define nodes_subset(src1, src2) \
			__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
static __always_inline bool __nodes_subset(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_subset(src1p->bits, src2p->bits, nbits);
}

#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
static __always_inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_empty(srcp->bits, nbits);
}

#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
static __always_inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_full(srcp->bits, nbits);
}

#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_weight(srcp->bits, nbits);
}

#define nodes_shift_right(dst, src, n) \
			__nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
static __always_inline void __nodes_shift_right(nodemask_t *dstp,
					const nodemask_t *srcp, int n, int nbits)
{
	bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}

#define nodes_shift_left(dst, src, n) \
			__nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
static __always_inline void __nodes_shift_left(nodemask_t *dstp,
					const nodemask_t *srcp, int n, int nbits)
{
	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}

/* FIXME: better would be to fix all architectures to never return
 * > MAX_NUMNODES, then the silly min_ts could be dropped. */

#define first_node(src) __first_node(&(src))
static __always_inline unsigned int __first_node(const nodemask_t *srcp)
{
	return min_t(unsigned int, MAX_NUMNODES,
		     find_first_bit(srcp->bits, MAX_NUMNODES));
}

#define next_node(n, src) __next_node((n), &(src))
static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
	return min_t(unsigned int, MAX_NUMNODES,
		     find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}

/*
 * Find the next present node in src, starting after node n, wrapping around to
 * the first node in src if needed.  Returns MAX_NUMNODES if src is empty.
 */
#define next_node_in(n, src) __next_node_in((n), &(src))
static __always_inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
{
	unsigned int ret = __next_node(node, srcp);

	if (ret == MAX_NUMNODES)
		ret = __first_node(srcp);
	return ret;
}

static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
	nodes_clear(*mask);
	node_set(node, *mask);
}

#define nodemask_of_node(node)						\
({									\
	typeof(_unused_nodemask_arg_) m;				\
	if (sizeof(m) == sizeof(unsigned long)) {			\
		m.bits[0] = 1UL << (node);				\
	} else {							\
		init_nodemask_of_node(&m, (node));			\
	}								\
	m;								\
})

#define first_unset_node(mask) __first_unset_node(&(mask))
static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
	return min_t(unsigned int, MAX_NUMNODES,
			find_first_zero_bit(maskp->bits, MAX_NUMNODES));
}

#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)

#if MAX_NUMNODES <= BITS_PER_LONG

#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })

#else

#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL,			\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })

#endif

#define NODE_MASK_NONE							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-1] =  0UL			\
} })

#define nodes_addr(src) ((src).bits)

#define nodemask_parse_user(ubuf, ulen, dst) \
		__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
static __always_inline int __nodemask_parse_user(const char __user *buf, int len,
					nodemask_t *dstp, int nbits)
{
	return bitmap_parse_user(buf, len, dstp->bits, nbits);
}

#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
static __always_inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
	return bitmap_parselist(buf, dstp->bits, nbits);
}

#define node_remap(oldbit, old, new) \
		__node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
static __always_inline int __node_remap(int oldbit,
		const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
	return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}

#define nodes_remap(dst, src, old, new) \
		__nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
static __always_inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
		const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
	bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}

#define nodes_onto(dst, orig, relmap) \
		__nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
static __always_inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
		const nodemask_t *relmapp, int nbits)
{
	bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
}

#define nodes_fold(dst, orig, sz) \
		__nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
static __always_inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
		int sz, int nbits)
{
	bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}

#if MAX_NUMNODES > 1
#define for_each_node_mask(node, mask)				\
	for ((node) = first_node(mask);				\
	     (node) < MAX_NUMNODES;				\
	     (node) = next_node((node), (mask)))
#else /* MAX_NUMNODES == 1 */
#define for_each_node_mask(node, mask)				\
	for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++)
#endif /* MAX_NUMNODES */

/*
 * Bitmasks that are kept for all the nodes.
 */
enum node_states {
	N_POSSIBLE,		/* The node could become online at some point */
	N_ONLINE,		/* The node is online */
	N_NORMAL_MEMORY,	/* The node has regular memory */
#ifdef CONFIG_HIGHMEM
	N_HIGH_MEMORY,		/* The node has regular or high memory */
#else
	N_HIGH_MEMORY = N_NORMAL_MEMORY,
#endif
	N_MEMORY,		/* The node has memory(regular, high, movable) */
	N_CPU,			/* The node has one or more cpus */
	N_GENERIC_INITIATOR,	/* The node has one or more Generic Initiators */
	NR_NODE_STATES
};

/*
 * The following particular system nodemasks and operations
 * on them manage all possible and online nodes.
 */

extern nodemask_t node_states[NR_NODE_STATES];

#if MAX_NUMNODES > 1
static __always_inline int node_state(int node, enum node_states state)
{
	return node_isset(node, node_states[state]);
}

static __always_inline void node_set_state(int node, enum node_states state)
{
	__node_set(node, &node_states[state]);
}

static __always_inline void node_clear_state(int node, enum node_states state)
{
	__node_clear(node, &node_states[state]);
}

static __always_inline int num_node_state(enum node_states state)
{
	return nodes_weight(node_states[state]);
}

#define for_each_node_state(__node, __state) \
	for_each_node_mask((__node), node_states[__state])

#define first_online_node	first_node(node_states[N_ONLINE])
#define first_memory_node	first_node(node_states[N_MEMORY])
static __always_inline unsigned int next_online_node(int nid)
{
	return next_node(nid, node_states[N_ONLINE]);
}
static __always_inline unsigned int next_memory_node(int nid)
{
	return next_node(nid, node_states[N_MEMORY]);
}

extern unsigned int nr_node_ids;
extern unsigned int nr_online_nodes;

static __always_inline void node_set_online(int nid)
{
	node_set_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

static __always_inline void node_set_offline(int nid)
{
	node_clear_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

#else

static __always_inline int node_state(int node, enum node_states state)
{
	return node == 0;
}

static __always_inline void node_set_state(int node, enum node_states state)
{
}

static __always_inline void node_clear_state(int node, enum node_states state)
{
}

static __always_inline int num_node_state(enum node_states state)
{
	return 1;
}

#define for_each_node_state(node, __state) \
	for ( (node) = 0; (node) == 0; (node) = 1)

#define first_online_node	0
#define first_memory_node	0
#define next_online_node(nid)	(MAX_NUMNODES)
#define next_memory_node(nid)	(MAX_NUMNODES)
#define nr_node_ids		1U
#define nr_online_nodes		1U

#define node_set_online(node)	   node_set_state((node), N_ONLINE)
#define node_set_offline(node)	   node_clear_state((node), N_ONLINE)

#endif

static __always_inline int node_random(const nodemask_t *maskp)
{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
	int w, bit;

	w = nodes_weight(*maskp);
	switch (w) {
	case 0:
		bit = NUMA_NO_NODE;
		break;
	case 1:
		bit = first_node(*maskp);
		break;
	default:
		bit = find_nth_bit(maskp->bits, MAX_NUMNODES,
				   get_random_u32_below(w));
		break;
	}
	return bit;
#else
	return 0;
#endif
}

#define node_online_map 	node_states[N_ONLINE]
#define node_possible_map 	node_states[N_POSSIBLE]

#define num_online_nodes()	num_node_state(N_ONLINE)
#define num_possible_nodes()	num_node_state(N_POSSIBLE)
#define node_online(node)	node_state((node), N_ONLINE)
#define node_possible(node)	node_state((node), N_POSSIBLE)

#define for_each_node(node)	   for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)

/*
 * For nodemask scratch area.
 * NODEMASK_ALLOC(type, name) allocates an object with a specified type and
 * name.
 */
#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
#define NODEMASK_ALLOC(type, name, gfp_flags)	\
			type *name = kmalloc(sizeof(*name), gfp_flags)
#define NODEMASK_FREE(m)			kfree(m)
#else
#define NODEMASK_ALLOC(type, name, gfp_flags)	type _##name, *name = &_##name
#define NODEMASK_FREE(m)			do {} while (0)
#endif

/* Example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
	nodemask_t	mask1;
	nodemask_t	mask2;
};

#define NODEMASK_SCRATCH(x)						\
			NODEMASK_ALLOC(struct nodemask_scratch, x,	\
					GFP_KERNEL | __GFP_NORETRY)
#define NODEMASK_SCRATCH_FREE(x)	NODEMASK_FREE(x)

#endif /* __LINUX_NODEMASK_H */
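/*
 * Sketch of the NODEMASK_SCRATCH pattern described above (illustrative,
 * not part of this header): with a large NODES_SHIFT the masks are
 * heap-allocated, otherwise they live on the stack - the call sites
 * stay identical either way.
 */
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/slab.h>

static int ex_intersection_weight(const nodemask_t *a, const nodemask_t *b)
{
	NODEMASK_SCRATCH(scratch);
	int w;

	if (!scratch)		/* only possible in the kmalloc() case */
		return -ENOMEM;

	nodes_and(scratch->mask1, *a, *b);
	w = nodes_weight(scratch->mask1);

	NODEMASK_SCRATCH_FREE(scratch);
	return w;
}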
// SPDX-License-Identifier: GPL-2.0-only
/*
 * LED Class Core
 *
 * Copyright 2005-2006 Openedhand Ltd.
* * Author: Richard Purdie <rpurdie@openedhand.com> */ #include <linux/kernel.h> #include <linux/led-class-multicolor.h> #include <linux/leds.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/property.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <uapi/linux/uleds.h> #include "leds.h" DECLARE_RWSEM(leds_list_lock); EXPORT_SYMBOL_GPL(leds_list_lock); LIST_HEAD(leds_list); EXPORT_SYMBOL_GPL(leds_list); static const char * const led_colors[LED_COLOR_ID_MAX] = { [LED_COLOR_ID_WHITE] = "white", [LED_COLOR_ID_RED] = "red", [LED_COLOR_ID_GREEN] = "green", [LED_COLOR_ID_BLUE] = "blue", [LED_COLOR_ID_AMBER] = "amber", [LED_COLOR_ID_VIOLET] = "violet", [LED_COLOR_ID_YELLOW] = "yellow", [LED_COLOR_ID_IR] = "ir", [LED_COLOR_ID_MULTI] = "multicolor", [LED_COLOR_ID_RGB] = "rgb", [LED_COLOR_ID_PURPLE] = "purple", [LED_COLOR_ID_ORANGE] = "orange", [LED_COLOR_ID_PINK] = "pink", [LED_COLOR_ID_CYAN] = "cyan", [LED_COLOR_ID_LIME] = "lime", }; static int __led_set_brightness(struct led_classdev *led_cdev, unsigned int value) { if (!led_cdev->brightness_set) return -ENOTSUPP; led_cdev->brightness_set(led_cdev, value); return 0; } static int __led_set_brightness_blocking(struct led_classdev *led_cdev, unsigned int value) { if (!led_cdev->brightness_set_blocking) return -ENOTSUPP; return led_cdev->brightness_set_blocking(led_cdev, value); } static void led_timer_function(struct timer_list *t) { struct led_classdev *led_cdev = from_timer(led_cdev, t, blink_timer); unsigned long brightness; unsigned long delay; if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) { led_set_brightness_nosleep(led_cdev, LED_OFF); clear_bit(LED_BLINK_SW, &led_cdev->work_flags); return; } if (test_and_clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags)) { clear_bit(LED_BLINK_SW, &led_cdev->work_flags); return; } brightness = led_get_brightness(led_cdev); if (!brightness) { /* Time to switch the LED on. */ if (test_and_clear_bit(LED_BLINK_BRIGHTNESS_CHANGE, &led_cdev->work_flags)) brightness = led_cdev->new_blink_brightness; else brightness = led_cdev->blink_brightness; delay = led_cdev->blink_delay_on; } else { /* Store the current brightness value to be able * to restore it when the delay_off period is over. */ led_cdev->blink_brightness = brightness; brightness = LED_OFF; delay = led_cdev->blink_delay_off; } led_set_brightness_nosleep(led_cdev, brightness); /* Return in next iteration if led is in one-shot mode and we are in * the final blink state so that the led is toggled each delay_on + * delay_off milliseconds in worst case. 
*/ if (test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags)) { if (test_bit(LED_BLINK_INVERT, &led_cdev->work_flags)) { if (brightness) set_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); } else { if (!brightness) set_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); } } mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay)); } static void set_brightness_delayed_set_brightness(struct led_classdev *led_cdev, unsigned int value) { int ret; ret = __led_set_brightness(led_cdev, value); if (ret == -ENOTSUPP) { ret = __led_set_brightness_blocking(led_cdev, value); if (ret == -ENOTSUPP) /* No back-end support to set a fixed brightness value */ return; } /* LED HW might have been unplugged, therefore don't warn */ if (ret == -ENODEV && led_cdev->flags & LED_UNREGISTERING && led_cdev->flags & LED_HW_PLUGGABLE) return; if (ret < 0) dev_err(led_cdev->dev, "Setting an LED's brightness failed (%d)\n", ret); } static void set_brightness_delayed(struct work_struct *ws) { struct led_classdev *led_cdev = container_of(ws, struct led_classdev, set_brightness_work); if (test_and_clear_bit(LED_BLINK_DISABLE, &led_cdev->work_flags)) { led_stop_software_blink(led_cdev); set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags); } /* * Triggers may call led_set_brightness(LED_OFF), * led_set_brightness(LED_FULL) in quick succession to disable blinking * and turn the LED on. Both actions may have been scheduled to run * before this work item runs once. To make sure this works properly, * handle LED_SET_BRIGHTNESS_OFF first. */ if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags)) { set_brightness_delayed_set_brightness(led_cdev, LED_OFF); /* * The consecutive led_set_brightness(LED_OFF) and * led_set_brightness(LED_FULL) calls could have been executed * out of order (LED_FULL first) if the work flags were set * again between the LED_SET_BRIGHTNESS_OFF and * LED_SET_BRIGHTNESS handling of this work item. To avoid * ending up with the LED turned off, turn the LED on again.
*/ if (led_cdev->delayed_set_value != LED_OFF) set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); } if (test_and_clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags)) set_brightness_delayed_set_brightness(led_cdev, led_cdev->delayed_set_value); if (test_and_clear_bit(LED_SET_BLINK, &led_cdev->work_flags)) { unsigned long delay_on = led_cdev->delayed_delay_on; unsigned long delay_off = led_cdev->delayed_delay_off; led_blink_set(led_cdev, &delay_on, &delay_off); } } static void led_set_software_blink(struct led_classdev *led_cdev, unsigned long delay_on, unsigned long delay_off) { int current_brightness; current_brightness = led_get_brightness(led_cdev); if (current_brightness) led_cdev->blink_brightness = current_brightness; if (!led_cdev->blink_brightness) led_cdev->blink_brightness = led_cdev->max_brightness; led_cdev->blink_delay_on = delay_on; led_cdev->blink_delay_off = delay_off; /* never on - just set to off */ if (!delay_on) { led_set_brightness_nosleep(led_cdev, LED_OFF); return; } /* never off - just set to brightness */ if (!delay_off) { led_set_brightness_nosleep(led_cdev, led_cdev->blink_brightness); return; } set_bit(LED_BLINK_SW, &led_cdev->work_flags); mod_timer(&led_cdev->blink_timer, jiffies + 1); } static void led_blink_setup(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { if (!test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) && led_cdev->blink_set && !led_cdev->blink_set(led_cdev, delay_on, delay_off)) return; /* blink with 1 Hz as default if nothing specified */ if (!*delay_on && !*delay_off) *delay_on = *delay_off = 500; led_set_software_blink(led_cdev, *delay_on, *delay_off); } void led_init_core(struct led_classdev *led_cdev) { INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed); timer_setup(&led_cdev->blink_timer, led_timer_function, 0); } EXPORT_SYMBOL_GPL(led_init_core); void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { timer_delete_sync(&led_cdev->blink_timer); clear_bit(LED_BLINK_SW, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); led_blink_setup(led_cdev, delay_on, delay_off); } EXPORT_SYMBOL_GPL(led_blink_set); void led_blink_set_oneshot(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off, int invert) { if (test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) && timer_pending(&led_cdev->blink_timer)) return; set_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); if (invert) set_bit(LED_BLINK_INVERT, &led_cdev->work_flags); else clear_bit(LED_BLINK_INVERT, &led_cdev->work_flags); led_blink_setup(led_cdev, delay_on, delay_off); } EXPORT_SYMBOL_GPL(led_blink_set_oneshot); void led_blink_set_nosleep(struct led_classdev *led_cdev, unsigned long delay_on, unsigned long delay_off) { /* If necessary delegate to a work queue task. 
*/ if (led_cdev->blink_set && led_cdev->brightness_set_blocking) { led_cdev->delayed_delay_on = delay_on; led_cdev->delayed_delay_off = delay_off; set_bit(LED_SET_BLINK, &led_cdev->work_flags); queue_work(led_cdev->wq, &led_cdev->set_brightness_work); return; } led_blink_set(led_cdev, &delay_on, &delay_off); } EXPORT_SYMBOL_GPL(led_blink_set_nosleep); void led_stop_software_blink(struct led_classdev *led_cdev) { timer_delete_sync(&led_cdev->blink_timer); led_cdev->blink_delay_on = 0; led_cdev->blink_delay_off = 0; clear_bit(LED_BLINK_SW, &led_cdev->work_flags); } EXPORT_SYMBOL_GPL(led_stop_software_blink); void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness) { /* * If software blink is active, delay brightness setting * until the next timer tick. */ if (test_bit(LED_BLINK_SW, &led_cdev->work_flags)) { /* * If we need to disable soft blinking delegate this to the * work queue task to avoid problems in case we are called * from hard irq context. */ if (!brightness) { set_bit(LED_BLINK_DISABLE, &led_cdev->work_flags); queue_work(led_cdev->wq, &led_cdev->set_brightness_work); } else { set_bit(LED_BLINK_BRIGHTNESS_CHANGE, &led_cdev->work_flags); led_cdev->new_blink_brightness = brightness; } return; } led_set_brightness_nosleep(led_cdev, brightness); } EXPORT_SYMBOL_GPL(led_set_brightness); void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value) { /* Use brightness_set op if available, it is guaranteed not to sleep */ if (!__led_set_brightness(led_cdev, value)) return; /* * Brightness setting can sleep, delegate it to a work queue task. * value 0 / LED_OFF is special, since it also disables hw-blinking * (sw-blink disable is handled in led_set_brightness()). * To avoid a hw-blink-disable getting lost when a second brightness * change is done immediately afterwards (before the work runs), * it uses a separate work_flag. */ led_cdev->delayed_set_value = value; /* Ensure delayed_set_value is seen before work_flags modification */ smp_mb__before_atomic(); if (value) set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); else { clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); clear_bit(LED_SET_BLINK, &led_cdev->work_flags); set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags); } queue_work(led_cdev->wq, &led_cdev->set_brightness_work); } EXPORT_SYMBOL_GPL(led_set_brightness_nopm); void led_set_brightness_nosleep(struct led_classdev *led_cdev, unsigned int value) { led_cdev->brightness = min(value, led_cdev->max_brightness); if (led_cdev->flags & LED_SUSPENDED) return; led_set_brightness_nopm(led_cdev, led_cdev->brightness); } EXPORT_SYMBOL_GPL(led_set_brightness_nosleep); int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value) { if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) return -EBUSY; led_cdev->brightness = min(value, led_cdev->max_brightness); if (led_cdev->flags & LED_SUSPENDED) return 0; return __led_set_brightness_blocking(led_cdev, led_cdev->brightness); } EXPORT_SYMBOL_GPL(led_set_brightness_sync); /* * This is a led-core function because just like led_set_brightness() * it is used in the kernel by e.g. triggers. 
*/ void led_mc_set_brightness(struct led_classdev *led_cdev, unsigned int *intensity_value, unsigned int num_colors, unsigned int brightness) { struct led_classdev_mc *mcled_cdev; unsigned int i; if (!(led_cdev->flags & LED_MULTI_COLOR)) { dev_err_once(led_cdev->dev, "error not a multi-color LED\n"); return; } mcled_cdev = lcdev_to_mccdev(led_cdev); if (num_colors != mcled_cdev->num_colors) { dev_err_once(led_cdev->dev, "error num_colors mismatch %u != %u\n", num_colors, mcled_cdev->num_colors); return; } for (i = 0; i < mcled_cdev->num_colors; i++) mcled_cdev->subled_info[i].intensity = intensity_value[i]; led_set_brightness(led_cdev, brightness); } EXPORT_SYMBOL_GPL(led_mc_set_brightness); int led_update_brightness(struct led_classdev *led_cdev) { int ret; if (led_cdev->brightness_get) { ret = led_cdev->brightness_get(led_cdev); if (ret < 0) return ret; led_cdev->brightness = ret; } return 0; } EXPORT_SYMBOL_GPL(led_update_brightness); u32 *led_get_default_pattern(struct led_classdev *led_cdev, unsigned int *size) { struct fwnode_handle *fwnode = led_cdev->dev->fwnode; u32 *pattern; int count; count = fwnode_property_count_u32(fwnode, "led-pattern"); if (count < 0) return NULL; pattern = kcalloc(count, sizeof(*pattern), GFP_KERNEL); if (!pattern) return NULL; if (fwnode_property_read_u32_array(fwnode, "led-pattern", pattern, count)) { kfree(pattern); return NULL; } *size = count; return pattern; } EXPORT_SYMBOL_GPL(led_get_default_pattern); /* Caller must ensure led_cdev->led_access held */ void led_sysfs_disable(struct led_classdev *led_cdev) { lockdep_assert_held(&led_cdev->led_access); led_cdev->flags |= LED_SYSFS_DISABLE; } EXPORT_SYMBOL_GPL(led_sysfs_disable); /* Caller must ensure led_cdev->led_access held */ void led_sysfs_enable(struct led_classdev *led_cdev) { lockdep_assert_held(&led_cdev->led_access); led_cdev->flags &= ~LED_SYSFS_DISABLE; } EXPORT_SYMBOL_GPL(led_sysfs_enable); static void led_parse_fwnode_props(struct device *dev, struct fwnode_handle *fwnode, struct led_properties *props) { int ret; if (!fwnode) return; if (fwnode_property_present(fwnode, "label")) { ret = fwnode_property_read_string(fwnode, "label", &props->label); if (ret) dev_err(dev, "Error parsing 'label' property (%d)\n", ret); return; } if (fwnode_property_present(fwnode, "color")) { ret = fwnode_property_read_u32(fwnode, "color", &props->color); if (ret) dev_err(dev, "Error parsing 'color' property (%d)\n", ret); else if (props->color >= LED_COLOR_ID_MAX) dev_err(dev, "LED color identifier out of range\n"); else props->color_present = true; } if (!fwnode_property_present(fwnode, "function")) return; ret = fwnode_property_read_string(fwnode, "function", &props->function); if (ret) { dev_err(dev, "Error parsing 'function' property (%d)\n", ret); } if (!fwnode_property_present(fwnode, "function-enumerator")) return; ret = fwnode_property_read_u32(fwnode, "function-enumerator", &props->func_enum); if (ret) { dev_err(dev, "Error parsing 'function-enumerator' property (%d)\n", ret); } else { props->func_enum_present = true; } } int led_compose_name(struct device *dev, struct led_init_data *init_data, char *led_classdev_name) { struct led_properties props = {}; struct fwnode_handle *fwnode = init_data->fwnode; const char *devicename = init_data->devicename; if (!led_classdev_name) return -EINVAL; led_parse_fwnode_props(dev, fwnode, &props); if (props.label) { /* * If init_data.devicename is NULL, then it indicates that * DT label should be used as-is for LED class device name. 
* Otherwise the label is prepended with devicename to compose * the final LED class device name. */ if (!devicename) { strscpy(led_classdev_name, props.label, LED_MAX_NAME_SIZE); } else { snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s", devicename, props.label); } } else if (props.function || props.color_present) { char tmp_buf[LED_MAX_NAME_SIZE]; if (props.func_enum_present) { snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s-%d", props.color_present ? led_colors[props.color] : "", props.function ?: "", props.func_enum); } else { snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s", props.color_present ? led_colors[props.color] : "", props.function ?: ""); } if (init_data->devname_mandatory) { snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s", devicename, tmp_buf); } else { strscpy(led_classdev_name, tmp_buf, LED_MAX_NAME_SIZE); } } else if (init_data->default_label) { if (!devicename) { dev_err(dev, "Legacy LED naming requires devicename segment"); return -EINVAL; } snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s", devicename, init_data->default_label); } else if (is_of_node(fwnode)) { strscpy(led_classdev_name, to_of_node(fwnode)->name, LED_MAX_NAME_SIZE); } else return -EINVAL; return 0; } EXPORT_SYMBOL_GPL(led_compose_name); const char *led_get_color_name(u8 color_id) { if (color_id >= ARRAY_SIZE(led_colors)) return NULL; return led_colors[color_id]; } EXPORT_SYMBOL_GPL(led_get_color_name); enum led_default_state led_init_default_state_get(struct fwnode_handle *fwnode) { const char *state = NULL; if (!fwnode_property_read_string(fwnode, "default-state", &state)) { if (!strcmp(state, "keep")) return LEDS_DEFSTATE_KEEP; if (!strcmp(state, "on")) return LEDS_DEFSTATE_ON; } return LEDS_DEFSTATE_OFF; } EXPORT_SYMBOL_GPL(led_init_default_state_get);
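Editor's illustration (not part of led-core.c): a minimal sketch of the driver side these helpers serve, assuming a hypothetical demo_ device. Because no blink_set op is provided, led_blink_set() falls back to the software blink timer implemented above.

#include <linux/leds.h>
#include <linux/module.h>

/*
 * Non-sleeping brightness op: led_set_brightness_nopm() calls this
 * directly instead of deferring to the set_brightness_work item.
 */
static void demo_brightness_set(struct led_classdev *cdev,
				enum led_brightness value)
{
	/* write 'value' to the LED hardware here; must not sleep */
}

static struct led_classdev demo_led = {
	.name		= "demo:green:status",
	.max_brightness	= LED_FULL,
	.brightness_set	= demo_brightness_set,
};

/*
 * From a driver's probe(), registration and blinking at the 1 Hz
 * software-blink default would look roughly like:
 *
 *	unsigned long on = 0, off = 0;
 *
 *	devm_led_classdev_register(dev, &demo_led);
 *	led_blink_set(&demo_led, &on, &off);
 */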
// SPDX-License-Identifier: GPL-2.0 /* * Greybus "AP" USB driver for "ES2" controller chips * * Copyright 2014-2015 Google Inc. * Copyright 2014-2015 Linaro Ltd. */ #include <linux/kthread.h> #include <linux/sizes.h> #include <linux/usb.h> #include <linux/kfifo.h> #include <linux/debugfs.h> #include <linux/list.h> #include <linux/greybus.h> #include <linux/unaligned.h> #include "arpc.h" #include "greybus_trace.h" /* Default timeout for USB vendor requests.
*/ #define ES2_USB_CTRL_TIMEOUT 500 /* Default timeout for ARPC CPort requests */ #define ES2_ARPC_CPORT_TIMEOUT 500 /* Fixed CPort numbers */ #define ES2_CPORT_CDSI0 16 #define ES2_CPORT_CDSI1 17 /* Memory sizes for the buffers sent to/from the ES2 controller */ #define ES2_GBUF_MSG_SIZE_MAX 2048 /* Memory sizes for the ARPC buffers */ #define ARPC_OUT_SIZE_MAX U16_MAX #define ARPC_IN_SIZE_MAX 128 static const struct usb_device_id id_table[] = { { USB_DEVICE(0x18d1, 0x1eaf) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); #define APB1_LOG_SIZE SZ_16K /* * Number of CPort IN urbs in flight at any point in time. * Adjust if we are having stalls in the USB buffer due to not enough urbs in * flight. */ #define NUM_CPORT_IN_URB 4 /* * Number of CPort OUT urbs in flight at any point in time. * Adjust if we get messages saying we are out of urbs in the system log. */ #define NUM_CPORT_OUT_URB 8 /* * Number of ARPC in urbs in flight at any point in time. */ #define NUM_ARPC_IN_URB 2 /* * @endpoint: bulk in endpoint for CPort data * @urb: array of urbs for the CPort in messages * @buffer: array of buffers for the @cport_in_urb urbs */ struct es2_cport_in { __u8 endpoint; struct urb *urb[NUM_CPORT_IN_URB]; u8 *buffer[NUM_CPORT_IN_URB]; }; /** * struct es2_ap_dev - ES2 USB Bridge to AP structure * @usb_dev: pointer to the USB device we are attached to. * @usb_intf: pointer to the USB interface we are bound to. * @hd: pointer to our gb_host_device structure * * @cport_in: endpoint, urbs and buffer for cport in messages * @cport_out_endpoint: endpoint for cport out messages * @cport_out_urb: array of urbs for the CPort out messages * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or * not. * @cport_out_urb_cancelled: array of flags indicating whether the * corresponding @cport_out_urb is being cancelled * @cport_out_urb_lock: locks the @cport_out_urb_busy "list" * @cdsi1_in_use: true if cport CDSI1 is in use * @apb_log_task: task pointer for logging thread * @apb_log_dentry: file system entry for the log file interface * @apb_log_enable_dentry: file system entry for enabling logging * @apb_log_fifo: kernel FIFO to carry logged data * @arpc_urb: array of urbs for the ARPC in messages * @arpc_buffer: array of buffers for the @arpc_urb urbs * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC * @arpc_id_cycle: gives a unique id to each ARPC * @arpc_lock: locks ARPC list * @arpcs: list of in-progress ARPCs */ struct es2_ap_dev { struct usb_device *usb_dev; struct usb_interface *usb_intf; struct gb_host_device *hd; struct es2_cport_in cport_in; __u8 cport_out_endpoint; struct urb *cport_out_urb[NUM_CPORT_OUT_URB]; bool cport_out_urb_busy[NUM_CPORT_OUT_URB]; bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB]; spinlock_t cport_out_urb_lock; bool cdsi1_in_use; struct task_struct *apb_log_task; struct dentry *apb_log_dentry; struct dentry *apb_log_enable_dentry; DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE); __u8 arpc_endpoint_in; struct urb *arpc_urb[NUM_ARPC_IN_URB]; u8 *arpc_buffer[NUM_ARPC_IN_URB]; int arpc_id_cycle; spinlock_t arpc_lock; struct list_head arpcs; }; struct arpc { struct list_head list; struct arpc_request_message *req; struct arpc_response_message *resp; struct completion response_received; bool active; }; static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd) { return (struct es2_ap_dev *)&hd->hd_priv; } static void cport_out_callback(struct urb *urb); static void usb_log_enable(struct es2_ap_dev *es2); static void usb_log_disable(struct es2_ap_dev *es2); static int
arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload, size_t size, int *result, unsigned int timeout); static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd) { struct usb_device *udev = es2->usb_dev; u8 *data; int retval; data = kmemdup(req, size, GFP_KERNEL); if (!data) return -ENOMEM; retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0, 0, data, size, ES2_USB_CTRL_TIMEOUT); if (retval < 0) dev_err(&udev->dev, "%s: return error %d\n", __func__, retval); else retval = 0; kfree(data); return retval; } static void ap_urb_complete(struct urb *urb) { struct usb_ctrlrequest *dr = urb->context; kfree(dr); usb_free_urb(urb); } static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd) { struct usb_device *udev = es2->usb_dev; struct urb *urb; struct usb_ctrlrequest *dr; u8 *buf; int retval; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC); if (!dr) { usb_free_urb(urb); return -ENOMEM; } buf = (u8 *)dr + sizeof(*dr); memcpy(buf, req, size); dr->bRequest = cmd; dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE; dr->wValue = 0; dr->wIndex = 0; dr->wLength = cpu_to_le16(size); usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0), (unsigned char *)dr, buf, size, ap_urb_complete, dr); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) { usb_free_urb(urb); kfree(dr); } return retval; } static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, bool async) { struct es2_ap_dev *es2 = hd_to_es2(hd); if (async) return output_async(es2, req, size, cmd); return output_sync(es2, req, size, cmd); } static int es2_cport_in_enable(struct es2_ap_dev *es2, struct es2_cport_in *cport_in) { struct urb *urb; int ret; int i; for (i = 0; i < NUM_CPORT_IN_URB; ++i) { urb = cport_in->urb[i]; ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { dev_err(&es2->usb_dev->dev, "failed to submit in-urb: %d\n", ret); goto err_kill_urbs; } } return 0; err_kill_urbs: for (--i; i >= 0; --i) { urb = cport_in->urb[i]; usb_kill_urb(urb); } return ret; } static void es2_cport_in_disable(struct es2_ap_dev *es2, struct es2_cport_in *cport_in) { struct urb *urb; int i; for (i = 0; i < NUM_CPORT_IN_URB; ++i) { urb = cport_in->urb[i]; usb_kill_urb(urb); } } static int es2_arpc_in_enable(struct es2_ap_dev *es2) { struct urb *urb; int ret; int i; for (i = 0; i < NUM_ARPC_IN_URB; ++i) { urb = es2->arpc_urb[i]; ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { dev_err(&es2->usb_dev->dev, "failed to submit arpc in-urb: %d\n", ret); goto err_kill_urbs; } } return 0; err_kill_urbs: for (--i; i >= 0; --i) { urb = es2->arpc_urb[i]; usb_kill_urb(urb); } return ret; } static void es2_arpc_in_disable(struct es2_ap_dev *es2) { struct urb *urb; int i; for (i = 0; i < NUM_ARPC_IN_URB; ++i) { urb = es2->arpc_urb[i]; usb_kill_urb(urb); } } static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask) { struct urb *urb = NULL; unsigned long flags; int i; spin_lock_irqsave(&es2->cport_out_urb_lock, flags); /* Look in our pool of allocated urbs first, as that's the "fastest" */ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { if (!es2->cport_out_urb_busy[i] && !es2->cport_out_urb_cancelled[i]) { es2->cport_out_urb_busy[i] = true; urb = es2->cport_out_urb[i]; break; } } spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); if (urb) return urb; /* * Crap, pool is empty, complain to the syslog and go allocate one * dynamically as we have to succeed. 
*/ dev_dbg(&es2->usb_dev->dev, "No free CPort OUT urbs, having to dynamically allocate one!\n"); return usb_alloc_urb(0, gfp_mask); } static void free_urb(struct es2_ap_dev *es2, struct urb *urb) { unsigned long flags; int i; /* * See if this was an urb in our pool, if so mark it "free", otherwise * we need to free it ourselves. */ spin_lock_irqsave(&es2->cport_out_urb_lock, flags); for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { if (urb == es2->cport_out_urb[i]) { es2->cport_out_urb_busy[i] = false; urb = NULL; break; } } spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); /* If urb is not NULL, then we need to free this urb */ usb_free_urb(urb); } /* * We (ab)use the operation-message header pad bytes to transfer the * cport id in order to minimise overhead. */ static void gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id) { header->pad[0] = cport_id; } /* Clear the pad bytes used for the CPort id */ static void gb_message_cport_clear(struct gb_operation_msg_hdr *header) { header->pad[0] = 0; } /* Extract the CPort id packed into the header, and clear it */ static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header) { u16 cport_id = header->pad[0]; gb_message_cport_clear(header); return cport_id; } /* * Returns zero if the message was successfully queued, or a negative errno * otherwise. */ static int message_send(struct gb_host_device *hd, u16 cport_id, struct gb_message *message, gfp_t gfp_mask) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct usb_device *udev = es2->usb_dev; size_t buffer_size; int retval; struct urb *urb; unsigned long flags; /* * The data actually transferred will include an indication * of where the data should be sent. Do one last check of * the target CPort id before filling it in. */ if (!cport_id_valid(hd, cport_id)) { dev_err(&udev->dev, "invalid cport %u\n", cport_id); return -EINVAL; } /* Find a free urb */ urb = next_free_urb(es2, gfp_mask); if (!urb) return -ENOMEM; spin_lock_irqsave(&es2->cport_out_urb_lock, flags); message->hcpriv = urb; spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); /* Pack the cport id into the message header */ gb_message_cport_pack(message->header, cport_id); buffer_size = sizeof(*message->header) + message->payload_size; usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, es2->cport_out_endpoint), message->buffer, buffer_size, cport_out_callback, message); urb->transfer_flags |= URB_ZERO_PACKET; trace_gb_message_submit(message); retval = usb_submit_urb(urb, gfp_mask); if (retval) { dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval); spin_lock_irqsave(&es2->cport_out_urb_lock, flags); message->hcpriv = NULL; spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); free_urb(es2, urb); gb_message_cport_clear(message->header); return retval; } return 0; } /* * Can not be called in atomic context. */ static void message_cancel(struct gb_message *message) { struct gb_host_device *hd = message->operation->connection->hd; struct es2_ap_dev *es2 = hd_to_es2(hd); struct urb *urb; int i; might_sleep(); spin_lock_irq(&es2->cport_out_urb_lock); urb = message->hcpriv; /* Prevent dynamically allocated urb from being deallocated. */ usb_get_urb(urb); /* Prevent pre-allocated urb from being reused. 
*/ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { if (urb == es2->cport_out_urb[i]) { es2->cport_out_urb_cancelled[i] = true; break; } } spin_unlock_irq(&es2->cport_out_urb_lock); usb_kill_urb(urb); if (i < NUM_CPORT_OUT_URB) { spin_lock_irq(&es2->cport_out_urb_lock); es2->cport_out_urb_cancelled[i] = false; spin_unlock_irq(&es2->cport_out_urb_lock); } usb_free_urb(urb); } static int es2_cport_allocate(struct gb_host_device *hd, int cport_id, unsigned long flags) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct ida *id_map = &hd->cport_id_map; int ida_start, ida_end; switch (cport_id) { case ES2_CPORT_CDSI0: case ES2_CPORT_CDSI1: dev_err(&hd->dev, "cport %d not available\n", cport_id); return -EBUSY; } if (flags & GB_CONNECTION_FLAG_OFFLOADED && flags & GB_CONNECTION_FLAG_CDSI1) { if (es2->cdsi1_in_use) { dev_err(&hd->dev, "CDSI1 already in use\n"); return -EBUSY; } es2->cdsi1_in_use = true; return ES2_CPORT_CDSI1; } if (cport_id < 0) { ida_start = 0; ida_end = hd->num_cports - 1; } else if (cport_id < hd->num_cports) { ida_start = cport_id; ida_end = cport_id; } else { dev_err(&hd->dev, "cport %d not available\n", cport_id); return -EINVAL; } return ida_alloc_range(id_map, ida_start, ida_end, GFP_KERNEL); } static void es2_cport_release(struct gb_host_device *hd, u16 cport_id) { struct es2_ap_dev *es2 = hd_to_es2(hd); switch (cport_id) { case ES2_CPORT_CDSI1: es2->cdsi1_in_use = false; return; } ida_free(&hd->cport_id_map, cport_id); } static int cport_enable(struct gb_host_device *hd, u16 cport_id, unsigned long flags) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct usb_device *udev = es2->usb_dev; struct gb_apb_request_cport_flags *req; u32 connection_flags; int ret; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; connection_flags = 0; if (flags & GB_CONNECTION_FLAG_CONTROL) connection_flags |= GB_APB_CPORT_FLAG_CONTROL; if (flags & GB_CONNECTION_FLAG_HIGH_PRIO) connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO; req->flags = cpu_to_le32(connection_flags); dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__, cport_id, connection_flags); ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), GB_APB_REQUEST_CPORT_FLAGS, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, cport_id, 0, req, sizeof(*req), ES2_USB_CTRL_TIMEOUT); if (ret < 0) { dev_err(&udev->dev, "failed to set cport flags for port %d\n", cport_id); goto out; } ret = 0; out: kfree(req); return ret; } static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_connected_req req; int ret; req.cport_id = cpu_to_le16(cport_id); ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req), NULL, ES2_ARPC_CPORT_TIMEOUT); if (ret) { dev_err(dev, "failed to set connected state for cport %u: %d\n", cport_id, ret); return ret; } return 0; } static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_flush_req req; int ret; req.cport_id = cpu_to_le16(cport_id); ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req), NULL, ES2_ARPC_CPORT_TIMEOUT); if (ret) { dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret); return ret; } return 0; } static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id, u8 phase, unsigned int timeout) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_shutdown_req req; int result; int ret; if 
(timeout > U16_MAX) return -EINVAL; req.cport_id = cpu_to_le16(cport_id); req.timeout = cpu_to_le16(timeout); req.phase = phase; ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req), &result, ES2_ARPC_CPORT_TIMEOUT + timeout); if (ret) { dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n", cport_id, ret, result); return ret; } return 0; } static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id, size_t peer_space, unsigned int timeout) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_quiesce_req req; int result; int ret; if (peer_space > U16_MAX) return -EINVAL; if (timeout > U16_MAX) return -EINVAL; req.cport_id = cpu_to_le16(cport_id); req.peer_space = cpu_to_le16(peer_space); req.timeout = cpu_to_le16(timeout); ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req), &result, ES2_ARPC_CPORT_TIMEOUT + timeout); if (ret) { dev_err(dev, "failed to quiesce cport %u: %d (%d)\n", cport_id, ret, result); return ret; } return 0; } static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id) { struct es2_ap_dev *es2 = hd_to_es2(hd); struct device *dev = &es2->usb_dev->dev; struct arpc_cport_clear_req req; int ret; req.cport_id = cpu_to_le16(cport_id); ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req), NULL, ES2_ARPC_CPORT_TIMEOUT); if (ret) { dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret); return ret; } return 0; } static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id) { int retval; struct es2_ap_dev *es2 = hd_to_es2(hd); struct usb_device *udev = es2->usb_dev; retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), GB_APB_REQUEST_LATENCY_TAG_EN, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, cport_id, 0, NULL, 0, ES2_USB_CTRL_TIMEOUT); if (retval < 0) dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n", cport_id); return retval; } static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id) { int retval; struct es2_ap_dev *es2 = hd_to_es2(hd); struct usb_device *udev = es2->usb_dev; retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), GB_APB_REQUEST_LATENCY_TAG_DIS, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, cport_id, 0, NULL, 0, ES2_USB_CTRL_TIMEOUT); if (retval < 0) dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n", cport_id); return retval; } static struct gb_hd_driver es2_driver = { .hd_priv_size = sizeof(struct es2_ap_dev), .message_send = message_send, .message_cancel = message_cancel, .cport_allocate = es2_cport_allocate, .cport_release = es2_cport_release, .cport_enable = cport_enable, .cport_connected = es2_cport_connected, .cport_flush = es2_cport_flush, .cport_shutdown = es2_cport_shutdown, .cport_quiesce = es2_cport_quiesce, .cport_clear = es2_cport_clear, .latency_tag_enable = latency_tag_enable, .latency_tag_disable = latency_tag_disable, .output = output, }; /* Common function to report consistent warnings based on URB status */ static int check_urb_status(struct urb *urb) { struct device *dev = &urb->dev->dev; int status = urb->status; switch (status) { case 0: return 0; case -EOVERFLOW: dev_err(dev, "%s: overflow actual length is %d\n", __func__, urb->actual_length); fallthrough; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -EILSEQ: case -EPROTO: /* device is gone, stop sending */ return status; } dev_err(dev, "%s: unknown status %d\n", __func__, status); return -EAGAIN; } static void es2_destroy(struct es2_ap_dev *es2) { struct usb_device *udev; struct urb *urb; int i; 
debugfs_remove(es2->apb_log_enable_dentry); usb_log_disable(es2); /* Tear down everything! */ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { urb = es2->cport_out_urb[i]; usb_kill_urb(urb); usb_free_urb(urb); es2->cport_out_urb[i] = NULL; es2->cport_out_urb_busy[i] = false; /* just to be anal */ } for (i = 0; i < NUM_ARPC_IN_URB; ++i) { usb_free_urb(es2->arpc_urb[i]); kfree(es2->arpc_buffer[i]); es2->arpc_buffer[i] = NULL; } for (i = 0; i < NUM_CPORT_IN_URB; ++i) { usb_free_urb(es2->cport_in.urb[i]); kfree(es2->cport_in.buffer[i]); es2->cport_in.buffer[i] = NULL; } /* release reserved CDSI0 and CDSI1 cports */ gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1); gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0); udev = es2->usb_dev; gb_hd_put(es2->hd); usb_put_dev(udev); } static void cport_in_callback(struct urb *urb) { struct gb_host_device *hd = urb->context; struct device *dev = &urb->dev->dev; struct gb_operation_msg_hdr *header; int status = check_urb_status(urb); int retval; u16 cport_id; if (status) { if ((status == -EAGAIN) || (status == -EPROTO)) goto exit; /* The urb is being unlinked */ if (status == -ENOENT || status == -ESHUTDOWN) return; dev_err(dev, "urb cport in error %d (dropped)\n", status); return; } if (urb->actual_length < sizeof(*header)) { dev_err(dev, "short message received\n"); goto exit; } /* Extract the CPort id, which is packed in the message header */ header = urb->transfer_buffer; cport_id = gb_message_cport_unpack(header); if (cport_id_valid(hd, cport_id)) { greybus_data_rcvd(hd, cport_id, urb->transfer_buffer, urb->actual_length); } else { dev_err(dev, "invalid cport id %u received\n", cport_id); } exit: /* put our urb back in the request pool */ retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(dev, "failed to resubmit in-urb: %d\n", retval); } static void cport_out_callback(struct urb *urb) { struct gb_message *message = urb->context; struct gb_host_device *hd = message->operation->connection->hd; struct es2_ap_dev *es2 = hd_to_es2(hd); int status = check_urb_status(urb); unsigned long flags; gb_message_cport_clear(message->header); spin_lock_irqsave(&es2->cport_out_urb_lock, flags); message->hcpriv = NULL; spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags); /* * Tell the submitter that the message send (attempt) is * complete, and report the status. 
*/ greybus_message_sent(hd, message, status); free_urb(es2, urb); } static struct arpc *arpc_alloc(void *payload, u16 size, u8 type) { struct arpc *rpc; if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX) return NULL; rpc = kzalloc(sizeof(*rpc), GFP_KERNEL); if (!rpc) return NULL; INIT_LIST_HEAD(&rpc->list); rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL); if (!rpc->req) goto err_free_rpc; rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL); if (!rpc->resp) goto err_free_req; rpc->req->type = type; rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size); memcpy(rpc->req->data, payload, size); init_completion(&rpc->response_received); return rpc; err_free_req: kfree(rpc->req); err_free_rpc: kfree(rpc); return NULL; } static void arpc_free(struct arpc *rpc) { kfree(rpc->req); kfree(rpc->resp); kfree(rpc); } static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id) { struct arpc *rpc; list_for_each_entry(rpc, &es2->arpcs, list) { if (rpc->req->id == id) return rpc; } return NULL; } static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc) { rpc->active = true; rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++); list_add_tail(&rpc->list, &es2->arpcs); } static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc) { if (rpc->active) { rpc->active = false; list_del(&rpc->list); } } static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout) { struct usb_device *udev = es2->usb_dev; int retval; retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), GB_APB_REQUEST_ARPC_RUN, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0, 0, rpc->req, le16_to_cpu(rpc->req->size), ES2_USB_CTRL_TIMEOUT); if (retval < 0) { dev_err(&udev->dev, "failed to send ARPC request %d: %d\n", rpc->req->type, retval); return retval; } return 0; } static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload, size_t size, int *result, unsigned int timeout) { struct arpc *rpc; unsigned long flags; int retval; if (result) *result = 0; rpc = arpc_alloc(payload, size, type); if (!rpc) return -ENOMEM; spin_lock_irqsave(&es2->arpc_lock, flags); arpc_add(es2, rpc); spin_unlock_irqrestore(&es2->arpc_lock, flags); retval = arpc_send(es2, rpc, timeout); if (retval) goto out_arpc_del; retval = wait_for_completion_interruptible_timeout( &rpc->response_received, msecs_to_jiffies(timeout)); if (retval <= 0) { if (!retval) retval = -ETIMEDOUT; goto out_arpc_del; } if (rpc->resp->result) { retval = -EREMOTEIO; if (result) *result = rpc->resp->result; } else { retval = 0; } out_arpc_del: spin_lock_irqsave(&es2->arpc_lock, flags); arpc_del(es2, rpc); spin_unlock_irqrestore(&es2->arpc_lock, flags); arpc_free(rpc); if (retval < 0 && retval != -EREMOTEIO) { dev_err(&es2->usb_dev->dev, "failed to execute ARPC: %d\n", retval); } return retval; } static void arpc_in_callback(struct urb *urb) { struct es2_ap_dev *es2 = urb->context; struct device *dev = &urb->dev->dev; int status = check_urb_status(urb); struct arpc *rpc; struct arpc_response_message *resp; unsigned long flags; int retval; if (status) { if ((status == -EAGAIN) || (status == -EPROTO)) goto exit; /* The urb is being unlinked */ if (status == -ENOENT || status == -ESHUTDOWN) return; dev_err(dev, "arpc in-urb error %d (dropped)\n", status); return; } if (urb->actual_length < sizeof(*resp)) { dev_err(dev, "short arpc response received\n"); goto exit; } resp = urb->transfer_buffer; spin_lock_irqsave(&es2->arpc_lock, flags); rpc = arpc_find(es2, resp->id); if (!rpc) { dev_err(dev, "invalid arpc response id received: %u\n", le16_to_cpu(resp->id));
spin_unlock_irqrestore(&es2->arpc_lock, flags); goto exit; } arpc_del(es2, rpc); memcpy(rpc->resp, resp, sizeof(*resp)); complete(&rpc->response_received); spin_unlock_irqrestore(&es2->arpc_lock, flags); exit: /* put our urb back in the request pool */ retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval); } #define APB1_LOG_MSG_SIZE 64 static void apb_log_get(struct es2_ap_dev *es2, char *buf) { int retval; do { retval = usb_control_msg(es2->usb_dev, usb_rcvctrlpipe(es2->usb_dev, 0), GB_APB_REQUEST_LOG, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0x00, 0x00, buf, APB1_LOG_MSG_SIZE, ES2_USB_CTRL_TIMEOUT); if (retval > 0) kfifo_in(&es2->apb_log_fifo, buf, retval); } while (retval > 0); } static int apb_log_poll(void *data) { struct es2_ap_dev *es2 = data; char *buf; buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; while (!kthread_should_stop()) { msleep(1000); apb_log_get(es2, buf); } kfree(buf); return 0; } static ssize_t apb_log_read(struct file *f, char __user *buf, size_t count, loff_t *ppos) { struct es2_ap_dev *es2 = file_inode(f)->i_private; ssize_t ret; size_t copied; char *tmp_buf; if (count > APB1_LOG_SIZE) count = APB1_LOG_SIZE; tmp_buf = kmalloc(count, GFP_KERNEL); if (!tmp_buf) return -ENOMEM; copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count); ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied); kfree(tmp_buf); return ret; } static const struct file_operations apb_log_fops = { .read = apb_log_read, }; static void usb_log_enable(struct es2_ap_dev *es2) { if (!IS_ERR_OR_NULL(es2->apb_log_task)) return; /* get log from APB1 */ es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log"); if (IS_ERR(es2->apb_log_task)) return; /* XXX We will need to rename this per APB */ es2->apb_log_dentry = debugfs_create_file("apb_log", 0444, gb_debugfs_get(), es2, &apb_log_fops); } static void usb_log_disable(struct es2_ap_dev *es2) { if (IS_ERR_OR_NULL(es2->apb_log_task)) return; debugfs_remove(es2->apb_log_dentry); es2->apb_log_dentry = NULL; kthread_stop(es2->apb_log_task); es2->apb_log_task = NULL; } static ssize_t apb_log_enable_read(struct file *f, char __user *buf, size_t count, loff_t *ppos) { struct es2_ap_dev *es2 = file_inode(f)->i_private; int enable = !IS_ERR_OR_NULL(es2->apb_log_task); char tmp_buf[3]; sprintf(tmp_buf, "%d\n", enable); return simple_read_from_buffer(buf, count, ppos, tmp_buf, 2); } static ssize_t apb_log_enable_write(struct file *f, const char __user *buf, size_t count, loff_t *ppos) { int enable; ssize_t retval; struct es2_ap_dev *es2 = file_inode(f)->i_private; retval = kstrtoint_from_user(buf, count, 10, &enable); if (retval) return retval; if (enable) usb_log_enable(es2); else usb_log_disable(es2); return count; } static const struct file_operations apb_log_enable_fops = { .read = apb_log_enable_read, .write = apb_log_enable_write, }; static int apb_get_cport_count(struct usb_device *udev) { int retval; __le16 *cport_count; cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL); if (!cport_count) return -ENOMEM; retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), GB_APB_REQUEST_CPORT_COUNT, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, 0, 0, cport_count, sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT); if (retval != sizeof(*cport_count)) { dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n", retval); if (retval >= 0) retval = -EIO; goto out; } retval = le16_to_cpu(*cport_count); /* We need to fit a CPort ID in one byte of a message header 
*/ if (retval > U8_MAX) { retval = U8_MAX; dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n"); } out: kfree(cport_count); return retval; } /* * The ES2 USB Bridge device has 15 endpoints * 1 Control - usual USB stuff + AP -> APBridgeA messages * 7 Bulk IN - CPort data in * 7 Bulk OUT - CPort data out */ static int ap_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct es2_ap_dev *es2; struct gb_host_device *hd; struct usb_device *udev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; __u8 ep_addr; int retval; int i; int num_cports; bool bulk_out_found = false; bool bulk_in_found = false; bool arpc_in_found = false; udev = usb_get_dev(interface_to_usbdev(interface)); num_cports = apb_get_cport_count(udev); if (num_cports < 0) { usb_put_dev(udev); dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n", num_cports); return num_cports; } hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX, num_cports); if (IS_ERR(hd)) { usb_put_dev(udev); return PTR_ERR(hd); } es2 = hd_to_es2(hd); es2->hd = hd; es2->usb_intf = interface; es2->usb_dev = udev; spin_lock_init(&es2->cport_out_urb_lock); INIT_KFIFO(es2->apb_log_fifo); usb_set_intfdata(interface, es2); /* * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated * dynamically. */ retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0); if (retval) goto error; retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1); if (retval) goto error; /* find all bulk endpoints */ iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; ep_addr = endpoint->bEndpointAddress; if (usb_endpoint_is_bulk_in(endpoint)) { if (!bulk_in_found) { es2->cport_in.endpoint = ep_addr; bulk_in_found = true; } else if (!arpc_in_found) { es2->arpc_endpoint_in = ep_addr; arpc_in_found = true; } else { dev_warn(&udev->dev, "Unused bulk IN endpoint found: 0x%02x\n", ep_addr); } continue; } if (usb_endpoint_is_bulk_out(endpoint)) { if (!bulk_out_found) { es2->cport_out_endpoint = ep_addr; bulk_out_found = true; } else { dev_warn(&udev->dev, "Unused bulk OUT endpoint found: 0x%02x\n", ep_addr); } continue; } dev_warn(&udev->dev, "Unknown endpoint type found, address 0x%02x\n", ep_addr); } if (!bulk_in_found || !arpc_in_found || !bulk_out_found) { dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n"); retval = -ENODEV; goto error; } /* Allocate buffers for our cport in messages */ for (i = 0; i < NUM_CPORT_IN_URB; ++i) { struct urb *urb; u8 *buffer; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { retval = -ENOMEM; goto error; } es2->cport_in.urb[i] = urb; buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL); if (!buffer) { retval = -ENOMEM; goto error; } usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, es2->cport_in.endpoint), buffer, ES2_GBUF_MSG_SIZE_MAX, cport_in_callback, hd); es2->cport_in.buffer[i] = buffer; } /* Allocate buffers for ARPC in messages */ for (i = 0; i < NUM_ARPC_IN_URB; ++i) { struct urb *urb; u8 *buffer; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { retval = -ENOMEM; goto error; } es2->arpc_urb[i] = urb; buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL); if (!buffer) { retval = -ENOMEM; goto error; } usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, es2->arpc_endpoint_in), buffer, ARPC_IN_SIZE_MAX, arpc_in_callback, es2); es2->arpc_buffer[i] = buffer; } /* Allocate urbs for our CPort OUT messages */ for (i = 0; i < NUM_CPORT_OUT_URB; ++i) { struct urb *urb; urb = usb_alloc_urb(0, 
GFP_KERNEL); if (!urb) { retval = -ENOMEM; goto error; } es2->cport_out_urb[i] = urb; es2->cport_out_urb_busy[i] = false; /* just to be anal */ } /* XXX We will need to rename this per APB */ es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable", 0644, gb_debugfs_get(), es2, &apb_log_enable_fops); INIT_LIST_HEAD(&es2->arpcs); spin_lock_init(&es2->arpc_lock); retval = es2_arpc_in_enable(es2); if (retval) goto error; retval = gb_hd_add(hd); if (retval) goto err_disable_arpc_in; retval = es2_cport_in_enable(es2, &es2->cport_in); if (retval) goto err_hd_del; return 0; err_hd_del: gb_hd_del(hd); err_disable_arpc_in: es2_arpc_in_disable(es2); error: es2_destroy(es2); return retval; } static void ap_disconnect(struct usb_interface *interface) { struct es2_ap_dev *es2 = usb_get_intfdata(interface); gb_hd_del(es2->hd); es2_cport_in_disable(es2, &es2->cport_in); es2_arpc_in_disable(es2); es2_destroy(es2); } static struct usb_driver es2_ap_driver = { .name = "es2_ap_driver", .probe = ap_probe, .disconnect = ap_disconnect, .id_table = id_table, .soft_unbind = 1, }; module_usb_driver(es2_ap_driver); MODULE_DESCRIPTION("Greybus AP USB driver for ES2 controller chips"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
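Editor's illustration (not part of es2.c): the CPort id travels in a single pad byte of the operation-message header, which is also why apb_get_cport_count() clamps the reported count to U8_MAX. A hypothetical round-trip of the pack/unpack helpers defined above:

static void demo_cport_pack_roundtrip(struct gb_operation_msg_hdr *header)
{
	u16 cport_id;

	/* stash the id in header->pad[0] before the URB is submitted */
	gb_message_cport_pack(header, 7);

	/* the receive path reads the id back and clears the pad byte */
	cport_id = gb_message_cport_unpack(header);

	WARN_ON(cport_id != 7);
	WARN_ON(header->pad[0] != 0);
}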
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM block #if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_BLOCK_H #include <linux/blktrace_api.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> #include <linux/tracepoint.h> #include <uapi/linux/ioprio.h> #define RWBS_LEN 8 #define IOPRIO_CLASS_STRINGS \ { IOPRIO_CLASS_NONE, "none" }, \ { IOPRIO_CLASS_RT, "rt" }, \ { IOPRIO_CLASS_BE, "be" }, \ { IOPRIO_CLASS_IDLE, "idle" }, \ { IOPRIO_CLASS_INVALID, "invalid"} #ifdef CONFIG_BUFFER_HEAD DECLARE_EVENT_CLASS(block_buffer, TP_PROTO(struct buffer_head *bh), TP_ARGS(bh), TP_STRUCT__entry ( __field( dev_t, dev ) __field( sector_t, sector ) __field( size_t, size ) ), TP_fast_assign( __entry->dev = bh->b_bdev->bd_dev; __entry->sector = bh->b_blocknr; __entry->size = bh->b_size; ), TP_printk("%d,%d sector=%llu size=%zu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->sector, __entry->size ) ); /** * block_touch_buffer - mark a buffer accessed * @bh: buffer_head being touched * * Called from touch_buffer().
*/ DEFINE_EVENT(block_buffer, block_touch_buffer, TP_PROTO(struct buffer_head *bh), TP_ARGS(bh) ); /** * block_dirty_buffer - mark a buffer dirty * @bh: buffer_head being dirtied * * Called from mark_buffer_dirty(). */ DEFINE_EVENT(block_buffer, block_dirty_buffer, TP_PROTO(struct buffer_head *bh), TP_ARGS(bh) ); #endif /* CONFIG_BUFFER_HEAD */ /** * block_rq_requeue - place block IO request back on a queue * @rq: block IO operation request * * The block operation request @rq is being placed back into queue * @q. For some reason the request was not completed and needs to be * put back in the queue. */ TRACE_EVENT(block_rq_requeue, TP_PROTO(struct request *rq), TP_ARGS(rq), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( unsigned short, ioprio ) __array( char, rwbs, RWBS_LEN ) __dynamic_array( char, cmd, 1 ) ), TP_fast_assign( __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0; __entry->sector = blk_rq_trace_sector(rq); __entry->nr_sector = blk_rq_trace_nr_sectors(rq); __entry->ioprio = req_get_ioprio(rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; ), TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, __get_str(cmd), (unsigned long long)__entry->sector, __entry->nr_sector, __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio), IOPRIO_CLASS_STRINGS), IOPRIO_PRIO_HINT(__entry->ioprio), IOPRIO_PRIO_LEVEL(__entry->ioprio), 0) ); DECLARE_EVENT_CLASS(block_rq_completion, TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes), TP_ARGS(rq, error, nr_bytes), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( int , error ) __field( unsigned short, ioprio ) __array( char, rwbs, RWBS_LEN ) __dynamic_array( char, cmd, 1 ) ), TP_fast_assign( __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0; __entry->sector = blk_rq_pos(rq); __entry->nr_sector = nr_bytes >> 9; __entry->error = blk_status_to_errno(error); __entry->ioprio = req_get_ioprio(rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; ), TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, __get_str(cmd), (unsigned long long)__entry->sector, __entry->nr_sector, __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio), IOPRIO_CLASS_STRINGS), IOPRIO_PRIO_HINT(__entry->ioprio), IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->error) ); /** * block_rq_complete - block IO operation completed by device driver * @rq: block operations request * @error: status code * @nr_bytes: number of completed bytes * * The block_rq_complete tracepoint event indicates that some portion * of operation request has been completed by the device driver. If * the @rq->bio is %NULL, then there is absolutely no additional work to * do for the request. If @rq->bio is non-NULL then there is * additional work required to complete the request. */ DEFINE_EVENT(block_rq_completion, block_rq_complete, TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes), TP_ARGS(rq, error, nr_bytes) ); /** * block_rq_error - block IO operation error reported by device driver * @rq: block operations request * @error: status code * @nr_bytes: number of completed bytes * * The block_rq_error tracepoint event indicates that some portion * of operation request has failed as reported by the device driver. 
*/ DEFINE_EVENT(block_rq_completion, block_rq_error, TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes), TP_ARGS(rq, error, nr_bytes) ); DECLARE_EVENT_CLASS(block_rq, TP_PROTO(struct request *rq), TP_ARGS(rq), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( unsigned int, bytes ) __field( unsigned short, ioprio ) __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) __dynamic_array( char, cmd, 1 ) ), TP_fast_assign( __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0; __entry->sector = blk_rq_trace_sector(rq); __entry->nr_sector = blk_rq_trace_nr_sectors(rq); __entry->bytes = blk_rq_bytes(rq); __entry->ioprio = req_get_ioprio(rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("%d,%d %s %u (%s) %llu + %u %s,%u,%u [%s]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, __entry->bytes, __get_str(cmd), (unsigned long long)__entry->sector, __entry->nr_sector, __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio), IOPRIO_CLASS_STRINGS), IOPRIO_PRIO_HINT(__entry->ioprio), IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->comm) ); /** * block_rq_insert - insert block operation request into queue * @rq: block IO operation request * * Called immediately before block operation request @rq is inserted * into queue @q. The fields in the operation request @rq struct can * be examined to determine which device and sectors the pending * operation would access. */ DEFINE_EVENT(block_rq, block_rq_insert, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_rq_issue - issue pending block IO request operation to device driver * @rq: block IO operation request * * Called when block operation request @rq from queue @q is sent to a * device driver for processing. */ DEFINE_EVENT(block_rq, block_rq_issue, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_rq_merge - merge request with another one in the elevator * @rq: block IO operation request * * Called when block operation request @rq from queue @q is merged to another * request queued in the elevator. */ DEFINE_EVENT(block_rq, block_rq_merge, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_io_start - insert a request for execution * @rq: block IO operation request * * Called when block operation request @rq is queued for execution */ DEFINE_EVENT(block_rq, block_io_start, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_io_done - block IO operation request completed * @rq: block IO operation request * * Called when block operation request @rq is completed */ DEFINE_EVENT(block_rq, block_io_done, TP_PROTO(struct request *rq), TP_ARGS(rq) ); /** * block_bio_complete - completed all work on the block operation * @q: queue holding the block operation * @bio: block operation completed * * This tracepoint indicates there is no further work to do on this * block IO operation @bio. 
*/ TRACE_EVENT(block_bio_complete, TP_PROTO(struct request_queue *q, struct bio *bio), TP_ARGS(q, bio), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned, nr_sector ) __field( int, error ) __array( char, rwbs, RWBS_LEN) ), TP_fast_assign( __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); __entry->error = blk_status_to_errno(bio->bi_status); blk_fill_rwbs(__entry->rwbs, bio->bi_opf); ), TP_printk("%d,%d %s %llu + %u [%d]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, __entry->nr_sector, __entry->error) ); DECLARE_EVENT_CLASS(block_bio, TP_PROTO(struct bio *bio), TP_ARGS(bio), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); blk_fill_rwbs(__entry->rwbs, bio->bi_opf); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("%d,%d %s %llu + %u [%s]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, __entry->nr_sector, __entry->comm) ); /** * block_bio_bounce - used bounce buffer when processing block operation * @bio: block operation * * A bounce buffer was used to handle the block operation @bio in @q. * This occurs when hardware limitations prevent a direct transfer of * data between the @bio data memory area and the IO device. Use of a * bounce buffer requires extra copying of data and decreases * performance. */ DEFINE_EVENT(block_bio, block_bio_bounce, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_bio_backmerge - merging block operation to the end of an existing operation * @bio: new block operation to merge * * Merging block request @bio to the end of an existing block request. */ DEFINE_EVENT(block_bio, block_bio_backmerge, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_bio_frontmerge - merging block operation to the beginning of an existing operation * @bio: new block operation to merge * * Merging block IO operation @bio to the beginning of an existing block request. */ DEFINE_EVENT(block_bio, block_bio_frontmerge, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_bio_queue - putting new block IO operation in queue * @bio: new block operation * * About to place the block IO operation @bio into queue @q. */ DEFINE_EVENT(block_bio, block_bio_queue, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_getrq - get a free request entry in queue for block IO operations * @bio: pending block IO operation (can be %NULL) * * A request struct has been allocated to handle the block IO operation @bio. */ DEFINE_EVENT(block_bio, block_getrq, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); /** * block_plug - keep operations requests in request queue * @q: request queue to plug * * Plug the request queue @q. Do not allow block operation requests * to be sent to the device driver. Instead, accumulate requests in * the queue to improve throughput performance of the block device. 
*/ TRACE_EVENT(block_plug, TP_PROTO(struct request_queue *q), TP_ARGS(q), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("[%s]", __entry->comm) ); DECLARE_EVENT_CLASS(block_unplug, TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), TP_ARGS(q, depth, explicit), TP_STRUCT__entry( __field( int, nr_rq ) __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( __entry->nr_rq = depth; memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) ); /** * block_unplug - release of operations requests in request queue * @q: request queue to unplug * @depth: number of requests just added to the queue * @explicit: whether this was an explicit unplug, or one from schedule() * * Unplug request queue @q because device driver is scheduled to work * on elements in the request queue. */ DEFINE_EVENT(block_unplug, block_unplug, TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), TP_ARGS(q, depth, explicit) ); /** * block_split - split a single bio struct into two bio structs * @bio: block operation being split * @new_sector: The starting sector for the new bio * * The bio request @bio needs to be split into two bio requests. The newly * created @bio request starts at @new_sector. This split may be required due to * hardware limitations such as operation crossing device boundaries in a RAID * system. */ TRACE_EVENT(block_split, TP_PROTO(struct bio *bio, unsigned int new_sector), TP_ARGS(bio, new_sector), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( sector_t, new_sector ) __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) ), TP_fast_assign( __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->new_sector = new_sector; blk_fill_rwbs(__entry->rwbs, bio->bi_opf); memcpy(__entry->comm, current->comm, TASK_COMM_LEN); ), TP_printk("%d,%d %s %llu / %llu [%s]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, (unsigned long long)__entry->new_sector, __entry->comm) ); /** * block_bio_remap - map request for a logical device to the raw device * @bio: revised operation * @dev: original device for the operation * @from: original sector for the operation * * An operation for a logical device has been mapped to the * raw block device. */ TRACE_EVENT(block_bio_remap, TP_PROTO(struct bio *bio, dev_t dev, sector_t from), TP_ARGS(bio, dev, from), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( dev_t, old_dev ) __field( sector_t, old_sector ) __array( char, rwbs, RWBS_LEN) ), TP_fast_assign( __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); __entry->old_dev = dev; __entry->old_sector = from; blk_fill_rwbs(__entry->rwbs, bio->bi_opf); ), TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, __entry->nr_sector, MAJOR(__entry->old_dev), MINOR(__entry->old_dev), (unsigned long long)__entry->old_sector) ); /** * block_rq_remap - map request for a block operation request * @rq: block IO operation request * @dev: device for the operation * @from: original sector for the operation * * The block operation request @rq in @q has been remapped. 
The block * operation request @rq holds the current information and @from holds * the original sector. */ TRACE_EVENT(block_rq_remap, TP_PROTO(struct request *rq, dev_t dev, sector_t from), TP_ARGS(rq, dev, from), TP_STRUCT__entry( __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( dev_t, old_dev ) __field( sector_t, old_sector ) __field( unsigned int, nr_bios ) __array( char, rwbs, RWBS_LEN) ), TP_fast_assign( __entry->dev = disk_devt(rq->q->disk); __entry->sector = blk_rq_pos(rq); __entry->nr_sector = blk_rq_sectors(rq); __entry->old_dev = dev; __entry->old_sector = from; __entry->nr_bios = blk_rq_count_bios(rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); ), TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, (unsigned long long)__entry->sector, __entry->nr_sector, MAJOR(__entry->old_dev), MINOR(__entry->old_dev), (unsigned long long)__entry->old_sector, __entry->nr_bios) ); #endif /* _TRACE_BLOCK_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
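/*
 * Editor's note: a hedged sketch (not from the kernel tree) of how the
 * tracepoints declared above are emitted. Exactly one compilation unit
 * defines CREATE_TRACE_POINTS before including this header to instantiate
 * the events (block/blk-core.c does this for block.h); every other user
 * just includes the header and calls trace_<name>(). demo_plug() below is
 * a hypothetical caller.
 */
/* #define CREATE_TRACE_POINTS */	/* in exactly one .c file per header */
#include <trace/events/block.h>

static inline void demo_plug(struct request_queue *q)
{
	/*
	 * Compiles to a static-branch no-op unless the event is enabled,
	 * e.g. via:
	 *   echo 1 > /sys/kernel/tracing/events/block/block_plug/enable
	 *   cat /sys/kernel/tracing/trace_pipe
	 */
	trace_block_plug(q);
}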
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * <linux/usb/audio.h> -- USB Audio definitions. * * Copyright (C) 2006 Thumtronics Pty Ltd. * Developed for Thumtronics by Grey Innovation * Ben Williamson <ben.williamson@greyinnovation.com> * * This software is distributed under the terms of the GNU General Public * License ("GPL") version 2, as published by the Free Software Foundation. * * This file holds USB constants and structures defined * by the USB Device Class Definition for Audio Devices. * Comments below reference relevant sections of that document: * * http://www.usb.org/developers/devclass_docs/audio10.pdf * * Types and defines in this file are either specific to version 1.0 of * this standard or common for newer versions. 
*/ #ifndef _UAPI__LINUX_USB_AUDIO_H #define _UAPI__LINUX_USB_AUDIO_H #include <linux/types.h> /* bInterfaceProtocol values to denote the version of the standard used */ #define UAC_VERSION_1 0x00 #define UAC_VERSION_2 0x20 #define UAC_VERSION_3 0x30 /* A.2 Audio Interface Subclass Codes */ #define USB_SUBCLASS_AUDIOCONTROL 0x01 #define USB_SUBCLASS_AUDIOSTREAMING 0x02 #define USB_SUBCLASS_MIDISTREAMING 0x03 /* A.5 Audio Class-Specific AC Interface Descriptor Subtypes */ #define UAC_HEADER 0x01 #define UAC_INPUT_TERMINAL 0x02 #define UAC_OUTPUT_TERMINAL 0x03 #define UAC_MIXER_UNIT 0x04 #define UAC_SELECTOR_UNIT 0x05 #define UAC_FEATURE_UNIT 0x06 #define UAC1_PROCESSING_UNIT 0x07 #define UAC1_EXTENSION_UNIT 0x08 /* A.6 Audio Class-Specific AS Interface Descriptor Subtypes */ #define UAC_AS_GENERAL 0x01 #define UAC_FORMAT_TYPE 0x02 #define UAC_FORMAT_SPECIFIC 0x03 /* A.7 Processing Unit Process Types */ #define UAC_PROCESS_UNDEFINED 0x00 #define UAC_PROCESS_UP_DOWNMIX 0x01 #define UAC_PROCESS_DOLBY_PROLOGIC 0x02 #define UAC_PROCESS_STEREO_EXTENDER 0x03 #define UAC_PROCESS_REVERB 0x04 #define UAC_PROCESS_CHORUS 0x05 #define UAC_PROCESS_DYN_RANGE_COMP 0x06 /* A.8 Audio Class-Specific Endpoint Descriptor Subtypes */ #define UAC_EP_GENERAL 0x01 /* A.9 Audio Class-Specific Request Codes */ #define UAC_SET_ 0x00 #define UAC_GET_ 0x80 #define UAC__CUR 0x1 #define UAC__MIN 0x2 #define UAC__MAX 0x3 #define UAC__RES 0x4 #define UAC__MEM 0x5 #define UAC_SET_CUR (UAC_SET_ | UAC__CUR) #define UAC_GET_CUR (UAC_GET_ | UAC__CUR) #define UAC_SET_MIN (UAC_SET_ | UAC__MIN) #define UAC_GET_MIN (UAC_GET_ | UAC__MIN) #define UAC_SET_MAX (UAC_SET_ | UAC__MAX) #define UAC_GET_MAX (UAC_GET_ | UAC__MAX) #define UAC_SET_RES (UAC_SET_ | UAC__RES) #define UAC_GET_RES (UAC_GET_ | UAC__RES) #define UAC_SET_MEM (UAC_SET_ | UAC__MEM) #define UAC_GET_MEM (UAC_GET_ | UAC__MEM) #define UAC_GET_STAT 0xff /* A.10 Control Selector Codes */ /* A.10.1 Terminal Control Selectors */ #define UAC_TERM_COPY_PROTECT 0x01 /* A.10.2 Feature Unit Control Selectors */ #define UAC_FU_MUTE 0x01 #define UAC_FU_VOLUME 0x02 #define UAC_FU_BASS 0x03 #define UAC_FU_MID 0x04 #define UAC_FU_TREBLE 0x05 #define UAC_FU_GRAPHIC_EQUALIZER 0x06 #define UAC_FU_AUTOMATIC_GAIN 0x07 #define UAC_FU_DELAY 0x08 #define UAC_FU_BASS_BOOST 0x09 #define UAC_FU_LOUDNESS 0x0a #define UAC_CONTROL_BIT(CS) (1 << ((CS) - 1)) /* A.10.3.1 Up/Down-mix Processing Unit Controls Selectors */ #define UAC_UD_ENABLE 0x01 #define UAC_UD_MODE_SELECT 0x02 /* A.10.3.2 Dolby Prologic (tm) Processing Unit Controls Selectors */ #define UAC_DP_ENABLE 0x01 #define UAC_DP_MODE_SELECT 0x02 /* A.10.3.3 3D Stereo Extender Processing Unit Control Selectors */ #define UAC_3D_ENABLE 0x01 #define UAC_3D_SPACE 0x02 /* A.10.3.4 Reverberation Processing Unit Control Selectors */ #define UAC_REVERB_ENABLE 0x01 #define UAC_REVERB_LEVEL 0x02 #define UAC_REVERB_TIME 0x03 #define UAC_REVERB_FEEDBACK 0x04 /* A.10.3.5 Chorus Processing Unit Control Selectors */ #define UAC_CHORUS_ENABLE 0x01 #define UAC_CHORUS_LEVEL 0x02 #define UAC_CHORUS_RATE 0x03 #define UAC_CHORUS_DEPTH 0x04 /* A.10.3.6 Dynamic Range Compressor Unit Control Selectors */ #define UAC_DCR_ENABLE 0x01 #define UAC_DCR_RATE 0x02 #define UAC_DCR_MAXAMPL 0x03 #define UAC_DCR_THRESHOLD 0x04 #define UAC_DCR_ATTACK_TIME 0x05 #define UAC_DCR_RELEASE_TIME 0x06 /* A.10.4 Extension Unit Control Selectors */ #define UAC_XU_ENABLE 0x01 /* MIDI - A.1 MS Class-Specific Interface Descriptor Subtypes */ #define UAC_MS_HEADER 0x01 #define UAC_MIDI_IN_JACK 
0x02 #define UAC_MIDI_OUT_JACK 0x03 /* MIDI - A.1 MS Class-Specific Endpoint Descriptor Subtypes */ #define UAC_MS_GENERAL 0x01 /* Terminals - 2.1 USB Terminal Types */ #define UAC_TERMINAL_UNDEFINED 0x100 #define UAC_TERMINAL_STREAMING 0x101 #define UAC_TERMINAL_VENDOR_SPEC 0x1FF /* Terminal Control Selectors */ /* 4.3.2 Class-Specific AC Interface Descriptor */ struct uac1_ac_header_descriptor { __u8 bLength; /* 8 + n */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* UAC_HEADER */ __le16 bcdADC; /* 0x0100 */ __le16 wTotalLength; /* includes Unit and Terminal desc. */ __u8 bInCollection; /* n */ __u8 baInterfaceNr[]; /* [n] */ } __attribute__ ((packed)); #define UAC_DT_AC_HEADER_SIZE(n) (8 + (n)) /* As above, but more useful for defining your own descriptors: */ #define DECLARE_UAC_AC_HEADER_DESCRIPTOR(n) \ struct uac1_ac_header_descriptor_##n { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ __le16 bcdADC; \ __le16 wTotalLength; \ __u8 bInCollection; \ __u8 baInterfaceNr[n]; \ } __attribute__ ((packed)) /* 4.3.2.1 Input Terminal Descriptor */ struct uac_input_terminal_descriptor { __u8 bLength; /* in bytes: 12 */ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ __u8 bDescriptorSubtype; /* INPUT_TERMINAL descriptor subtype */ __u8 bTerminalID; /* Constant uniquely identifying the terminal */ __le16 wTerminalType; /* USB Audio Terminal Types */ __u8 bAssocTerminal; /* ID of the associated Output Terminal */ __u8 bNrChannels; /* Number of logical output channels */ __le16 wChannelConfig; __u8 iChannelNames; __u8 iTerminal; } __attribute__ ((packed)); #define UAC_DT_INPUT_TERMINAL_SIZE 12 /* Terminals - 2.2 Input Terminal Types */ #define UAC_INPUT_TERMINAL_UNDEFINED 0x200 #define UAC_INPUT_TERMINAL_MICROPHONE 0x201 #define UAC_INPUT_TERMINAL_DESKTOP_MICROPHONE 0x202 #define UAC_INPUT_TERMINAL_PERSONAL_MICROPHONE 0x203 #define UAC_INPUT_TERMINAL_OMNI_DIR_MICROPHONE 0x204 #define UAC_INPUT_TERMINAL_MICROPHONE_ARRAY 0x205 #define UAC_INPUT_TERMINAL_PROC_MICROPHONE_ARRAY 0x206 /* Terminals - control selectors */ #define UAC_TERMINAL_CS_COPY_PROTECT_CONTROL 0x01 /* 4.3.2.2 Output Terminal Descriptor */ struct uac1_output_terminal_descriptor { __u8 bLength; /* in bytes: 9 */ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ __u8 bDescriptorSubtype; /* OUTPUT_TERMINAL descriptor subtype */ __u8 bTerminalID; /* Constant uniquely identifying the terminal */ __le16 wTerminalType; /* USB Audio Terminal Types */ __u8 bAssocTerminal; /* ID of the associated Input Terminal */ __u8 bSourceID; /* ID of the connected Unit or Terminal*/ __u8 iTerminal; } __attribute__ ((packed)); #define UAC_DT_OUTPUT_TERMINAL_SIZE 9 /* Terminals - 2.3 Output Terminal Types */ #define UAC_OUTPUT_TERMINAL_UNDEFINED 0x300 #define UAC_OUTPUT_TERMINAL_SPEAKER 0x301 #define UAC_OUTPUT_TERMINAL_HEADPHONES 0x302 #define UAC_OUTPUT_TERMINAL_HEAD_MOUNTED_DISPLAY_AUDIO 0x303 #define UAC_OUTPUT_TERMINAL_DESKTOP_SPEAKER 0x304 #define UAC_OUTPUT_TERMINAL_ROOM_SPEAKER 0x305 #define UAC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER 0x306 #define UAC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER 0x307 /* Terminals - 2.4 Bi-directional Terminal Types */ #define UAC_BIDIR_TERMINAL_UNDEFINED 0x400 #define UAC_BIDIR_TERMINAL_HANDSET 0x401 #define UAC_BIDIR_TERMINAL_HEADSET 0x402 #define UAC_BIDIR_TERMINAL_SPEAKER_PHONE 0x403 #define UAC_BIDIR_TERMINAL_ECHO_SUPPRESSING 0x404 #define UAC_BIDIR_TERMINAL_ECHO_CANCELING 0x405 /* Set bControlSize = 2 as default setting */ #define UAC_DT_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 
1) * 2) /* As above, but more useful for defining your own descriptors: */ #define DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(ch) \ struct uac_feature_unit_descriptor_##ch { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ __u8 bUnitID; \ __u8 bSourceID; \ __u8 bControlSize; \ __le16 bmaControls[ch + 1]; \ __u8 iFeature; \ } __attribute__ ((packed)) /* 4.3.2.3 Mixer Unit Descriptor */ struct uac_mixer_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __u8 bNrInPins; __u8 baSourceID[]; } __attribute__ ((packed)); static inline __u8 uac_mixer_unit_bNrChannels(struct uac_mixer_unit_descriptor *desc) { return desc->baSourceID[desc->bNrInPins]; } static inline __u32 uac_mixer_unit_wChannelConfig(struct uac_mixer_unit_descriptor *desc, int protocol) { if (protocol == UAC_VERSION_1) return (desc->baSourceID[desc->bNrInPins + 2] << 8) | desc->baSourceID[desc->bNrInPins + 1]; else return (desc->baSourceID[desc->bNrInPins + 4] << 24) | (desc->baSourceID[desc->bNrInPins + 3] << 16) | (desc->baSourceID[desc->bNrInPins + 2] << 8) | (desc->baSourceID[desc->bNrInPins + 1]); } static inline __u8 uac_mixer_unit_iChannelNames(struct uac_mixer_unit_descriptor *desc, int protocol) { return (protocol == UAC_VERSION_1) ? desc->baSourceID[desc->bNrInPins + 3] : desc->baSourceID[desc->bNrInPins + 5]; } static inline __u8 *uac_mixer_unit_bmControls(struct uac_mixer_unit_descriptor *desc, int protocol) { switch (protocol) { case UAC_VERSION_1: return &desc->baSourceID[desc->bNrInPins + 4]; case UAC_VERSION_2: return &desc->baSourceID[desc->bNrInPins + 6]; case UAC_VERSION_3: return &desc->baSourceID[desc->bNrInPins + 2]; default: return NULL; } } static inline __u16 uac3_mixer_unit_wClusterDescrID(struct uac_mixer_unit_descriptor *desc) { return (desc->baSourceID[desc->bNrInPins + 1] << 8) | desc->baSourceID[desc->bNrInPins]; } static inline __u8 uac_mixer_unit_iMixer(struct uac_mixer_unit_descriptor *desc) { __u8 *raw = (__u8 *) desc; return raw[desc->bLength - 1]; } /* 4.3.2.4 Selector Unit Descriptor */ struct uac_selector_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUintID; __u8 bNrInPins; __u8 baSourceID[]; } __attribute__ ((packed)); static inline __u8 uac_selector_unit_iSelector(struct uac_selector_unit_descriptor *desc) { __u8 *raw = (__u8 *) desc; return raw[desc->bLength - 1]; } /* 4.3.2.5 Feature Unit Descriptor */ struct uac_feature_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __u8 bSourceID; __u8 bControlSize; __u8 bmaControls[]; /* variable length */ } __attribute__((packed)); static inline __u8 uac_feature_unit_iFeature(struct uac_feature_unit_descriptor *desc) { __u8 *raw = (__u8 *) desc; return raw[desc->bLength - 1]; } /* 4.3.2.6 Processing Unit Descriptors */ struct uac_processing_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __le16 wProcessType; __u8 bNrInPins; __u8 baSourceID[]; } __attribute__ ((packed)); static inline __u8 uac_processing_unit_bNrChannels(struct uac_processing_unit_descriptor *desc) { return desc->baSourceID[desc->bNrInPins]; } static inline __u32 uac_processing_unit_wChannelConfig(struct uac_processing_unit_descriptor *desc, int protocol) { if (protocol == UAC_VERSION_1) return (desc->baSourceID[desc->bNrInPins + 2] << 8) | desc->baSourceID[desc->bNrInPins + 1]; else return (desc->baSourceID[desc->bNrInPins + 4] << 24) | (desc->baSourceID[desc->bNrInPins + 3] << 16) | 
(desc->baSourceID[desc->bNrInPins + 2] << 8) | (desc->baSourceID[desc->bNrInPins + 1]); } static inline __u8 uac_processing_unit_iChannelNames(struct uac_processing_unit_descriptor *desc, int protocol) { return (protocol == UAC_VERSION_1) ? desc->baSourceID[desc->bNrInPins + 3] : desc->baSourceID[desc->bNrInPins + 5]; } static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_descriptor *desc, int protocol) { switch (protocol) { case UAC_VERSION_1: return desc->baSourceID[desc->bNrInPins + 4]; case UAC_VERSION_2: return 2; /* in UAC2, this value is constant */ case UAC_VERSION_3: return 4; /* in UAC3, this value is constant */ default: return 1; } } static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc, int protocol) { switch (protocol) { case UAC_VERSION_1: return &desc->baSourceID[desc->bNrInPins + 5]; case UAC_VERSION_2: return &desc->baSourceID[desc->bNrInPins + 6]; case UAC_VERSION_3: return &desc->baSourceID[desc->bNrInPins + 2]; default: return NULL; } } static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc, int protocol) { __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); switch (protocol) { case UAC_VERSION_1: case UAC_VERSION_2: default: return *(uac_processing_unit_bmControls(desc, protocol) + control_size); case UAC_VERSION_3: return 0; /* UAC3 does not have this field */ } } static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc, int protocol) { __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); switch (protocol) { case UAC_VERSION_1: case UAC_VERSION_2: default: return uac_processing_unit_bmControls(desc, protocol) + control_size + 1; case UAC_VERSION_3: return uac_processing_unit_bmControls(desc, protocol) + control_size; } } /* * Extension Unit (XU) has almost compatible layout with Processing Unit, but * on UAC2, it has a different bmControls size (bControlSize); it's 1 byte for * XU while 2 bytes for PU. The last iExtension field is a one-byte index as * well as iProcessing field of PU. 
*/ static inline __u8 uac_extension_unit_bControlSize(struct uac_processing_unit_descriptor *desc, int protocol) { switch (protocol) { case UAC_VERSION_1: return desc->baSourceID[desc->bNrInPins + 4]; case UAC_VERSION_2: return 1; /* in UAC2, this value is constant */ case UAC_VERSION_3: return 4; /* in UAC3, this value is constant */ default: return 1; } } static inline __u8 uac_extension_unit_iExtension(struct uac_processing_unit_descriptor *desc, int protocol) { __u8 control_size = uac_extension_unit_bControlSize(desc, protocol); switch (protocol) { case UAC_VERSION_1: case UAC_VERSION_2: default: return *(uac_processing_unit_bmControls(desc, protocol) + control_size); case UAC_VERSION_3: return 0; /* UAC3 does not have this field */ } } /* 4.5.2 Class-Specific AS Interface Descriptor */ struct uac1_as_header_descriptor { __u8 bLength; /* in bytes: 7 */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* AS_GENERAL */ __u8 bTerminalLink; /* Terminal ID of connected Terminal */ __u8 bDelay; /* Delay introduced by the data path */ __le16 wFormatTag; /* The Audio Data Format */ } __attribute__ ((packed)); #define UAC_DT_AS_HEADER_SIZE 7 /* Formats - A.1.1 Audio Data Format Type I Codes */ #define UAC_FORMAT_TYPE_I_UNDEFINED 0x0 #define UAC_FORMAT_TYPE_I_PCM 0x1 #define UAC_FORMAT_TYPE_I_PCM8 0x2 #define UAC_FORMAT_TYPE_I_IEEE_FLOAT 0x3 #define UAC_FORMAT_TYPE_I_ALAW 0x4 #define UAC_FORMAT_TYPE_I_MULAW 0x5 struct uac_format_type_i_continuous_descriptor { __u8 bLength; /* in bytes: 8 + (ns * 3) */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* FORMAT_TYPE */ __u8 bFormatType; /* FORMAT_TYPE_1 */ __u8 bNrChannels; /* physical channels in the stream */ __u8 bSubframeSize; /* */ __u8 bBitResolution; __u8 bSamFreqType; __u8 tLowerSamFreq[3]; __u8 tUpperSamFreq[3]; } __attribute__ ((packed)); #define UAC_FORMAT_TYPE_I_CONTINUOUS_DESC_SIZE 14 struct uac_format_type_i_discrete_descriptor { __u8 bLength; /* in bytes: 8 + (ns * 3) */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* FORMAT_TYPE */ __u8 bFormatType; /* FORMAT_TYPE_1 */ __u8 bNrChannels; /* physical channels in the stream */ __u8 bSubframeSize; /* */ __u8 bBitResolution; __u8 bSamFreqType; __u8 tSamFreq[][3]; } __attribute__ ((packed)); #define DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(n) \ struct uac_format_type_i_discrete_descriptor_##n { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ __u8 bFormatType; \ __u8 bNrChannels; \ __u8 bSubframeSize; \ __u8 bBitResolution; \ __u8 bSamFreqType; \ __u8 tSamFreq[n][3]; \ } __attribute__ ((packed)) #define UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n) (8 + (n * 3)) struct uac_format_type_i_ext_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bFormatType; __u8 bSubslotSize; __u8 bBitResolution; __u8 bHeaderLength; __u8 bControlSize; __u8 bSideBandProtocol; } __attribute__((packed)); /* Formats - Audio Data Format Type I Codes */ #define UAC_FORMAT_TYPE_II_MPEG 0x1001 #define UAC_FORMAT_TYPE_II_AC3 0x1002 struct uac_format_type_ii_discrete_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bFormatType; __le16 wMaxBitRate; __le16 wSamplesPerFrame; __u8 bSamFreqType; __u8 tSamFreq[][3]; } __attribute__((packed)); struct uac_format_type_ii_ext_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bFormatType; __le16 wMaxBitRate; __le16 wSamplesPerFrame; __u8 bHeaderLength; __u8 bSideBandProtocol; } __attribute__((packed)); /* 
type III */ #define UAC_FORMAT_TYPE_III_IEC1937_AC3 0x2001 #define UAC_FORMAT_TYPE_III_IEC1937_MPEG1_LAYER1 0x2002 #define UAC_FORMAT_TYPE_III_IEC1937_MPEG2_NOEXT 0x2003 #define UAC_FORMAT_TYPE_III_IEC1937_MPEG2_EXT 0x2004 #define UAC_FORMAT_TYPE_III_IEC1937_MPEG2_LAYER1_LS 0x2005 #define UAC_FORMAT_TYPE_III_IEC1937_MPEG2_LAYER23_LS 0x2006 /* Formats - A.2 Format Type Codes */ #define UAC_FORMAT_TYPE_UNDEFINED 0x0 #define UAC_FORMAT_TYPE_I 0x1 #define UAC_FORMAT_TYPE_II 0x2 #define UAC_FORMAT_TYPE_III 0x3 #define UAC_EXT_FORMAT_TYPE_I 0x81 #define UAC_EXT_FORMAT_TYPE_II 0x82 #define UAC_EXT_FORMAT_TYPE_III 0x83 struct uac_iso_endpoint_descriptor { __u8 bLength; /* in bytes: 7 */ __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */ __u8 bDescriptorSubtype; /* EP_GENERAL */ __u8 bmAttributes; __u8 bLockDelayUnits; __le16 wLockDelay; } __attribute__((packed)); #define UAC_ISO_ENDPOINT_DESC_SIZE 7 #define UAC_EP_CS_ATTR_SAMPLE_RATE 0x01 #define UAC_EP_CS_ATTR_PITCH_CONTROL 0x02 #define UAC_EP_CS_ATTR_FILL_MAX 0x80 /* status word format (3.7.1.1) */ #define UAC1_STATUS_TYPE_ORIG_MASK 0x0f #define UAC1_STATUS_TYPE_ORIG_AUDIO_CONTROL_IF 0x0 #define UAC1_STATUS_TYPE_ORIG_AUDIO_STREAM_IF 0x1 #define UAC1_STATUS_TYPE_ORIG_AUDIO_STREAM_EP 0x2 #define UAC1_STATUS_TYPE_IRQ_PENDING (1 << 7) #define UAC1_STATUS_TYPE_MEM_CHANGED (1 << 6) struct uac1_status_word { __u8 bStatusType; __u8 bOriginator; } __attribute__((packed)); #endif /* _UAPI__LINUX_USB_AUDIO_H */
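/*
 * Editor's note: a hedged usage sketch for the DECLARE_* helpers above, in
 * the style of a USB gadget audio function driver. The instance demo_ac_header
 * is hypothetical (interface numbers, collection size), not taken from any
 * in-tree driver; USB_DT_CS_INTERFACE comes from <linux/usb/ch9.h>.
 */
#include <linux/usb/ch9.h>

DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);	/* AC header spanning 2 AS interfaces */

static struct uac1_ac_header_descriptor_2 demo_ac_header = {
	.bLength		= UAC_DT_AC_HEADER_SIZE(2),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubtype	= UAC_HEADER,
	.bcdADC			= cpu_to_le16(0x0100),	/* UAC v1.0 */
	.wTotalLength		= 0,	/* patched once all unit/terminal
					 * descriptors have been sized */
	.bInCollection		= 2,	/* n: streaming interfaces in collection */
	.baInterfaceNr		= { 1, 2 },	/* their interface numbers */
};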
// SPDX-License-Identifier: GPL-2.0-only /* * Access kernel or user memory without faulting. */ #include <linux/export.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <asm/tlb.h> bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) { return true; } /* * The below only uses kmsan_check_memory() to ensure uninitialized kernel * memory isn't leaked. */ #define copy_from_kernel_nofault_loop(dst, src, len, type, err_label) \ while (len >= sizeof(type)) { \ __get_kernel_nofault(dst, src, type, err_label); \ kmsan_check_memory(src, sizeof(type)); \ dst += sizeof(type); \ src += sizeof(type); \ len -= sizeof(type); \ } long copy_from_kernel_nofault(void *dst, const void *src, size_t size) { unsigned long align = 0; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) align = (unsigned long)dst | (unsigned long)src; if (!copy_from_kernel_nofault_allowed(src, size)) return -ERANGE; pagefault_disable(); if (!(align & 7)) copy_from_kernel_nofault_loop(dst, src, size, u64, Efault); if (!(align & 3)) copy_from_kernel_nofault_loop(dst, src, size, u32, Efault); if (!(align & 1)) copy_from_kernel_nofault_loop(dst, src, size, u16, Efault); copy_from_kernel_nofault_loop(dst, src, size, u8, Efault); pagefault_enable(); return 0; Efault: pagefault_enable(); return -EFAULT; } EXPORT_SYMBOL_GPL(copy_from_kernel_nofault); #define copy_to_kernel_nofault_loop(dst, src, len, type, err_label) \ while (len >= sizeof(type)) { \ __put_kernel_nofault(dst, src, type, err_label); \ instrument_write(dst, sizeof(type)); \ dst += sizeof(type); \ src += sizeof(type); \ len -= sizeof(type); \ } long copy_to_kernel_nofault(void *dst, const void *src, size_t size) { unsigned long align = 0; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) align = (unsigned long)dst | (unsigned long)src; pagefault_disable(); if (!(align & 7)) copy_to_kernel_nofault_loop(dst, src, size, u64, Efault); if (!(align & 3)) copy_to_kernel_nofault_loop(dst, src, size, u32, Efault); if (!(align & 1)) copy_to_kernel_nofault_loop(dst, src, size, u16, Efault); copy_to_kernel_nofault_loop(dst, src, size, u8, Efault); pagefault_enable(); return 0; Efault: pagefault_enable(); return -EFAULT; } EXPORT_SYMBOL_GPL(copy_to_kernel_nofault); long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) { const void *src = unsafe_addr; if (unlikely(count <= 0)) return 0; if (!copy_from_kernel_nofault_allowed(unsafe_addr, count)) return -ERANGE; pagefault_disable(); do { __get_kernel_nofault(dst, src, u8, Efault); dst++; src++; } while (dst[-1] && src - unsafe_addr < count); pagefault_enable(); dst[-1] = '\0'; return src - 
unsafe_addr; Efault: pagefault_enable(); dst[0] = '\0'; return -EFAULT; } /** * copy_from_user_nofault(): safely attempt to read from a user-space location * @dst: pointer to the buffer that shall take the data * @src: address to read from. This must be a user address. * @size: size of the data chunk * * Safely read from user address @src to the buffer at @dst. If a kernel fault * happens, handle that and return -EFAULT. */ long copy_from_user_nofault(void *dst, const void __user *src, size_t size) { long ret = -EFAULT; if (!__access_ok(src, size)) return ret; if (!nmi_uaccess_okay()) return ret; pagefault_disable(); ret = __copy_from_user_inatomic(dst, src, size); pagefault_enable(); if (ret) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(copy_from_user_nofault); /** * copy_to_user_nofault(): safely attempt to write to a user-space location * @dst: address to write to * @src: pointer to the data that shall be written * @size: size of the data chunk * * Safely write to address @dst from the buffer at @src. If a kernel fault * happens, handle that and return -EFAULT. */ long copy_to_user_nofault(void __user *dst, const void *src, size_t size) { long ret = -EFAULT; if (access_ok(dst, size)) { pagefault_disable(); ret = __copy_to_user_inatomic(dst, src, size); pagefault_enable(); } if (ret) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(copy_to_user_nofault); /** * strncpy_from_user_nofault: - Copy a NUL terminated string from unsafe user * address. * @dst: Destination address, in kernel space. This buffer must be at * least @count bytes long. * @unsafe_addr: Unsafe user address. * @count: Maximum number of bytes to copy, including the trailing NUL. * * Copies a NUL-terminated string from unsafe user address to kernel buffer. * * On success, returns the length of the string INCLUDING the trailing NUL. * * If access fails, returns -EFAULT (some data may have been copied * and the trailing NUL added). * * If @count is smaller than the length of the string, copies @count-1 bytes, * sets the last byte of @dst buffer to NUL and returns @count. */ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, long count) { long ret; if (unlikely(count <= 0)) return 0; pagefault_disable(); ret = strncpy_from_user(dst, unsafe_addr, count); pagefault_enable(); if (ret >= count) { ret = count; dst[ret - 1] = '\0'; } else if (ret > 0) { ret++; } return ret; } /** * strnlen_user_nofault: - Get the size of a user string INCLUDING final NUL. * @unsafe_addr: The string to measure. * @count: Maximum count (including NUL) * * Get the size of a NUL-terminated string in user space without pagefault. * * Returns the size of the string INCLUDING the terminating NUL. * * If the string is too long, returns a number larger than @count. User * has to check the return value against "> count". * On exception (or invalid count), returns 0. * * Unlike strnlen_user, this can be used from IRQ handler etc. because * it disables pagefaults. */ long strnlen_user_nofault(const void __user *unsafe_addr, long count) { int ret; pagefault_disable(); ret = strnlen_user(unsafe_addr, count); pagefault_enable(); return ret; } void __copy_overflow(int size, unsigned long count) { WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); } EXPORT_SYMBOL(__copy_overflow);
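/*
 * Editor's note: a hedged sketch of typical use of the helpers above, e.g.
 * from a kprobe handler or an oops dumper where the address may be garbage
 * and taking a page fault is not acceptable. demo_peek_kernel() is
 * hypothetical; only copy_from_kernel_nofault() is the real API.
 */
#include <linux/uaccess.h>

static u32 demo_peek_kernel(const void *addr)
{
	u32 val;

	/* returns -EFAULT (or -ERANGE) instead of faulting on a bad address */
	if (copy_from_kernel_nofault(&val, addr, sizeof(val)))
		return 0;
	return val;
}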
// SPDX-License-Identifier: GPL-2.0+ // Driver for Xbox DVD Movie Playback Kit // Copyright (c) 2018 by Benjamin Valentin <benpicco@googlemail.com> /* * Xbox DVD Movie Playback Kit USB IR dongle support * * The driver was derived from the ati_remote driver 2.2.1 * and used information from lirc_xbox.c * * Copyright (c) 2011, 2012 Anssi Hannula <anssi.hannula@iki.fi> * Copyright (c) 2004 Torrey Hoffman <thoffman@arnor.net> * Copyright (c) 2002 Vladimir Dergachev * Copyright (c) 2003-2004 Paul Miller <pmiller9@users.sourceforge.net> */ #include <linux/slab.h> #include <linux/module.h> #include <linux/usb/input.h> #include <media/rc-core.h> /* * Module and Version Information */ #define DRIVER_VERSION "1.0.0" #define DRIVER_AUTHOR "Benjamin Valentin <benpicco@googlemail.com>" #define DRIVER_DESC "Xbox DVD USB Remote Control" #define NAME_BUFSIZE 80 /* size of product name, path buffers */ #define DATA_BUFSIZE 8 /* size of URB data buffers */ /* * USB vendor ids for XBOX DVD Dongles */ #define VENDOR_GAMESTER 0x040b #define VENDOR_MICROSOFT 0x045e static const struct usb_device_id xbox_remote_table[] = { /* Gamester Xbox DVD Movie Playback Kit IR */ { USB_DEVICE(VENDOR_GAMESTER, 0x6521), }, /* Microsoft Xbox DVD Movie Playback Kit IR */ { USB_DEVICE(VENDOR_MICROSOFT, 0x0284), }, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, xbox_remote_table); struct xbox_remote { struct rc_dev *rdev; struct usb_device *udev; struct usb_interface *interface; struct urb *irq_urb; unsigned char inbuf[DATA_BUFSIZE] __aligned(sizeof(u16)); char rc_name[NAME_BUFSIZE]; char rc_phys[NAME_BUFSIZE]; }; static int xbox_remote_rc_open(struct rc_dev *rdev) { struct xbox_remote *xbox_remote = rdev->priv; /* On first open, submit the read urb which was set up previously. 
*/ xbox_remote->irq_urb->dev = xbox_remote->udev; if (usb_submit_urb(xbox_remote->irq_urb, GFP_KERNEL)) { dev_err(&xbox_remote->interface->dev, "%s: usb_submit_urb failed!\n", __func__); return -EIO; } return 0; } static void xbox_remote_rc_close(struct rc_dev *rdev) { struct xbox_remote *xbox_remote = rdev->priv; usb_kill_urb(xbox_remote->irq_urb); } /* * xbox_remote_input_report */ static void xbox_remote_input_report(struct urb *urb) { struct xbox_remote *xbox_remote = urb->context; unsigned char *data = xbox_remote->inbuf; /* * data[0] = 0x00 * data[1] = length - always 0x06 * data[2] = the key code * data[3] = high part of key code * data[4] = last_press_ms (low) * data[5] = last_press_ms (high) */ /* Deal with strange looking inputs */ if (urb->actual_length != 6 || urb->actual_length != data[1]) { dev_warn(&urb->dev->dev, "Weird data, len=%d: %*ph\n", urb->actual_length, urb->actual_length, data); return; } rc_keydown(xbox_remote->rdev, RC_PROTO_XBOX_DVD, le16_to_cpup((__le16 *)(data + 2)), 0); } /* * xbox_remote_irq_in */ static void xbox_remote_irq_in(struct urb *urb) { struct xbox_remote *xbox_remote = urb->context; int retval; switch (urb->status) { case 0: /* success */ xbox_remote_input_report(urb); break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: dev_dbg(&xbox_remote->interface->dev, "%s: urb error status, unlink?\n", __func__); return; default: /* error */ dev_dbg(&xbox_remote->interface->dev, "%s: Nonzero urb status %d\n", __func__, urb->status); } retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&xbox_remote->interface->dev, "%s: usb_submit_urb()=%d\n", __func__, retval); } static void xbox_remote_rc_init(struct xbox_remote *xbox_remote) { struct rc_dev *rdev = xbox_remote->rdev; rdev->priv = xbox_remote; rdev->allowed_protocols = RC_PROTO_BIT_XBOX_DVD; rdev->driver_name = "xbox_remote"; rdev->open = xbox_remote_rc_open; rdev->close = xbox_remote_rc_close; rdev->device_name = xbox_remote->rc_name; rdev->input_phys = xbox_remote->rc_phys; rdev->timeout = MS_TO_US(10); usb_to_input_id(xbox_remote->udev, &rdev->input_id); rdev->dev.parent = &xbox_remote->interface->dev; } static void xbox_remote_initialize(struct xbox_remote *xbox_remote, struct usb_endpoint_descriptor *endpoint_in) { struct usb_device *udev = xbox_remote->udev; int pipe, maxp; /* Set up irq_urb */ pipe = usb_rcvintpipe(udev, endpoint_in->bEndpointAddress); maxp = usb_maxpacket(udev, pipe); maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp; usb_fill_int_urb(xbox_remote->irq_urb, udev, pipe, xbox_remote->inbuf, maxp, xbox_remote_irq_in, xbox_remote, endpoint_in->bInterval); } /* * xbox_remote_probe */ static int xbox_remote_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct usb_host_interface *iface_host = interface->cur_altsetting; struct usb_endpoint_descriptor *endpoint_in; struct xbox_remote *xbox_remote; struct rc_dev *rc_dev; int err = -ENOMEM; // why is there also a device with no endpoints? 
if (iface_host->desc.bNumEndpoints == 0) return -ENODEV; if (iface_host->desc.bNumEndpoints != 1) { pr_err("%s: Unexpected desc.bNumEndpoints: %d\n", __func__, iface_host->desc.bNumEndpoints); return -ENODEV; } endpoint_in = &iface_host->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint_in)) { pr_err("%s: Unexpected endpoint_in\n", __func__); return -ENODEV; } if (le16_to_cpu(endpoint_in->wMaxPacketSize) == 0) { pr_err("%s: endpoint_in message size==0?\n", __func__); return -ENODEV; } xbox_remote = kzalloc(sizeof(*xbox_remote), GFP_KERNEL); rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE); if (!xbox_remote || !rc_dev) goto exit_free_dev_rdev; /* Allocate URB buffer */ xbox_remote->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if (!xbox_remote->irq_urb) goto exit_free_buffers; xbox_remote->udev = udev; xbox_remote->rdev = rc_dev; xbox_remote->interface = interface; usb_make_path(udev, xbox_remote->rc_phys, sizeof(xbox_remote->rc_phys)); strlcat(xbox_remote->rc_phys, "/input0", sizeof(xbox_remote->rc_phys)); snprintf(xbox_remote->rc_name, sizeof(xbox_remote->rc_name), "%s%s%s", udev->manufacturer ?: "", udev->manufacturer && udev->product ? " " : "", udev->product ?: ""); if (!strlen(xbox_remote->rc_name)) snprintf(xbox_remote->rc_name, sizeof(xbox_remote->rc_name), DRIVER_DESC "(%04x,%04x)", le16_to_cpu(xbox_remote->udev->descriptor.idVendor), le16_to_cpu(xbox_remote->udev->descriptor.idProduct)); rc_dev->map_name = RC_MAP_XBOX_DVD; /* default map */ xbox_remote_rc_init(xbox_remote); /* Device Hardware Initialization */ xbox_remote_initialize(xbox_remote, endpoint_in); /* Set up and register rc device */ err = rc_register_device(xbox_remote->rdev); if (err) goto exit_kill_urbs; usb_set_intfdata(interface, xbox_remote); return 0; exit_kill_urbs: usb_kill_urb(xbox_remote->irq_urb); exit_free_buffers: usb_free_urb(xbox_remote->irq_urb); exit_free_dev_rdev: rc_free_device(rc_dev); kfree(xbox_remote); return err; } /* * xbox_remote_disconnect */ static void xbox_remote_disconnect(struct usb_interface *interface) { struct xbox_remote *xbox_remote; xbox_remote = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); if (!xbox_remote) { dev_warn(&interface->dev, "%s - null device?\n", __func__); return; } usb_kill_urb(xbox_remote->irq_urb); rc_unregister_device(xbox_remote->rdev); usb_free_urb(xbox_remote->irq_urb); kfree(xbox_remote); } /* usb specific object to register with the usb subsystem */ static struct usb_driver xbox_remote_driver = { .name = "xbox_remote", .probe = xbox_remote_probe, .disconnect = xbox_remote_disconnect, .id_table = xbox_remote_table, }; module_usb_driver(xbox_remote_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
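/*
 * Editor's note: a self-contained sketch of the 6-byte report layout that
 * xbox_remote_input_report() above parses; the example report bytes are
 * illustrative, not captured from hardware. Runnable in plain userspace C;
 * the manual byte assembly mirrors the driver's le16_to_cpup() on data+2.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* data[0]=0x00, data[1]=length, data[2..3]=scancode, data[4..5]=ms */
	uint8_t data[6] = { 0x00, 0x06, 0xe3, 0x0a, 0x10, 0x00 };
	uint16_t scancode = data[2] | (data[3] << 8);	/* little-endian */
	uint16_t press_ms = data[4] | (data[5] << 8);

	if (data[1] != sizeof(data))	/* same sanity check as the driver */
		return 1;

	printf("scancode=0x%04x last_press=%ums\n", scancode, press_ms);
	return 0;
}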
// SPDX-License-Identifier: GPL-2.0-or-later /* * Abilis Systems Single DVB-T Receiver * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com> * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com> */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/usb.h> #include "as102_drv.h" #include "as102_usb_drv.h" #include "as102_fw.h" static void as102_usb_disconnect(struct usb_interface *interface); static int as102_usb_probe(struct usb_interface *interface, const struct usb_device_id *id); static int as102_usb_start_stream(struct as102_dev_t *dev); static void as102_usb_stop_stream(struct as102_dev_t *dev); static int as102_open(struct inode *inode, struct file *file); static int as102_release(struct inode *inode, struct file *file); static const struct usb_device_id as102_usb_id_table[] = { { USB_DEVICE(AS102_USB_DEVICE_VENDOR_ID, AS102_USB_DEVICE_PID_0001) }, { USB_DEVICE(PCTV_74E_USB_VID, PCTV_74E_USB_PID) }, { USB_DEVICE(ELGATO_EYETV_DTT_USB_VID, ELGATO_EYETV_DTT_USB_PID) }, { USB_DEVICE(NBOX_DVBT_DONGLE_USB_VID, NBOX_DVBT_DONGLE_USB_PID) }, { USB_DEVICE(SKY_IT_DIGITAL_KEY_USB_VID, SKY_IT_DIGITAL_KEY_USB_PID) }, { } /* Terminating entry */ }; /* Note that this table must always have the same number of entries as the as102_usb_id_table struct */ static const char * const as102_device_names[] = { AS102_REFERENCE_DESIGN, AS102_PCTV_74E, AS102_ELGATO_EYETV_DTT_NAME, AS102_NBOX_DVBT_DONGLE_NAME, AS102_SKY_IT_DIGITAL_KEY_NAME, NULL /* Terminating entry */ }; /* eLNA configuration: devices built on the reference design work best with 0xA0, while custom designs seem to require 0xC0 */ static uint8_t const as102_elna_cfg[] = { 0xA0, 0xC0, 0xC0, 
0xA0, 0xA0, 0x00 /* Terminating entry */ }; struct usb_driver as102_usb_driver = { .name = DRIVER_FULL_NAME, .probe = as102_usb_probe, .disconnect = as102_usb_disconnect, .id_table = as102_usb_id_table }; static const struct file_operations as102_dev_fops = { .owner = THIS_MODULE, .open = as102_open, .release = as102_release, }; static struct usb_class_driver as102_usb_class_driver = { .name = "aton2-%d", .fops = &as102_dev_fops, .minor_base = AS102_DEVICE_MAJOR, }; static int as102_usb_xfer_cmd(struct as10x_bus_adapter_t *bus_adap, unsigned char *send_buf, int send_buf_len, unsigned char *recv_buf, int recv_buf_len) { int ret = 0; if (send_buf != NULL) { ret = usb_control_msg(bus_adap->usb_dev, usb_sndctrlpipe(bus_adap->usb_dev, 0), AS102_USB_DEVICE_TX_CTRL_CMD, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, bus_adap->cmd_xid, /* value */ 0, /* index */ send_buf, send_buf_len, USB_CTRL_SET_TIMEOUT /* 200 */); if (ret < 0) { dev_dbg(&bus_adap->usb_dev->dev, "usb_control_msg(send) failed, err %i\n", ret); return ret; } if (ret != send_buf_len) { dev_dbg(&bus_adap->usb_dev->dev, "only wrote %d of %d bytes\n", ret, send_buf_len); return -1; } } if (recv_buf != NULL) { #ifdef TRACE dev_dbg(&bus_adap->usb_dev->dev, "want to read: %d bytes\n", recv_buf_len); #endif ret = usb_control_msg(bus_adap->usb_dev, usb_rcvctrlpipe(bus_adap->usb_dev, 0), AS102_USB_DEVICE_RX_CTRL_CMD, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, bus_adap->cmd_xid, /* value */ 0, /* index */ recv_buf, recv_buf_len, USB_CTRL_GET_TIMEOUT /* 200 */); if (ret < 0) { dev_dbg(&bus_adap->usb_dev->dev, "usb_control_msg(recv) failed, err %i\n", ret); return ret; } #ifdef TRACE dev_dbg(&bus_adap->usb_dev->dev, "read %d bytes\n", recv_buf_len); #endif } return ret; } static int as102_send_ep1(struct as10x_bus_adapter_t *bus_adap, unsigned char *send_buf, int send_buf_len, int swap32) { int ret, actual_len; ret = usb_bulk_msg(bus_adap->usb_dev, usb_sndbulkpipe(bus_adap->usb_dev, 1), send_buf, send_buf_len, &actual_len, 200); if (ret) { dev_dbg(&bus_adap->usb_dev->dev, "usb_bulk_msg(send) failed, err %i\n", ret); return ret; } if (actual_len != send_buf_len) { dev_dbg(&bus_adap->usb_dev->dev, "only wrote %d of %d bytes\n", actual_len, send_buf_len); return -1; } return actual_len; } static int as102_read_ep2(struct as10x_bus_adapter_t *bus_adap, unsigned char *recv_buf, int recv_buf_len) { int ret, actual_len; if (recv_buf == NULL) return -EINVAL; ret = usb_bulk_msg(bus_adap->usb_dev, usb_rcvbulkpipe(bus_adap->usb_dev, 2), recv_buf, recv_buf_len, &actual_len, 200); if (ret) { dev_dbg(&bus_adap->usb_dev->dev, "usb_bulk_msg(recv) failed, err %i\n", ret); return ret; } if (actual_len != recv_buf_len) { dev_dbg(&bus_adap->usb_dev->dev, "only read %d of %d bytes\n", actual_len, recv_buf_len); return -1; } return actual_len; } static const struct as102_priv_ops_t as102_priv_ops = { .upload_fw_pkt = as102_send_ep1, .xfer_cmd = as102_usb_xfer_cmd, .as102_read_ep2 = as102_read_ep2, .start_stream = as102_usb_start_stream, .stop_stream = as102_usb_stop_stream, }; static int as102_submit_urb_stream(struct as102_dev_t *dev, struct urb *urb) { int err; usb_fill_bulk_urb(urb, dev->bus_adap.usb_dev, usb_rcvbulkpipe(dev->bus_adap.usb_dev, 0x2), urb->transfer_buffer, AS102_USB_BUF_SIZE, as102_urb_stream_irq, dev); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) dev_dbg(&urb->dev->dev, "%s: usb_submit_urb failed\n", __func__); return err; } void as102_urb_stream_irq(struct urb *urb) { struct as102_dev_t *as102_dev = urb->context; if (urb->actual_length > 0) { dvb_dmx_swfilter(&as102_dev->dvb_dmx, urb->transfer_buffer, urb->actual_length); } else { if (urb->actual_length == 0) memset(urb->transfer_buffer, 0, AS102_USB_BUF_SIZE); } /* if streaming has not been stopped, re-submit the urb */ if (as102_dev->streaming) as102_submit_urb_stream(as102_dev, urb); } static void as102_free_usb_stream_buffer(struct as102_dev_t *dev) { int i; for (i = 0; i < MAX_STREAM_URB; i++) usb_free_urb(dev->stream_urb[i]); usb_free_coherent(dev->bus_adap.usb_dev, MAX_STREAM_URB * AS102_USB_BUF_SIZE, dev->stream, dev->dma_addr); } static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev) { int i; dev->stream = usb_alloc_coherent(dev->bus_adap.usb_dev, MAX_STREAM_URB * AS102_USB_BUF_SIZE, GFP_KERNEL, &dev->dma_addr); if (!dev->stream) { dev_dbg(&dev->bus_adap.usb_dev->dev, "%s: usb_buffer_alloc failed\n", __func__); return -ENOMEM; } memset(dev->stream, 0, MAX_STREAM_URB * AS102_USB_BUF_SIZE); /* init urb buffers */ for (i = 0; i < MAX_STREAM_URB; i++) { struct urb *urb; urb = usb_alloc_urb(0, GFP_KERNEL); if (urb == NULL) { as102_free_usb_stream_buffer(dev); return -ENOMEM; } urb->transfer_buffer = dev->stream + (i * AS102_USB_BUF_SIZE); urb->transfer_dma = dev->dma_addr + (i * AS102_USB_BUF_SIZE); urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; urb->transfer_buffer_length = AS102_USB_BUF_SIZE; dev->stream_urb[i] = urb; } return 0; } static void as102_usb_stop_stream(struct as102_dev_t *dev) { int i; for (i = 0; i < MAX_STREAM_URB; i++) usb_kill_urb(dev->stream_urb[i]); } static int as102_usb_start_stream(struct as102_dev_t *dev) { int i, ret = 0; for (i = 0; i < MAX_STREAM_URB; i++) { ret = as102_submit_urb_stream(dev, dev->stream_urb[i]); if (ret) { as102_usb_stop_stream(dev); return ret; } } return 0; } static void as102_usb_release(struct kref *kref) { struct as102_dev_t *as102_dev; as102_dev = container_of(kref, struct as102_dev_t, kref); usb_put_dev(as102_dev->bus_adap.usb_dev); kfree(as102_dev); } static void as102_usb_disconnect(struct usb_interface *intf) { struct as102_dev_t *as102_dev; /* extract as102_dev_t from usb_device private data */ as102_dev = usb_get_intfdata(intf); /* unregister dvb layer */ as102_dvb_unregister(as102_dev); /* free usb buffers */ as102_free_usb_stream_buffer(as102_dev); usb_set_intfdata(intf, NULL); /* usb unregister device */ usb_deregister_dev(intf, &as102_usb_class_driver); /* decrement usage counter */ kref_put(&as102_dev->kref, as102_usb_release); pr_info("%s: device has been disconnected\n", DRIVER_NAME); } static int as102_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret; struct as102_dev_t *as102_dev; int i; /* This should never actually happen */ if (ARRAY_SIZE(as102_usb_id_table) != (sizeof(as102_device_names) / sizeof(const char *))) { pr_err("Device names table invalid size\n"); return -EINVAL; } as102_dev = kzalloc(sizeof(struct as102_dev_t), GFP_KERNEL); if (as102_dev == NULL) return -ENOMEM; /* Assign the user-friendly device name */ for (i = 0; i < ARRAY_SIZE(as102_usb_id_table); i++) { if (id == &as102_usb_id_table[i]) { as102_dev->name = as102_device_names[i]; as102_dev->elna_cfg = as102_elna_cfg[i]; } } if (as102_dev->name == NULL) as102_dev->name = "Unknown AS102 device"; /* set private callback functions */ as102_dev->bus_adap.ops = &as102_priv_ops; /* init cmd token for usb bus */ as102_dev->bus_adap.cmd = &as102_dev->bus_adap.token.usb.c; as102_dev->bus_adap.rsp = &as102_dev->bus_adap.token.usb.r; /* init kernel device reference */ kref_init(&as102_dev->kref); /* store 
as102 device to usb_device private data */ usb_set_intfdata(intf, (void *) as102_dev); /* store in as102 device the usb_device pointer */ as102_dev->bus_adap.usb_dev = usb_get_dev(interface_to_usbdev(intf)); /* we can register the device now, as it is ready */ ret = usb_register_dev(intf, &as102_usb_class_driver); if (ret < 0) { /* something prevented us from registering this driver */ dev_err(&intf->dev, "%s: usb_register_dev() failed (errno = %d)\n", __func__, ret); goto failed; } pr_info("%s: device has been detected\n", DRIVER_NAME); /* request buffer allocation for streaming */ ret = as102_alloc_usb_stream_buffer(as102_dev); if (ret != 0) goto failed_stream; /* register dvb layer */ ret = as102_dvb_register(as102_dev); if (ret != 0) goto failed_dvb; return ret; failed_dvb: as102_free_usb_stream_buffer(as102_dev); failed_stream: usb_deregister_dev(intf, &as102_usb_class_driver); failed: usb_put_dev(as102_dev->bus_adap.usb_dev); usb_set_intfdata(intf, NULL); kfree(as102_dev); return ret; } static int as102_open(struct inode *inode, struct file *file) { int ret = 0, minor = 0; struct usb_interface *intf = NULL; struct as102_dev_t *dev = NULL; /* read minor from inode */ minor = iminor(inode); /* fetch device from usb interface */ intf = usb_find_interface(&as102_usb_driver, minor); if (intf == NULL) { pr_err("%s: can't find device for minor %d\n", __func__, minor); ret = -ENODEV; goto exit; } /* get our device */ dev = usb_get_intfdata(intf); if (dev == NULL) { ret = -EFAULT; goto exit; } /* save our device object in the file's private structure */ file->private_data = dev; /* increment our usage count for the device */ kref_get(&dev->kref); exit: return ret; } static int as102_release(struct inode *inode, struct file *file) { struct as102_dev_t *dev = NULL; dev = file->private_data; if (dev != NULL) { /* decrement the count on our device */ kref_put(&dev->kref, as102_usb_release); } return 0; } MODULE_DEVICE_TABLE(usb, as102_usb_id_table);
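/*
 * A minimal sketch of the reference-counting pattern the as102 driver above
 * relies on: probe() takes the initial reference (kref_init), every open()
 * adds one (kref_get), and release()/disconnect() drop theirs (kref_put), so
 * the device structure is freed in the kref release callback only once the
 * last user is gone. The demo_* names are illustrative, not part of the
 * driver.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_dev {
	struct kref kref;
	/* ... device state ... */
};

static void demo_release(struct kref *kref)
{
	struct demo_dev *dev = container_of(kref, struct demo_dev, kref);

	kfree(dev);	/* last reference is gone, safe to free */
}

static struct demo_dev *demo_probe(void)
{
	struct demo_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	kref_init(&dev->kref);	/* refcount starts at 1: probe's reference */
	return dev;
}

static void demo_open(struct demo_dev *dev)
{
	kref_get(&dev->kref);	/* each open file holds its own reference */
}

static void demo_close(struct demo_dev *dev)
{
	kref_put(&dev->kref, demo_release);	/* frees on the final put */
}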
// SPDX-License-Identifier: GPL-2.0-or-later /* auditfilter.c -- filtering of audit events * * Copyright 2003-2004 Red Hat, Inc. * Copyright 2005 Hewlett-Packard Development Company, L.P. * Copyright 2005 IBM Corporation */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/audit.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/netlink.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> #include <net/net_namespace.h> #include <net/sock.h> #include "audit.h" /* * Locking model: * * audit_filter_mutex: * Synchronizes writes and blocking reads of audit's filterlist * data. Rcu is used to traverse the filterlist and access * contents of structs audit_entry, audit_watch and opaque * LSM rules during filtering.
If modified, these structures * must be copied and replace their counterparts in the filterlist. * An audit_parent struct is not accessed during filtering, so may * be written directly provided audit_filter_mutex is held. */ /* Audit filter lists, defined in <linux/audit.h> */ struct list_head audit_filter_list[AUDIT_NR_FILTERS] = { LIST_HEAD_INIT(audit_filter_list[0]), LIST_HEAD_INIT(audit_filter_list[1]), LIST_HEAD_INIT(audit_filter_list[2]), LIST_HEAD_INIT(audit_filter_list[3]), LIST_HEAD_INIT(audit_filter_list[4]), LIST_HEAD_INIT(audit_filter_list[5]), LIST_HEAD_INIT(audit_filter_list[6]), LIST_HEAD_INIT(audit_filter_list[7]), #if AUDIT_NR_FILTERS != 8 #error Fix audit_filter_list initialiser #endif }; static struct list_head audit_rules_list[AUDIT_NR_FILTERS] = { LIST_HEAD_INIT(audit_rules_list[0]), LIST_HEAD_INIT(audit_rules_list[1]), LIST_HEAD_INIT(audit_rules_list[2]), LIST_HEAD_INIT(audit_rules_list[3]), LIST_HEAD_INIT(audit_rules_list[4]), LIST_HEAD_INIT(audit_rules_list[5]), LIST_HEAD_INIT(audit_rules_list[6]), LIST_HEAD_INIT(audit_rules_list[7]), }; DEFINE_MUTEX(audit_filter_mutex); static void audit_free_lsm_field(struct audit_field *f) { switch (f->type) { case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: kfree(f->lsm_str); security_audit_rule_free(f->lsm_rule); } } static inline void audit_free_rule(struct audit_entry *e) { int i; struct audit_krule *erule = &e->rule; /* some rules don't have associated watches */ if (erule->watch) audit_put_watch(erule->watch); if (erule->fields) for (i = 0; i < erule->field_count; i++) audit_free_lsm_field(&erule->fields[i]); kfree(erule->fields); kfree(erule->filterkey); kfree(e); } void audit_free_rule_rcu(struct rcu_head *head) { struct audit_entry *e = container_of(head, struct audit_entry, rcu); audit_free_rule(e); } /* Initialize an audit filterlist entry. */ static inline struct audit_entry *audit_init_entry(u32 field_count) { struct audit_entry *entry; struct audit_field *fields; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (unlikely(!entry)) return NULL; fields = kcalloc(field_count, sizeof(*fields), GFP_KERNEL); if (unlikely(!fields)) { kfree(entry); return NULL; } entry->rule.fields = fields; return entry; } /* Unpack a filter field's string representation from user-space * buffer. */ char *audit_unpack_string(void **bufp, size_t *remain, size_t len) { char *str; if (!*bufp || (len == 0) || (len > *remain)) return ERR_PTR(-EINVAL); /* Of the currently implemented string fields, PATH_MAX * defines the longest valid length. */ if (len > PATH_MAX) return ERR_PTR(-ENAMETOOLONG); str = kmalloc(len + 1, GFP_KERNEL); if (unlikely(!str)) return ERR_PTR(-ENOMEM); memcpy(str, *bufp, len); str[len] = 0; *bufp += len; *remain -= len; return str; } /* Translate an inode field to kernel representation. 
*/ static inline int audit_to_inode(struct audit_krule *krule, struct audit_field *f) { if ((krule->listnr != AUDIT_FILTER_EXIT && krule->listnr != AUDIT_FILTER_URING_EXIT) || krule->inode_f || krule->watch || krule->tree || (f->op != Audit_equal && f->op != Audit_not_equal)) return -EINVAL; krule->inode_f = f; return 0; } static __u32 *classes[AUDIT_SYSCALL_CLASSES]; int __init audit_register_class(int class, unsigned *list) { __u32 *p = kcalloc(AUDIT_BITMASK_SIZE, sizeof(__u32), GFP_KERNEL); if (!p) return -ENOMEM; while (*list != ~0U) { unsigned n = *list++; if (n >= AUDIT_BITMASK_SIZE * 32 - AUDIT_SYSCALL_CLASSES) { kfree(p); return -EINVAL; } p[AUDIT_WORD(n)] |= AUDIT_BIT(n); } if (class >= AUDIT_SYSCALL_CLASSES || classes[class]) { kfree(p); return -EINVAL; } classes[class] = p; return 0; } int audit_match_class(int class, unsigned syscall) { if (unlikely(syscall >= AUDIT_BITMASK_SIZE * 32)) return 0; if (unlikely(class >= AUDIT_SYSCALL_CLASSES || !classes[class])) return 0; return classes[class][AUDIT_WORD(syscall)] & AUDIT_BIT(syscall); } #ifdef CONFIG_AUDITSYSCALL static inline int audit_match_class_bits(int class, u32 *mask) { int i; if (classes[class]) { for (i = 0; i < AUDIT_BITMASK_SIZE; i++) if (mask[i] & classes[class][i]) return 0; } return 1; } static int audit_match_signal(struct audit_entry *entry) { struct audit_field *arch = entry->rule.arch_f; if (!arch) { /* When arch is unspecified, we must check both masks on biarch * as syscall number alone is ambiguous. */ return (audit_match_class_bits(AUDIT_CLASS_SIGNAL, entry->rule.mask) && audit_match_class_bits(AUDIT_CLASS_SIGNAL_32, entry->rule.mask)); } switch (audit_classify_arch(arch->val)) { case 0: /* native */ return (audit_match_class_bits(AUDIT_CLASS_SIGNAL, entry->rule.mask)); case 1: /* 32bit on biarch */ return (audit_match_class_bits(AUDIT_CLASS_SIGNAL_32, entry->rule.mask)); default: return 1; } } #endif /* Common user-space to kernel rule translation. 
*/ static inline struct audit_entry *audit_to_entry_common(struct audit_rule_data *rule) { unsigned listnr; struct audit_entry *entry; int i, err; err = -EINVAL; listnr = rule->flags & ~AUDIT_FILTER_PREPEND; switch (listnr) { default: goto exit_err; #ifdef CONFIG_AUDITSYSCALL case AUDIT_FILTER_ENTRY: pr_err("AUDIT_FILTER_ENTRY is deprecated\n"); goto exit_err; case AUDIT_FILTER_EXIT: case AUDIT_FILTER_URING_EXIT: case AUDIT_FILTER_TASK: #endif case AUDIT_FILTER_USER: case AUDIT_FILTER_EXCLUDE: case AUDIT_FILTER_FS: ; } if (unlikely(rule->action == AUDIT_POSSIBLE)) { pr_err("AUDIT_POSSIBLE is deprecated\n"); goto exit_err; } if (rule->action != AUDIT_NEVER && rule->action != AUDIT_ALWAYS) goto exit_err; if (rule->field_count > AUDIT_MAX_FIELDS) goto exit_err; err = -ENOMEM; entry = audit_init_entry(rule->field_count); if (!entry) goto exit_err; entry->rule.flags = rule->flags & AUDIT_FILTER_PREPEND; entry->rule.listnr = listnr; entry->rule.action = rule->action; entry->rule.field_count = rule->field_count; for (i = 0; i < AUDIT_BITMASK_SIZE; i++) entry->rule.mask[i] = rule->mask[i]; for (i = 0; i < AUDIT_SYSCALL_CLASSES; i++) { int bit = AUDIT_BITMASK_SIZE * 32 - i - 1; __u32 *p = &entry->rule.mask[AUDIT_WORD(bit)]; __u32 *class; if (!(*p & AUDIT_BIT(bit))) continue; *p &= ~AUDIT_BIT(bit); class = classes[i]; if (class) { int j; for (j = 0; j < AUDIT_BITMASK_SIZE; j++) entry->rule.mask[j] |= class[j]; } } return entry; exit_err: return ERR_PTR(err); } static u32 audit_ops[] = { [Audit_equal] = AUDIT_EQUAL, [Audit_not_equal] = AUDIT_NOT_EQUAL, [Audit_bitmask] = AUDIT_BIT_MASK, [Audit_bittest] = AUDIT_BIT_TEST, [Audit_lt] = AUDIT_LESS_THAN, [Audit_gt] = AUDIT_GREATER_THAN, [Audit_le] = AUDIT_LESS_THAN_OR_EQUAL, [Audit_ge] = AUDIT_GREATER_THAN_OR_EQUAL, }; static u32 audit_to_op(u32 op) { u32 n; for (n = Audit_equal; n < Audit_bad && audit_ops[n] != op; n++) ; return n; } /* check if an audit field is valid */ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f) { switch (f->type) { case AUDIT_MSGTYPE: if (entry->rule.listnr != AUDIT_FILTER_EXCLUDE && entry->rule.listnr != AUDIT_FILTER_USER) return -EINVAL; break; case AUDIT_FSTYPE: if (entry->rule.listnr != AUDIT_FILTER_FS) return -EINVAL; break; case AUDIT_PERM: if (entry->rule.listnr == AUDIT_FILTER_URING_EXIT) return -EINVAL; break; } switch (entry->rule.listnr) { case AUDIT_FILTER_FS: switch (f->type) { case AUDIT_FSTYPE: case AUDIT_FILTERKEY: break; default: return -EINVAL; } } /* Check for valid field type and op */ switch (f->type) { case AUDIT_ARG0: case AUDIT_ARG1: case AUDIT_ARG2: case AUDIT_ARG3: case AUDIT_PERS: /* <uapi/linux/personality.h> */ case AUDIT_DEVMINOR: /* all ops are valid */ break; case AUDIT_UID: case AUDIT_EUID: case AUDIT_SUID: case AUDIT_FSUID: case AUDIT_LOGINUID: case AUDIT_OBJ_UID: case AUDIT_GID: case AUDIT_EGID: case AUDIT_SGID: case AUDIT_FSGID: case AUDIT_OBJ_GID: case AUDIT_PID: case AUDIT_MSGTYPE: case AUDIT_PPID: case AUDIT_DEVMAJOR: case AUDIT_EXIT: case AUDIT_SUCCESS: case AUDIT_INODE: case AUDIT_SESSIONID: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: case AUDIT_SADDR_FAM: /* bit ops are only useful on syscall args */ if (f->op == Audit_bitmask || f->op == Audit_bittest) return -EINVAL; break; case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_WATCH: case AUDIT_DIR: case AUDIT_FILTERKEY: case AUDIT_LOGINUID_SET: case AUDIT_ARCH: case 
AUDIT_FSTYPE: case AUDIT_PERM: case AUDIT_FILETYPE: case AUDIT_FIELD_COMPARE: case AUDIT_EXE: /* only equal and not equal valid ops */ if (f->op != Audit_not_equal && f->op != Audit_equal) return -EINVAL; break; default: /* field not recognized */ return -EINVAL; } /* Check for select valid field values */ switch (f->type) { case AUDIT_LOGINUID_SET: if ((f->val != 0) && (f->val != 1)) return -EINVAL; break; case AUDIT_PERM: if (f->val & ~15) return -EINVAL; break; case AUDIT_FILETYPE: if (f->val & ~S_IFMT) return -EINVAL; break; case AUDIT_FIELD_COMPARE: if (f->val > AUDIT_MAX_FIELD_COMPARE) return -EINVAL; break; case AUDIT_SADDR_FAM: if (f->val >= AF_MAX) return -EINVAL; break; default: break; } return 0; } /* Translate struct audit_rule_data to kernel's rule representation. */ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, size_t datasz) { int err = 0; struct audit_entry *entry; void *bufp; size_t remain = datasz - sizeof(struct audit_rule_data); int i; char *str; struct audit_fsnotify_mark *audit_mark; entry = audit_to_entry_common(data); if (IS_ERR(entry)) goto exit_nofree; bufp = data->buf; for (i = 0; i < data->field_count; i++) { struct audit_field *f = &entry->rule.fields[i]; u32 f_val; err = -EINVAL; f->op = audit_to_op(data->fieldflags[i]); if (f->op == Audit_bad) goto exit_free; f->type = data->fields[i]; f_val = data->values[i]; /* Support legacy tests for a valid loginuid */ if ((f->type == AUDIT_LOGINUID) && (f_val == AUDIT_UID_UNSET)) { f->type = AUDIT_LOGINUID_SET; f_val = 0; entry->rule.pflags |= AUDIT_LOGINUID_LEGACY; } err = audit_field_valid(entry, f); if (err) goto exit_free; err = -EINVAL; switch (f->type) { case AUDIT_LOGINUID: case AUDIT_UID: case AUDIT_EUID: case AUDIT_SUID: case AUDIT_FSUID: case AUDIT_OBJ_UID: f->uid = make_kuid(current_user_ns(), f_val); if (!uid_valid(f->uid)) goto exit_free; break; case AUDIT_GID: case AUDIT_EGID: case AUDIT_SGID: case AUDIT_FSGID: case AUDIT_OBJ_GID: f->gid = make_kgid(current_user_ns(), f_val); if (!gid_valid(f->gid)) goto exit_free; break; case AUDIT_ARCH: f->val = f_val; entry->rule.arch_f = f; break; case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: str = audit_unpack_string(&bufp, &remain, f_val); if (IS_ERR(str)) { err = PTR_ERR(str); goto exit_free; } entry->rule.buflen += f_val; f->lsm_str = str; err = security_audit_rule_init(f->type, f->op, str, (void **)&f->lsm_rule, GFP_KERNEL); /* Keep currently invalid fields around in case they * become valid after a policy reload. 
*/ if (err == -EINVAL) { pr_warn("audit rule for LSM \'%s\' is invalid\n", str); err = 0; } else if (err) goto exit_free; break; case AUDIT_WATCH: str = audit_unpack_string(&bufp, &remain, f_val); if (IS_ERR(str)) { err = PTR_ERR(str); goto exit_free; } err = audit_to_watch(&entry->rule, str, f_val, f->op); if (err) { kfree(str); goto exit_free; } entry->rule.buflen += f_val; break; case AUDIT_DIR: str = audit_unpack_string(&bufp, &remain, f_val); if (IS_ERR(str)) { err = PTR_ERR(str); goto exit_free; } err = audit_make_tree(&entry->rule, str, f->op); kfree(str); if (err) goto exit_free; entry->rule.buflen += f_val; break; case AUDIT_INODE: f->val = f_val; err = audit_to_inode(&entry->rule, f); if (err) goto exit_free; break; case AUDIT_FILTERKEY: if (entry->rule.filterkey || f_val > AUDIT_MAX_KEY_LEN) goto exit_free; str = audit_unpack_string(&bufp, &remain, f_val); if (IS_ERR(str)) { err = PTR_ERR(str); goto exit_free; } entry->rule.buflen += f_val; entry->rule.filterkey = str; break; case AUDIT_EXE: if (entry->rule.exe || f_val > PATH_MAX) goto exit_free; str = audit_unpack_string(&bufp, &remain, f_val); if (IS_ERR(str)) { err = PTR_ERR(str); goto exit_free; } audit_mark = audit_alloc_mark(&entry->rule, str, f_val); if (IS_ERR(audit_mark)) { kfree(str); err = PTR_ERR(audit_mark); goto exit_free; } entry->rule.buflen += f_val; entry->rule.exe = audit_mark; break; default: f->val = f_val; break; } } if (entry->rule.inode_f && entry->rule.inode_f->op == Audit_not_equal) entry->rule.inode_f = NULL; exit_nofree: return entry; exit_free: if (entry->rule.tree) audit_put_tree(entry->rule.tree); /* that's the temporary one */ if (entry->rule.exe) audit_remove_mark(entry->rule.exe); /* that's the template one */ audit_free_rule(entry); return ERR_PTR(err); } /* Pack a filter field's string representation into data block. */ static inline size_t audit_pack_string(void **bufp, const char *str) { size_t len = strlen(str); memcpy(*bufp, str, len); *bufp += len; return len; } /* Translate kernel rule representation to struct audit_rule_data. 
*/ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule) { struct audit_rule_data *data; void *bufp; int i; data = kmalloc(struct_size(data, buf, krule->buflen), GFP_KERNEL); if (unlikely(!data)) return NULL; memset(data, 0, sizeof(*data)); data->flags = krule->flags | krule->listnr; data->action = krule->action; data->field_count = krule->field_count; bufp = data->buf; for (i = 0; i < data->field_count; i++) { struct audit_field *f = &krule->fields[i]; data->fields[i] = f->type; data->fieldflags[i] = audit_ops[f->op]; switch (f->type) { case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: data->buflen += data->values[i] = audit_pack_string(&bufp, f->lsm_str); break; case AUDIT_WATCH: data->buflen += data->values[i] = audit_pack_string(&bufp, audit_watch_path(krule->watch)); break; case AUDIT_DIR: data->buflen += data->values[i] = audit_pack_string(&bufp, audit_tree_path(krule->tree)); break; case AUDIT_FILTERKEY: data->buflen += data->values[i] = audit_pack_string(&bufp, krule->filterkey); break; case AUDIT_EXE: data->buflen += data->values[i] = audit_pack_string(&bufp, audit_mark_path(krule->exe)); break; case AUDIT_LOGINUID_SET: if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) { data->fields[i] = AUDIT_LOGINUID; data->values[i] = AUDIT_UID_UNSET; break; } fallthrough; /* if set */ default: data->values[i] = f->val; } } for (i = 0; i < AUDIT_BITMASK_SIZE; i++) data->mask[i] = krule->mask[i]; return data; } /* Compare two rules in kernel format. Considered success if rules * don't match. */ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b) { int i; if (a->flags != b->flags || a->pflags != b->pflags || a->listnr != b->listnr || a->action != b->action || a->field_count != b->field_count) return 1; for (i = 0; i < a->field_count; i++) { if (a->fields[i].type != b->fields[i].type || a->fields[i].op != b->fields[i].op) return 1; switch (a->fields[i].type) { case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: if (strcmp(a->fields[i].lsm_str, b->fields[i].lsm_str)) return 1; break; case AUDIT_WATCH: if (strcmp(audit_watch_path(a->watch), audit_watch_path(b->watch))) return 1; break; case AUDIT_DIR: if (strcmp(audit_tree_path(a->tree), audit_tree_path(b->tree))) return 1; break; case AUDIT_FILTERKEY: /* both filterkeys exist based on above type compare */ if (strcmp(a->filterkey, b->filterkey)) return 1; break; case AUDIT_EXE: /* both paths exist based on above type compare */ if (strcmp(audit_mark_path(a->exe), audit_mark_path(b->exe))) return 1; break; case AUDIT_UID: case AUDIT_EUID: case AUDIT_SUID: case AUDIT_FSUID: case AUDIT_LOGINUID: case AUDIT_OBJ_UID: if (!uid_eq(a->fields[i].uid, b->fields[i].uid)) return 1; break; case AUDIT_GID: case AUDIT_EGID: case AUDIT_SGID: case AUDIT_FSGID: case AUDIT_OBJ_GID: if (!gid_eq(a->fields[i].gid, b->fields[i].gid)) return 1; break; default: if (a->fields[i].val != b->fields[i].val) return 1; } } for (i = 0; i < AUDIT_BITMASK_SIZE; i++) if (a->mask[i] != b->mask[i]) return 1; return 0; } /* Duplicate LSM field information. The lsm_rule is opaque, so must be * re-initialized. 
*/ static inline int audit_dupe_lsm_field(struct audit_field *df, struct audit_field *sf) { int ret; char *lsm_str; /* our own copy of lsm_str */ lsm_str = kstrdup(sf->lsm_str, GFP_KERNEL); if (unlikely(!lsm_str)) return -ENOMEM; df->lsm_str = lsm_str; /* our own (refreshed) copy of lsm_rule */ ret = security_audit_rule_init(df->type, df->op, df->lsm_str, (void **)&df->lsm_rule, GFP_KERNEL); /* Keep currently invalid fields around in case they * become valid after a policy reload. */ if (ret == -EINVAL) { pr_warn("audit rule for LSM \'%s\' is invalid\n", df->lsm_str); ret = 0; } return ret; } /* Duplicate an audit rule. This will be a deep copy with the exception * of the watch - that pointer is carried over. The LSM specific fields * will be updated in the copy. The point is to be able to replace the old * rule with the new rule in the filterlist, then free the old rule. * The rlist element is undefined; list manipulations are handled apart from * the initial copy. */ struct audit_entry *audit_dupe_rule(struct audit_krule *old) { u32 fcount = old->field_count; struct audit_entry *entry; struct audit_krule *new; char *fk; int i, err = 0; entry = audit_init_entry(fcount); if (unlikely(!entry)) return ERR_PTR(-ENOMEM); new = &entry->rule; new->flags = old->flags; new->pflags = old->pflags; new->listnr = old->listnr; new->action = old->action; for (i = 0; i < AUDIT_BITMASK_SIZE; i++) new->mask[i] = old->mask[i]; new->prio = old->prio; new->buflen = old->buflen; new->inode_f = old->inode_f; new->field_count = old->field_count; /* * note that we are OK with not refcounting here; audit_match_tree() * never dereferences tree and we can't get false positives there * since we'd have to have rule gone from the list *and* removed * before the chunks found by lookup had been allocated, i.e. before * the beginning of list scan. */ new->tree = old->tree; memcpy(new->fields, old->fields, sizeof(struct audit_field) * fcount); /* deep copy this information, updating the lsm_rule fields, because * the originals will all be freed when the old rule is freed. */ for (i = 0; i < fcount; i++) { switch (new->fields[i].type) { case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: err = audit_dupe_lsm_field(&new->fields[i], &old->fields[i]); break; case AUDIT_FILTERKEY: fk = kstrdup(old->filterkey, GFP_KERNEL); if (unlikely(!fk)) err = -ENOMEM; else new->filterkey = fk; break; case AUDIT_EXE: err = audit_dupe_exe(new, old); break; } if (err) { if (new->exe) audit_remove_mark(new->exe); audit_free_rule(entry); return ERR_PTR(err); } } if (old->watch) { audit_get_watch(old->watch); new->watch = old->watch; } return entry; } /* Find an existing audit rule. * Caller must hold audit_filter_mutex to prevent stale rule data. 
*/ static struct audit_entry *audit_find_rule(struct audit_entry *entry, struct list_head **p) { struct audit_entry *e, *found = NULL; struct list_head *list; int h; if (entry->rule.inode_f) { h = audit_hash_ino(entry->rule.inode_f->val); *p = list = &audit_inode_hash[h]; } else if (entry->rule.watch) { /* we don't know the inode number, so must walk entire hash */ for (h = 0; h < AUDIT_INODE_BUCKETS; h++) { list = &audit_inode_hash[h]; list_for_each_entry(e, list, list) if (!audit_compare_rule(&entry->rule, &e->rule)) { found = e; goto out; } } goto out; } else { *p = list = &audit_filter_list[entry->rule.listnr]; } list_for_each_entry(e, list, list) if (!audit_compare_rule(&entry->rule, &e->rule)) { found = e; goto out; } out: return found; } static u64 prio_low = ~0ULL/2; static u64 prio_high = ~0ULL/2 - 1; /* Add rule to given filterlist if not a duplicate. */ static inline int audit_add_rule(struct audit_entry *entry) { struct audit_entry *e; struct audit_watch *watch = entry->rule.watch; struct audit_tree *tree = entry->rule.tree; struct list_head *list; int err = 0; #ifdef CONFIG_AUDITSYSCALL int dont_count = 0; /* If any of these, don't count towards total */ switch (entry->rule.listnr) { case AUDIT_FILTER_USER: case AUDIT_FILTER_EXCLUDE: case AUDIT_FILTER_FS: dont_count = 1; } #endif mutex_lock(&audit_filter_mutex); e = audit_find_rule(entry, &list); if (e) { mutex_unlock(&audit_filter_mutex); err = -EEXIST; /* normally audit_add_tree_rule() will free it on failure */ if (tree) audit_put_tree(tree); return err; } if (watch) { /* audit_filter_mutex is dropped and re-taken during this call */ err = audit_add_watch(&entry->rule, &list); if (err) { mutex_unlock(&audit_filter_mutex); /* * normally audit_add_tree_rule() will free it * on failure */ if (tree) audit_put_tree(tree); return err; } } if (tree) { err = audit_add_tree_rule(&entry->rule); if (err) { mutex_unlock(&audit_filter_mutex); return err; } } entry->rule.prio = ~0ULL; if (entry->rule.listnr == AUDIT_FILTER_EXIT || entry->rule.listnr == AUDIT_FILTER_URING_EXIT) { if (entry->rule.flags & AUDIT_FILTER_PREPEND) entry->rule.prio = ++prio_high; else entry->rule.prio = --prio_low; } if (entry->rule.flags & AUDIT_FILTER_PREPEND) { list_add(&entry->rule.list, &audit_rules_list[entry->rule.listnr]); list_add_rcu(&entry->list, list); entry->rule.flags &= ~AUDIT_FILTER_PREPEND; } else { list_add_tail(&entry->rule.list, &audit_rules_list[entry->rule.listnr]); list_add_tail_rcu(&entry->list, list); } #ifdef CONFIG_AUDITSYSCALL if (!dont_count) audit_n_rules++; if (!audit_match_signal(entry)) audit_signals++; #endif mutex_unlock(&audit_filter_mutex); return err; } /* Remove an existing rule from filterlist. 
*/ int audit_del_rule(struct audit_entry *entry) { struct audit_entry *e; struct audit_tree *tree = entry->rule.tree; struct list_head *list; int ret = 0; #ifdef CONFIG_AUDITSYSCALL int dont_count = 0; /* If any of these, don't count towards total */ switch (entry->rule.listnr) { case AUDIT_FILTER_USER: case AUDIT_FILTER_EXCLUDE: case AUDIT_FILTER_FS: dont_count = 1; } #endif mutex_lock(&audit_filter_mutex); e = audit_find_rule(entry, &list); if (!e) { ret = -ENOENT; goto out; } if (e->rule.watch) audit_remove_watch_rule(&e->rule); if (e->rule.tree) audit_remove_tree_rule(&e->rule); if (e->rule.exe) audit_remove_mark_rule(&e->rule); #ifdef CONFIG_AUDITSYSCALL if (!dont_count) audit_n_rules--; if (!audit_match_signal(entry)) audit_signals--; #endif list_del_rcu(&e->list); list_del(&e->rule.list); call_rcu(&e->rcu, audit_free_rule_rcu); out: mutex_unlock(&audit_filter_mutex); if (tree) audit_put_tree(tree); /* that's the temporary one */ return ret; } /* List rules using struct audit_rule_data. */ static void audit_list_rules(int seq, struct sk_buff_head *q) { struct sk_buff *skb; struct audit_krule *r; int i; /* This is a blocking read, so use audit_filter_mutex instead of rcu * iterator to sync with list writers. */ for (i = 0; i < AUDIT_NR_FILTERS; i++) { list_for_each_entry(r, &audit_rules_list[i], list) { struct audit_rule_data *data; data = audit_krule_to_data(r); if (unlikely(!data)) break; skb = audit_make_reply(seq, AUDIT_LIST_RULES, 0, 1, data, struct_size(data, buf, data->buflen)); if (skb) skb_queue_tail(q, skb); kfree(data); } } skb = audit_make_reply(seq, AUDIT_LIST_RULES, 1, 1, NULL, 0); if (skb) skb_queue_tail(q, skb); } /* Log rule additions and removals */ static void audit_log_rule_change(char *action, struct audit_krule *rule, int res) { struct audit_buffer *ab; if (!audit_enabled) return; ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_CONFIG_CHANGE); if (!ab) return; audit_log_session_info(ab); audit_log_task_context(ab); audit_log_format(ab, " op=%s", action); audit_log_key(ab, rule->filterkey); audit_log_format(ab, " list=%d res=%d", rule->listnr, res); audit_log_end(ab); } /** * audit_rule_change - apply all rules to the specified message type * @type: audit message type * @seq: netlink audit message sequence (serial) number * @data: payload data * @datasz: size of payload data */ int audit_rule_change(int type, int seq, void *data, size_t datasz) { int err = 0; struct audit_entry *entry; switch (type) { case AUDIT_ADD_RULE: entry = audit_data_to_entry(data, datasz); if (IS_ERR(entry)) return PTR_ERR(entry); err = audit_add_rule(entry); audit_log_rule_change("add_rule", &entry->rule, !err); break; case AUDIT_DEL_RULE: entry = audit_data_to_entry(data, datasz); if (IS_ERR(entry)) return PTR_ERR(entry); err = audit_del_rule(entry); audit_log_rule_change("remove_rule", &entry->rule, !err); break; default: WARN_ON(1); return -EINVAL; } if (err || type == AUDIT_DEL_RULE) { if (entry->rule.exe) audit_remove_mark(entry->rule.exe); audit_free_rule(entry); } return err; } /** * audit_list_rules_send - list the audit rules * @request_skb: skb of request we are replying to (used to target the reply) * @seq: netlink audit message sequence (serial) number */ int audit_list_rules_send(struct sk_buff *request_skb, int seq) { struct task_struct *tsk; struct audit_netlink_list *dest; /* We can't just spew out the rules here because we might fill * the available socket buffer space and deadlock waiting for * auditctl to read from it... 
which isn't ever going to * happen if we're actually running in the context of auditctl * trying to _send_ the stuff */ dest = kmalloc(sizeof(*dest), GFP_KERNEL); if (!dest) return -ENOMEM; dest->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); dest->portid = NETLINK_CB(request_skb).portid; skb_queue_head_init(&dest->q); mutex_lock(&audit_filter_mutex); audit_list_rules(seq, &dest->q); mutex_unlock(&audit_filter_mutex); tsk = kthread_run(audit_send_list_thread, dest, "audit_send_list"); if (IS_ERR(tsk)) { skb_queue_purge(&dest->q); put_net(dest->net); kfree(dest); return PTR_ERR(tsk); } return 0; } int audit_comparator(u32 left, u32 op, u32 right) { switch (op) { case Audit_equal: return (left == right); case Audit_not_equal: return (left != right); case Audit_lt: return (left < right); case Audit_le: return (left <= right); case Audit_gt: return (left > right); case Audit_ge: return (left >= right); case Audit_bitmask: return (left & right); case Audit_bittest: return ((left & right) == right); default: return 0; } } int audit_uid_comparator(kuid_t left, u32 op, kuid_t right) { switch (op) { case Audit_equal: return uid_eq(left, right); case Audit_not_equal: return !uid_eq(left, right); case Audit_lt: return uid_lt(left, right); case Audit_le: return uid_lte(left, right); case Audit_gt: return uid_gt(left, right); case Audit_ge: return uid_gte(left, right); case Audit_bitmask: case Audit_bittest: default: return 0; } } int audit_gid_comparator(kgid_t left, u32 op, kgid_t right) { switch (op) { case Audit_equal: return gid_eq(left, right); case Audit_not_equal: return !gid_eq(left, right); case Audit_lt: return gid_lt(left, right); case Audit_le: return gid_lte(left, right); case Audit_gt: return gid_gt(left, right); case Audit_ge: return gid_gte(left, right); case Audit_bitmask: case Audit_bittest: default: return 0; } } /** * parent_len - find the length of the parent portion of a pathname * @path: pathname of which to determine length */ int parent_len(const char *path) { int plen; const char *p; plen = strlen(path); if (plen == 0) return plen; /* disregard trailing slashes */ p = path + plen - 1; while ((*p == '/') && (p > path)) p--; /* walk backward until we find the next slash or hit beginning */ while ((*p != '/') && (p > path)) p--; /* did we find a slash? Then increment to include it in path */ if (*p == '/') p++; return p - path; } /** * audit_compare_dname_path - compare given dentry name with last component in * given path. Return of 0 indicates a match. * @dname: dentry name that we're comparing * @path: full pathname that we're comparing * @parentlen: length of the parent if known. Passing in AUDIT_NAME_FULL * here indicates that we must compute this value. 
*/ int audit_compare_dname_path(const struct qstr *dname, const char *path, int parentlen) { int dlen, pathlen; const char *p; dlen = dname->len; pathlen = strlen(path); if (pathlen < dlen) return 1; if (parentlen == AUDIT_NAME_FULL) parentlen = parent_len(path); p = path + parentlen; /* handle trailing slashes */ pathlen -= parentlen; while (p[pathlen - 1] == '/') pathlen--; if (pathlen != dlen) return 1; return memcmp(p, dname->name, dlen); } int audit_filter(int msgtype, unsigned int listtype) { struct audit_entry *e; int ret = 1; /* Audit by default */ rcu_read_lock(); list_for_each_entry_rcu(e, &audit_filter_list[listtype], list) { int i, result = 0; for (i = 0; i < e->rule.field_count; i++) { struct audit_field *f = &e->rule.fields[i]; struct lsm_prop prop = { }; pid_t pid; switch (f->type) { case AUDIT_PID: pid = task_tgid_nr(current); result = audit_comparator(pid, f->op, f->val); break; case AUDIT_UID: result = audit_uid_comparator(current_uid(), f->op, f->uid); break; case AUDIT_GID: result = audit_gid_comparator(current_gid(), f->op, f->gid); break; case AUDIT_LOGINUID: result = audit_uid_comparator(audit_get_loginuid(current), f->op, f->uid); break; case AUDIT_LOGINUID_SET: result = audit_comparator(audit_loginuid_set(current), f->op, f->val); break; case AUDIT_MSGTYPE: result = audit_comparator(msgtype, f->op, f->val); break; case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: if (f->lsm_rule) { security_current_getlsmprop_subj(&prop); result = security_audit_rule_match( &prop, f->type, f->op, f->lsm_rule); } break; case AUDIT_EXE: result = audit_exe_compare(current, e->rule.exe); if (f->op == Audit_not_equal) result = !result; break; default: goto unlock_and_return; } if (result < 0) /* error */ goto unlock_and_return; if (!result) break; } if (result > 0) { if (e->rule.action == AUDIT_NEVER || listtype == AUDIT_FILTER_EXCLUDE) ret = 0; break; } } unlock_and_return: rcu_read_unlock(); return ret; } static int update_lsm_rule(struct audit_krule *r) { struct audit_entry *entry = container_of(r, struct audit_entry, rule); struct audit_entry *nentry; int err = 0; if (!security_audit_rule_known(r)) return 0; nentry = audit_dupe_rule(r); if (entry->rule.exe) audit_remove_mark(entry->rule.exe); if (IS_ERR(nentry)) { /* save the first error encountered for the * return value */ err = PTR_ERR(nentry); audit_panic("error updating LSM filters"); if (r->watch) list_del(&r->rlist); list_del_rcu(&entry->list); list_del(&r->list); } else { if (r->watch || r->tree) list_replace_init(&r->rlist, &nentry->rule.rlist); list_replace_rcu(&entry->list, &nentry->list); list_replace(&r->list, &nentry->rule.list); } call_rcu(&entry->rcu, audit_free_rule_rcu); return err; } /* This function will re-initialize the lsm_rule field of all applicable rules. * It will traverse the filter lists searching for rules that contain LSM * specific filter fields. When such a rule is found, it is copied, the * LSM field is re-initialized, and the old rule is replaced with the * updated rule. */ int audit_update_lsm_rules(void) { struct audit_krule *r, *n; int i, err = 0; /* audit_filter_mutex synchronizes the writers */ mutex_lock(&audit_filter_mutex); for (i = 0; i < AUDIT_NR_FILTERS; i++) { list_for_each_entry_safe(r, n, &audit_rules_list[i], list) { int res = update_lsm_rule(r); if (!err) err = res; } } mutex_unlock(&audit_filter_mutex); return err; }
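/*
 * A minimal sketch of the locking model documented at the top of
 * auditfilter.c: a single mutex serializes writers while readers walk the
 * list locklessly under RCU, and removed entries are reclaimed only after a
 * grace period via call_rcu(), just as audit_del_rule() defers freeing to
 * audit_free_rule_rcu(). The demo_* names are illustrative, not part of the
 * audit code.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_rule {
	struct list_head list;
	int val;
	struct rcu_head rcu;
};

static LIST_HEAD(demo_rules);
static DEFINE_MUTEX(demo_mutex);

/* Reader side: lockless traversal, as audit_filter() does. */
static bool demo_match(int val)
{
	struct demo_rule *r;
	bool hit = false;

	rcu_read_lock();
	list_for_each_entry_rcu(r, &demo_rules, list) {
		if (r->val == val) {
			hit = true;
			break;
		}
	}
	rcu_read_unlock();
	return hit;
}

/* Writer side: the mutex serializes updates, as audit_filter_mutex does. */
static int demo_add(int val)
{
	struct demo_rule *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return -ENOMEM;
	r->val = val;
	mutex_lock(&demo_mutex);
	list_add_tail_rcu(&r->list, &demo_rules);
	mutex_unlock(&demo_mutex);
	return 0;
}

static void demo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_rule, rcu));
}

static void demo_del(struct demo_rule *r)
{
	mutex_lock(&demo_mutex);
	list_del_rcu(&r->list);
	mutex_unlock(&demo_mutex);
	call_rcu(&r->rcu, demo_free_rcu);	/* freed after all readers drop out */
}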
//
SPDX-License-Identifier: GPL-2.0 /* * Copyright 2007, Frank A Kingswood <frank@kingswood-consulting.co.uk> * Copyright 2007, Werner Cornelius <werner@cornelius-consult.de> * Copyright 2009, Boris Hajduk <boris@hajduk.org> * * ch341.c implements a serial port driver for the Winchiphead CH341. * * The CH341 device can be used to implement an RS232 asynchronous * serial port, an IEEE-1284 parallel printer port or a memory-like * interface. In all cases the CH341 supports an I2C interface as well. * This driver only supports the asynchronous serial interface. */ #include <linux/kernel.h> #include <linux/tty.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial.h> #include <linux/unaligned.h> #define DEFAULT_BAUD_RATE 9600 #define DEFAULT_TIMEOUT 1000 /* flags for IO-Bits */ #define CH341_BIT_RTS (1 << 6) #define CH341_BIT_DTR (1 << 5) /******************************/ /* interrupt pipe definitions */ /******************************/ /* always 4 interrupt bytes */ /* first irq byte normally 0x08 */ /* second irq byte base 0x7d + below */ /* third irq byte base 0x94 + below */ /* fourth irq byte normally 0xee */ /* second interrupt byte */ #define CH341_MULT_STAT 0x04 /* multiple status since last interrupt event */ /* status returned in third interrupt answer byte, inverted in data from irq */ #define CH341_BIT_CTS 0x01 #define CH341_BIT_DSR 0x02 #define CH341_BIT_RI 0x04 #define CH341_BIT_DCD 0x08 #define CH341_BITS_MODEM_STAT 0x0f /* all bits */ /* Break support - the information used to implement this was gleaned from * the Net/FreeBSD uchcom.c driver by Takanori Watanabe. Domo arigato. */ #define CH341_REQ_READ_VERSION 0x5F #define CH341_REQ_WRITE_REG 0x9A #define CH341_REQ_READ_REG 0x95 #define CH341_REQ_SERIAL_INIT 0xA1 #define CH341_REQ_MODEM_CTRL 0xA4 #define CH341_REG_BREAK 0x05 #define CH341_REG_PRESCALER 0x12 #define CH341_REG_DIVISOR 0x13 #define CH341_REG_LCR 0x18 #define CH341_REG_LCR2 0x25 #define CH341_REG_FLOW_CTL 0x27 #define CH341_NBREAK_BITS 0x01 #define CH341_LCR_ENABLE_RX 0x80 #define CH341_LCR_ENABLE_TX 0x40 #define CH341_LCR_MARK_SPACE 0x20 #define CH341_LCR_PAR_EVEN 0x10 #define CH341_LCR_ENABLE_PAR 0x08 #define CH341_LCR_STOP_BITS_2 0x04 #define CH341_LCR_CS8 0x03 #define CH341_LCR_CS7 0x02 #define CH341_LCR_CS6 0x01 #define CH341_LCR_CS5 0x00 #define CH341_FLOW_CTL_NONE 0x00 #define CH341_FLOW_CTL_RTSCTS 0x01 #define CH341_QUIRK_LIMITED_PRESCALER BIT(0) #define CH341_QUIRK_SIMULATE_BREAK BIT(1) static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1a86, 0x5523) }, { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, { USB_DEVICE(0x2184, 0x0057) }, { USB_DEVICE(0x4348, 0x5523) }, { USB_DEVICE(0x9986, 0x7523) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); struct ch341_private { spinlock_t lock; /* access lock */ unsigned baud_rate; /* set baud rate */ u8 mcr; u8 msr; u8 lcr; unsigned long quirks; u8 version; unsigned long break_end; }; static void ch341_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios); static int ch341_control_out(struct usb_device *dev, u8 request, u16 value, u16 index) { int r; dev_dbg(&dev->dev, "%s - (%02x,%04x,%04x)\n", __func__, request, value, index); r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, value, index, NULL, 0, DEFAULT_TIMEOUT); if (r < 0) dev_err(&dev->dev, "failed to send control message: %d\n", r); return r; } static int 
ch341_control_in(struct usb_device *dev, u8 request, u16 value, u16 index, char *buf, unsigned bufsize) { int r; dev_dbg(&dev->dev, "%s - (%02x,%04x,%04x,%u)\n", __func__, request, value, index, bufsize); r = usb_control_msg_recv(dev, 0, request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, value, index, buf, bufsize, DEFAULT_TIMEOUT, GFP_KERNEL); if (r) { dev_err(&dev->dev, "failed to receive control message: %d\n", r); return r; } return 0; } #define CH341_CLKRATE 48000000 #define CH341_CLK_DIV(ps, fact) (1 << (12 - 3 * (ps) - (fact))) #define CH341_MIN_RATE(ps) (CH341_CLKRATE / (CH341_CLK_DIV((ps), 1) * 512)) static const speed_t ch341_min_rates[] = { CH341_MIN_RATE(0), CH341_MIN_RATE(1), CH341_MIN_RATE(2), CH341_MIN_RATE(3), }; /* Supported range is 46 to 3000000 bps. */ #define CH341_MIN_BPS DIV_ROUND_UP(CH341_CLKRATE, CH341_CLK_DIV(0, 0) * 256) #define CH341_MAX_BPS (CH341_CLKRATE / (CH341_CLK_DIV(3, 0) * 2)) /* * The device line speed is given by the following equation: * * baudrate = 48000000 / (2^(12 - 3 * ps - fact) * div), where * * 0 <= ps <= 3, * 0 <= fact <= 1, * 2 <= div <= 256 if fact = 0, or * 9 <= div <= 256 if fact = 1 */ static int ch341_get_divisor(struct ch341_private *priv, speed_t speed) { unsigned int fact, div, clk_div; bool force_fact0 = false; int ps; /* * Clamp to supported range, this makes the (ps < 0) and (div < 2) * sanity checks below redundant. */ speed = clamp_val(speed, CH341_MIN_BPS, CH341_MAX_BPS); /* * Start with highest possible base clock (fact = 1) that will give a * divisor strictly less than 512. */ fact = 1; for (ps = 3; ps >= 0; ps--) { if (speed > ch341_min_rates[ps]) break; } if (ps < 0) return -EINVAL; /* Determine corresponding divisor, rounding down. */ clk_div = CH341_CLK_DIV(ps, fact); div = CH341_CLKRATE / (clk_div * speed); /* Some devices require a lower base clock if ps < 3. */ if (ps < 3 && (priv->quirks & CH341_QUIRK_LIMITED_PRESCALER)) force_fact0 = true; /* Halve base clock (fact = 0) if required. */ if (div < 9 || div > 255 || force_fact0) { div /= 2; clk_div *= 2; fact = 0; } if (div < 2) return -EINVAL; /* * Pick next divisor if resulting rate is closer to the requested one, * scale up to avoid rounding errors on low rates. */ if (16 * CH341_CLKRATE / (clk_div * div) - 16 * speed >= 16 * speed - 16 * CH341_CLKRATE / (clk_div * (div + 1))) div++; /* * Prefer lower base clock (fact = 0) if even divisor. * * Note that this makes the receiver more tolerant to errors. */ if (fact == 1 && div % 2 == 0) { div /= 2; fact = 0; } return (0x100 - div) << 8 | fact << 2 | ps; } static int ch341_set_baudrate_lcr(struct usb_device *dev, struct ch341_private *priv, speed_t baud_rate, u8 lcr) { int val; int r; if (!baud_rate) return -EINVAL; val = ch341_get_divisor(priv, baud_rate); if (val < 0) return -EINVAL; /* * CH341A buffers data until a full endpoint-size packet (32 bytes) * has been received unless bit 7 is set. * * At least one device with version 0x27 appears to have this bit * inverted. */ if (priv->version > 0x27) val |= BIT(7); r = ch341_control_out(dev, CH341_REQ_WRITE_REG, CH341_REG_DIVISOR << 8 | CH341_REG_PRESCALER, val); if (r) return r; /* * Chip versions before version 0x30 as read using * CH341_REQ_READ_VERSION used separate registers for line control * (stop bits, parity and word length). Version 0x30 and above use * CH341_REG_LCR only and CH341_REG_LCR2 is always set to zero. 
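 * (As the two CH341_REQ_WRITE_REG calls in this function suggest, each such
 * transfer appears to write a register pair: the two register numbers are
 * packed into the wValue word and the two data bytes into the wIndex word,
 * low byte with low byte. Since lcr is a u8, the high data byte is zero,
 * which is what keeps CH341_REG_LCR2 cleared. This pairing is inferred from
 * the code here, not from a datasheet.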
*/ if (priv->version < 0x30) return 0; r = ch341_control_out(dev, CH341_REQ_WRITE_REG, CH341_REG_LCR2 << 8 | CH341_REG_LCR, lcr); if (r) return r; return r; } static int ch341_set_handshake(struct usb_device *dev, u8 control) { return ch341_control_out(dev, CH341_REQ_MODEM_CTRL, ~control, 0); } static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) { const unsigned int size = 2; u8 buffer[2]; int r; unsigned long flags; r = ch341_control_in(dev, CH341_REQ_READ_REG, 0x0706, 0, buffer, size); if (r) return r; spin_lock_irqsave(&priv->lock, flags); priv->msr = (~(*buffer)) & CH341_BITS_MODEM_STAT; spin_unlock_irqrestore(&priv->lock, flags); return 0; } /* -------------------------------------------------------------------------- */ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv) { const unsigned int size = 2; u8 buffer[2]; int r; /* expect two bytes 0x27 0x00 */ r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size); if (r) return r; priv->version = buffer[0]; dev_dbg(&dev->dev, "Chip version: 0x%02x\n", priv->version); r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0); if (r < 0) return r; r = ch341_set_baudrate_lcr(dev, priv, priv->baud_rate, priv->lcr); if (r < 0) return r; r = ch341_set_handshake(dev, priv->mcr); if (r < 0) return r; return 0; } static int ch341_detect_quirks(struct usb_serial_port *port) { struct ch341_private *priv = usb_get_serial_port_data(port); struct usb_device *udev = port->serial->dev; const unsigned int size = 2; unsigned long quirks = 0; u8 buffer[2]; int r; /* * A subset of CH34x devices does not support all features. The * prescaler is limited and there is no support for sending a RS232 * break condition. A read failure when trying to set up the latter is * used to detect these devices. */ r = usb_control_msg_recv(udev, 0, CH341_REQ_READ_REG, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, CH341_REG_BREAK, 0, &buffer, size, DEFAULT_TIMEOUT, GFP_KERNEL); if (r == -EPIPE) { dev_info(&port->dev, "break control not supported, using simulated break\n"); quirks = CH341_QUIRK_LIMITED_PRESCALER | CH341_QUIRK_SIMULATE_BREAK; r = 0; } else if (r) { dev_err(&port->dev, "failed to read break control: %d\n", r); } if (quirks) { dev_dbg(&port->dev, "enabling quirk flags: 0x%02lx\n", quirks); priv->quirks |= quirks; } return r; } static int ch341_port_probe(struct usb_serial_port *port) { struct ch341_private *priv; int r; priv = kzalloc(sizeof(struct ch341_private), GFP_KERNEL); if (!priv) return -ENOMEM; spin_lock_init(&priv->lock); priv->baud_rate = DEFAULT_BAUD_RATE; /* * Some CH340 devices appear unable to change the initial LCR * settings, so set a sane 8N1 default. 
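 * (The value below is 8N1 spelled out in register bits: CH341_LCR_CS8 for
 * eight data bits, no CH341_LCR_ENABLE_PAR so parity is off, and no
 * CH341_LCR_STOP_BITS_2 so a single stop bit, with both receiver and
 * transmitter enabled.)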
*/ priv->lcr = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX | CH341_LCR_CS8; r = ch341_configure(port->serial->dev, priv); if (r < 0) goto error; usb_set_serial_port_data(port, priv); r = ch341_detect_quirks(port); if (r < 0) goto error; return 0; error: kfree(priv); return r; } static void ch341_port_remove(struct usb_serial_port *port) { struct ch341_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); } static int ch341_carrier_raised(struct usb_serial_port *port) { struct ch341_private *priv = usb_get_serial_port_data(port); if (priv->msr & CH341_BIT_DCD) return 1; return 0; } static void ch341_dtr_rts(struct usb_serial_port *port, int on) { struct ch341_private *priv = usb_get_serial_port_data(port); unsigned long flags; /* raise or drop DTR and RTS depending on @on */ spin_lock_irqsave(&priv->lock, flags); if (on) priv->mcr |= CH341_BIT_RTS | CH341_BIT_DTR; else priv->mcr &= ~(CH341_BIT_RTS | CH341_BIT_DTR); spin_unlock_irqrestore(&priv->lock, flags); ch341_set_handshake(port->serial->dev, priv->mcr); } static void ch341_close(struct usb_serial_port *port) { usb_serial_generic_close(port); usb_kill_urb(port->interrupt_in_urb); } /* open this device, set default parameters */ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) { struct ch341_private *priv = usb_get_serial_port_data(port); int r; if (tty) ch341_set_termios(tty, port, NULL); dev_dbg(&port->dev, "%s - submitting interrupt urb\n", __func__); r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (r) { dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n", __func__, r); return r; } r = ch341_get_status(port->serial->dev, priv); if (r < 0) { dev_err(&port->dev, "failed to read modem status: %d\n", r); goto err_kill_interrupt_urb; } r = usb_serial_generic_open(tty, port); if (r) goto err_kill_interrupt_urb; return 0; err_kill_interrupt_urb: usb_kill_urb(port->interrupt_in_urb); return r; } static void ch341_set_flow_control(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { u16 flow_ctl; int r; if (C_CRTSCTS(tty)) flow_ctl = CH341_FLOW_CTL_RTSCTS; else flow_ctl = CH341_FLOW_CTL_NONE; r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG, (CH341_REG_FLOW_CTL << 8) | CH341_REG_FLOW_CTL, (flow_ctl << 8) | flow_ctl); if (r < 0 && old_termios) { tty->termios.c_cflag &= ~CRTSCTS; tty->termios.c_cflag |= (old_termios->c_cflag & CRTSCTS); } } /* old_termios contains the original termios settings and * tty->termios contains the new setting to be used.
*/ static void ch341_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct ch341_private *priv = usb_get_serial_port_data(port); unsigned baud_rate; unsigned long flags; u8 lcr; int r; /* redundant changes may cause the chip to lose bytes */ if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios)) return; baud_rate = tty_get_baud_rate(tty); lcr = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX; switch (C_CSIZE(tty)) { case CS5: lcr |= CH341_LCR_CS5; break; case CS6: lcr |= CH341_LCR_CS6; break; case CS7: lcr |= CH341_LCR_CS7; break; case CS8: lcr |= CH341_LCR_CS8; break; } if (C_PARENB(tty)) { lcr |= CH341_LCR_ENABLE_PAR; if (C_PARODD(tty) == 0) lcr |= CH341_LCR_PAR_EVEN; if (C_CMSPAR(tty)) lcr |= CH341_LCR_MARK_SPACE; } if (C_CSTOPB(tty)) lcr |= CH341_LCR_STOP_BITS_2; if (baud_rate) { priv->baud_rate = baud_rate; r = ch341_set_baudrate_lcr(port->serial->dev, priv, priv->baud_rate, lcr); if (r < 0 && old_termios) { priv->baud_rate = tty_termios_baud_rate(old_termios); tty_termios_copy_hw(&tty->termios, old_termios); } else if (r == 0) { priv->lcr = lcr; } } spin_lock_irqsave(&priv->lock, flags); if (C_BAUD(tty) == B0) priv->mcr &= ~(CH341_BIT_DTR | CH341_BIT_RTS); else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) priv->mcr |= (CH341_BIT_DTR | CH341_BIT_RTS); spin_unlock_irqrestore(&priv->lock, flags); ch341_set_handshake(port->serial->dev, priv->mcr); ch341_set_flow_control(tty, port, old_termios); } /* * A subset of all CH34x devices don't support a real break condition and * reading CH341_REG_BREAK fails (see also ch341_detect_quirks). This function * simulates a break condition by lowering the baud rate to the minimum * supported by the hardware upon enabling the break condition and sending * a NUL byte. * * Incoming data is corrupted while the break condition is being simulated. * * Normally the duration of the break condition can be controlled individually * by userspace using TIOCSBRK and TIOCCBRK or by passing an argument to * TCSBRKP. Due to how the simulation is implemented the duration can't be * controlled. The duration is always about (1s / 46bd * 9bit) = 196ms. */ static int ch341_simulate_break(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct ch341_private *priv = usb_get_serial_port_data(port); unsigned long now, delay; int r, r2; if (break_state != 0) { dev_dbg(&port->dev, "enter break state requested\n"); r = ch341_set_baudrate_lcr(port->serial->dev, priv, CH341_MIN_BPS, CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX | CH341_LCR_CS8); if (r < 0) { dev_err(&port->dev, "failed to change baud rate to %u: %d\n", CH341_MIN_BPS, r); goto restore; } r = tty_put_char(tty, '\0'); if (r < 0) { dev_err(&port->dev, "failed to write NUL byte for simulated break condition: %d\n", r); goto restore; } /* * Compute expected transmission duration including safety * margin. The original baud rate is only restored after the * computed point in time. 
* * 11 bits = 1 start, 8 data, 1 stop, 1 margin */ priv->break_end = jiffies + (11 * HZ / CH341_MIN_BPS); return 0; } dev_dbg(&port->dev, "leave break state requested\n"); now = jiffies; if (time_before(now, priv->break_end)) { /* Wait until NUL byte is written */ delay = priv->break_end - now; dev_dbg(&port->dev, "wait %d ms while transmitting NUL byte at %u baud\n", jiffies_to_msecs(delay), CH341_MIN_BPS); schedule_timeout_interruptible(delay); } r = 0; restore: /* Restore original baud rate */ r2 = ch341_set_baudrate_lcr(port->serial->dev, priv, priv->baud_rate, priv->lcr); if (r2 < 0) { dev_err(&port->dev, "restoring original baud rate of %u failed: %d\n", priv->baud_rate, r2); return r2; } return r; } static int ch341_break_ctl(struct tty_struct *tty, int break_state) { const u16 ch341_break_reg = (CH341_REG_LCR << 8) | CH341_REG_BREAK; struct usb_serial_port *port = tty->driver_data; struct ch341_private *priv = usb_get_serial_port_data(port); u16 reg_contents; u8 break_reg[2]; int r; if (priv->quirks & CH341_QUIRK_SIMULATE_BREAK) return ch341_simulate_break(tty, break_state); r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG, ch341_break_reg, 0, break_reg, 2); if (r) { dev_err(&port->dev, "%s - USB control read error (%d)\n", __func__, r); if (r > 0) r = -EIO; return r; } dev_dbg(&port->dev, "%s - initial ch341 break register contents - reg1: %x, reg2: %x\n", __func__, break_reg[0], break_reg[1]); if (break_state != 0) { dev_dbg(&port->dev, "%s - Enter break state requested\n", __func__); break_reg[0] &= ~CH341_NBREAK_BITS; break_reg[1] &= ~CH341_LCR_ENABLE_TX; } else { dev_dbg(&port->dev, "%s - Leave break state requested\n", __func__); break_reg[0] |= CH341_NBREAK_BITS; break_reg[1] |= CH341_LCR_ENABLE_TX; } dev_dbg(&port->dev, "%s - New ch341 break register contents - reg1: %x, reg2: %x\n", __func__, break_reg[0], break_reg[1]); reg_contents = get_unaligned_le16(break_reg); r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG, ch341_break_reg, reg_contents); if (r < 0) { dev_err(&port->dev, "%s - USB control write error (%d)\n", __func__, r); return r; } return 0; } static int ch341_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct ch341_private *priv = usb_get_serial_port_data(port); unsigned long flags; u8 control; spin_lock_irqsave(&priv->lock, flags); if (set & TIOCM_RTS) priv->mcr |= CH341_BIT_RTS; if (set & TIOCM_DTR) priv->mcr |= CH341_BIT_DTR; if (clear & TIOCM_RTS) priv->mcr &= ~CH341_BIT_RTS; if (clear & TIOCM_DTR) priv->mcr &= ~CH341_BIT_DTR; control = priv->mcr; spin_unlock_irqrestore(&priv->lock, flags); return ch341_set_handshake(port->serial->dev, control); } static void ch341_update_status(struct usb_serial_port *port, unsigned char *data, size_t len) { struct ch341_private *priv = usb_get_serial_port_data(port); struct tty_struct *tty; unsigned long flags; u8 status; u8 delta; if (len < 4) return; status = ~data[2] & CH341_BITS_MODEM_STAT; spin_lock_irqsave(&priv->lock, flags); delta = status ^ priv->msr; priv->msr = status; spin_unlock_irqrestore(&priv->lock, flags); if (data[1] & CH341_MULT_STAT) dev_dbg(&port->dev, "%s - multiple status change\n", __func__); if (!delta) return; if (delta & CH341_BIT_CTS) port->icount.cts++; if (delta & CH341_BIT_DSR) port->icount.dsr++; if (delta & CH341_BIT_RI) port->icount.rng++; if (delta & CH341_BIT_DCD) { port->icount.dcd++; tty = tty_port_tty_get(&port->port); if (tty) { usb_serial_handle_dcd_change(port, tty, status & 
CH341_BIT_DCD); tty_kref_put(tty); } } wake_up_interruptible(&port->port.delta_msr_wait); } static void ch341_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; unsigned char *data = urb->transfer_buffer; unsigned int len = urb->actual_length; int status; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&urb->dev->dev, "%s - urb shutting down: %d\n", __func__, urb->status); return; default: dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, urb->status); goto exit; } usb_serial_debug_data(&port->dev, __func__, len, data); ch341_update_status(port, data, len); exit: status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { dev_err(&urb->dev->dev, "%s - usb_submit_urb failed: %d\n", __func__, status); } } static int ch341_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct ch341_private *priv = usb_get_serial_port_data(port); unsigned long flags; u8 mcr; u8 status; unsigned int result; spin_lock_irqsave(&priv->lock, flags); mcr = priv->mcr; status = priv->msr; spin_unlock_irqrestore(&priv->lock, flags); result = ((mcr & CH341_BIT_DTR) ? TIOCM_DTR : 0) | ((mcr & CH341_BIT_RTS) ? TIOCM_RTS : 0) | ((status & CH341_BIT_CTS) ? TIOCM_CTS : 0) | ((status & CH341_BIT_DSR) ? TIOCM_DSR : 0) | ((status & CH341_BIT_RI) ? TIOCM_RI : 0) | ((status & CH341_BIT_DCD) ? TIOCM_CD : 0); dev_dbg(&port->dev, "%s - result = %x\n", __func__, result); return result; } static int ch341_reset_resume(struct usb_serial *serial) { struct usb_serial_port *port = serial->port[0]; struct ch341_private *priv; int ret; priv = usb_get_serial_port_data(port); if (!priv) return 0; /* reconfigure ch341 serial port after bus-reset */ ch341_configure(serial->dev, priv); if (tty_port_initialized(&port->port)) { ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); if (ret) { dev_err(&port->dev, "failed to submit interrupt urb: %d\n", ret); return ret; } ret = ch341_get_status(port->serial->dev, priv); if (ret < 0) { dev_err(&port->dev, "failed to read modem status: %d\n", ret); } } return usb_serial_generic_resume(serial); } static struct usb_serial_driver ch341_device = { .driver = { .name = "ch341-uart", }, .id_table = id_table, .num_ports = 1, .open = ch341_open, .dtr_rts = ch341_dtr_rts, .carrier_raised = ch341_carrier_raised, .close = ch341_close, .set_termios = ch341_set_termios, .break_ctl = ch341_break_ctl, .tiocmget = ch341_tiocmget, .tiocmset = ch341_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .read_int_callback = ch341_read_int_callback, .port_probe = ch341_port_probe, .port_remove = ch341_port_remove, .reset_resume = ch341_reset_resume, }; static struct usb_serial_driver * const serial_drivers[] = { &ch341_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_DESCRIPTION("Winchiphead CH341 USB Serial driver"); MODULE_LICENSE("GPL v2");
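/*
 * Editor's sketch: the divisor equation documented above ch341_get_divisor()
 * is easy to sanity-check outside the kernel. Below is a minimal,
 * hypothetical user-space program (not part of the driver) that brute-forces
 * ps/fact/div over the documented ranges for a requested rate. For 115200 bps
 * it reports an achievable rate of 115384 bps (about 0.16% error); the
 * driver's closed-form search reaches the same rate, though it may settle on
 * a different but equivalent ps/fact/div combination.
 */
#include <stdio.h>

#define EX_CLKRATE 48000000UL
/* 2^(12 - 3*ps - fact), mirroring the driver's CH341_CLK_DIV() */
#define EX_CLK_DIV(ps, fact) (1UL << (12 - 3 * (ps) - (fact)))

int main(void)
{
	unsigned long target = 115200, best_err = ~0UL, rate, err;
	int ps, fact, div, best_ps = 0, best_fact = 0, best_div = 2;

	for (ps = 0; ps <= 3; ps++) {
		for (fact = 0; fact <= 1; fact++) {
			/* div range depends on fact, per the driver comment */
			for (div = fact ? 9 : 2; div <= 256; div++) {
				rate = EX_CLKRATE / (EX_CLK_DIV(ps, fact) * div);
				err = rate > target ? rate - target : target - rate;
				if (err < best_err) {
					best_err = err;
					best_ps = ps;
					best_fact = fact;
					best_div = div;
				}
			}
		}
	}
	printf("ps=%d fact=%d div=%d -> %lu bps\n", best_ps, best_fact, best_div,
	       EX_CLKRATE / (EX_CLK_DIV(best_ps, best_fact) * best_div));
	return 0;
}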
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for the po1030 sensor * * Copyright (c) 2008 Erik Andrén * Copyright (c) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
* Copyright (c) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br> * * Portions of code to USB interface and ALi driver software, * Copyright (c) 2006 Willem Duinker * v4l2 interface modeled after the V4L2 driver * for SN9C10x PC Camera Controllers */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "m5602_po1030.h" static int po1030_s_ctrl(struct v4l2_ctrl *ctrl); static void po1030_dump_registers(struct sd *sd); static const unsigned char preinit_po1030[][3] = { {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02}, {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, {BRIDGE, M5602_XB_ADC_CTRL, 0xc0}, {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00}, {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c}, {BRIDGE, M5602_XB_ADC_CTRL, 0xc0}, {BRIDGE, M5602_XB_GPIO_DIR, 0x05}, {BRIDGE, M5602_XB_GPIO_DAT, 0x04}, {BRIDGE, M5602_XB_GPIO_EN_H, 0x06}, {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06}, {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02}, {SENSOR, PO1030_AUTOCTRL2, PO1030_SENSOR_RESET | (1 << 2)}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c}, {BRIDGE, M5602_XB_GPIO_DIR, 0x05}, {BRIDGE, M5602_XB_GPIO_DAT, 0x00} }; static const unsigned char init_po1030[][3] = { {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02}, {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, {BRIDGE, M5602_XB_ADC_CTRL, 0xc0}, {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00}, {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c}, {SENSOR, PO1030_AUTOCTRL2, PO1030_SENSOR_RESET | (1 << 2)}, {BRIDGE, M5602_XB_GPIO_DIR, 0x05}, {BRIDGE, M5602_XB_GPIO_DAT, 0x04}, {BRIDGE, M5602_XB_GPIO_EN_H, 0x06}, {BRIDGE, M5602_XB_GPIO_EN_L, 0x00}, {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06}, {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02}, {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, {BRIDGE, M5602_XB_GPIO_DIR, 0x05}, {BRIDGE, M5602_XB_GPIO_DAT, 0x00}, {SENSOR, PO1030_AUTOCTRL2, 0x04}, {SENSOR, PO1030_OUTFORMCTRL2, PO1030_RAW_RGB_BAYER}, {SENSOR, PO1030_AUTOCTRL1, PO1030_WEIGHT_WIN_2X}, {SENSOR, PO1030_CONTROL2, 0x03}, {SENSOR, 0x21, 0x90}, {SENSOR, PO1030_YTARGET, 0x60}, {SENSOR, 0x59, 0x13}, {SENSOR, PO1030_OUTFORMCTRL1, PO1030_HREF_ENABLE}, {SENSOR, PO1030_EDGE_ENH_OFF, 0x00}, {SENSOR, PO1030_EGA, 0x80}, {SENSOR, 0x78, 0x14}, {SENSOR, 0x6f, 0x01}, {SENSOR, PO1030_GLOBALGAINMAX, 0x14}, {SENSOR, PO1030_Cb_U_GAIN, 0x38}, {SENSOR, PO1030_Cr_V_GAIN, 0x38}, {SENSOR, PO1030_CONTROL1, PO1030_SHUTTER_MODE | PO1030_AUTO_SUBSAMPLING | PO1030_FRAME_EQUAL}, {SENSOR, PO1030_GC0, 0x10}, {SENSOR, PO1030_GC1, 0x20}, {SENSOR, PO1030_GC2, 0x40}, {SENSOR, PO1030_GC3, 0x60}, {SENSOR, PO1030_GC4, 0x80}, {SENSOR, PO1030_GC5, 0xa0}, {SENSOR, PO1030_GC6, 0xc0}, {SENSOR, PO1030_GC7, 0xff}, /* Set the width to 751 */ {SENSOR, PO1030_FRAMEWIDTH_H, 0x02}, {SENSOR, PO1030_FRAMEWIDTH_L, 0xef}, /* Set the height to 540 */ {SENSOR, PO1030_FRAMEHEIGHT_H, 0x02}, {SENSOR, PO1030_FRAMEHEIGHT_L, 0x1c}, /* Set the x window to 1 */ {SENSOR, PO1030_WINDOWX_H, 0x00}, {SENSOR, PO1030_WINDOWX_L, 0x01}, /* Set the y window to 1 */ {SENSOR, PO1030_WINDOWY_H, 0x00}, {SENSOR, PO1030_WINDOWY_L, 0x01}, /* with a very low lighted environment increase the exposure but * decrease the FPS (Frame Per Second) */ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00}, {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, {BRIDGE, M5602_XB_GPIO_DIR, 0x05}, {BRIDGE, M5602_XB_GPIO_DAT, 0x00}, {BRIDGE, M5602_XB_GPIO_EN_H, 0x06}, {BRIDGE, 
M5602_XB_GPIO_EN_L, 0x00}, }; static struct v4l2_pix_format po1030_modes[] = { { 640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .sizeimage = 640 * 480, .bytesperline = 640, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2 } }; static const struct v4l2_ctrl_ops po1030_ctrl_ops = { .s_ctrl = po1030_s_ctrl, }; static const struct v4l2_ctrl_config po1030_greenbal_cfg = { .ops = &po1030_ctrl_ops, .id = M5602_V4L2_CID_GREEN_BALANCE, .name = "Green Balance", .type = V4L2_CTRL_TYPE_INTEGER, .min = 0, .max = 255, .step = 1, .def = PO1030_GREEN_GAIN_DEFAULT, .flags = V4L2_CTRL_FLAG_SLIDER, }; int po1030_probe(struct sd *sd) { u8 dev_id_h = 0, i; int err; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; if (force_sensor) { if (force_sensor == PO1030_SENSOR) { pr_info("Forcing a %s sensor\n", po1030.name); goto sensor_found; } /* If we want to force another sensor, don't try to probe this * one */ return -ENODEV; } gspca_dbg(gspca_dev, D_PROBE, "Probing for a po1030 sensor\n"); /* Run the pre-init to actually probe the unit */ for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) { u8 data = preinit_po1030[i][2]; if (preinit_po1030[i][0] == SENSOR) err = m5602_write_sensor(sd, preinit_po1030[i][1], &data, 1); else err = m5602_write_bridge(sd, preinit_po1030[i][1], data); if (err < 0) return err; } if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1)) return -ENODEV; if (dev_id_h == 0x30) { pr_info("Detected a po1030 sensor\n"); goto sensor_found; } return -ENODEV; sensor_found: sd->gspca_dev.cam.cam_mode = po1030_modes; sd->gspca_dev.cam.nmodes = ARRAY_SIZE(po1030_modes); return 0; } int po1030_init(struct sd *sd) { int i, err = 0; /* Init the sensor */ for (i = 0; i < ARRAY_SIZE(init_po1030) && !err; i++) { u8 data[2] = {0x00, 0x00}; switch (init_po1030[i][0]) { case BRIDGE: err = m5602_write_bridge(sd, init_po1030[i][1], init_po1030[i][2]); break; case SENSOR: data[0] = init_po1030[i][2]; err = m5602_write_sensor(sd, init_po1030[i][1], data, 1); break; default: pr_info("Invalid stream command, exiting init\n"); return -EINVAL; } } if (err < 0) return err; if (dump_sensor) po1030_dump_registers(sd); return 0; } int po1030_init_controls(struct sd *sd) { struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler; sd->gspca_dev.vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 9); sd->auto_white_bal = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 0); sd->green_bal = v4l2_ctrl_new_custom(hdl, &po1030_greenbal_cfg, NULL); sd->red_bal = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_RED_BALANCE, 0, 255, 1, PO1030_RED_GAIN_DEFAULT); sd->blue_bal = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_BLUE_BALANCE, 0, 255, 1, PO1030_BLUE_GAIN_DEFAULT); sd->autoexpo = v4l2_ctrl_new_std_menu(hdl, &po1030_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0, V4L2_EXPOSURE_MANUAL); sd->expo = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_EXPOSURE, 0, 0x2ff, 1, PO1030_EXPOSURE_DEFAULT); sd->gain = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_GAIN, 0, 0x4f, 1, PO1030_GLOBAL_GAIN_DEFAULT); sd->hflip = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); sd->vflip = v4l2_ctrl_new_std(hdl, &po1030_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_auto_cluster(4, &sd->auto_white_bal, 0, false); v4l2_ctrl_auto_cluster(2, &sd->autoexpo, 0, false); v4l2_ctrl_cluster(2, &sd->hflip); return 0; } int po1030_start(struct sd *sd) { struct cam *cam = &sd->gspca_dev.cam; int i, err = 0; int width = 
cam->cam_mode[sd->gspca_dev.curr_mode].width; int height = cam->cam_mode[sd->gspca_dev.curr_mode].height; int ver_offs = cam->cam_mode[sd->gspca_dev.curr_mode].priv; u8 data; switch (width) { case 320: data = PO1030_SUBSAMPLING; err = m5602_write_sensor(sd, PO1030_CONTROL3, &data, 1); if (err < 0) return err; data = ((width + 3) >> 8) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_H, &data, 1); if (err < 0) return err; data = (width + 3) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_L, &data, 1); if (err < 0) return err; data = ((height + 1) >> 8) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_H, &data, 1); if (err < 0) return err; data = (height + 1) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_L, &data, 1); height += 6; width -= 1; break; case 640: data = 0; err = m5602_write_sensor(sd, PO1030_CONTROL3, &data, 1); if (err < 0) return err; data = ((width + 7) >> 8) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_H, &data, 1); if (err < 0) return err; data = (width + 7) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_L, &data, 1); if (err < 0) return err; data = ((height + 3) >> 8) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_H, &data, 1); if (err < 0) return err; data = (height + 3) & 0xff; err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_L, &data, 1); height += 12; width -= 2; break; } err = m5602_write_bridge(sd, M5602_XB_SENSOR_TYPE, 0x0c); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_LINE_OF_FRAME_H, 0x81); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_PIX_OF_LINE_H, 0x82); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0x01); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, ((ver_offs >> 8) & 0xff)); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (ver_offs & 0xff)); if (err < 0) return err; for (i = 0; i < 2 && !err; i++) err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height >> 8) & 0xff); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height & 0xff)); if (err < 0) return err; for (i = 0; i < 2 && !err; i++) err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0); for (i = 0; i < 2 && !err; i++) err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0); for (i = 0; i < 2 && !err; i++) err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, 0); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, (width >> 8) & 0xff); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, (width & 0xff)); if (err < 0) return err; err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0); return err; } static int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; gspca_dbg(gspca_dev, D_CONF, "Set exposure to %d\n", val & 0xffff); i2c_data = ((val & 0xff00) >> 8); gspca_dbg(gspca_dev, D_CONF, "Set exposure to high byte to 0x%x\n", i2c_data); err = m5602_write_sensor(sd, PO1030_INTEGLINES_H, &i2c_data, 1); if (err < 0) return err; i2c_data = (val & 0xff); gspca_dbg(gspca_dev, D_CONF, "Set exposure to low byte to 0x%x\n", i2c_data); err = m5602_write_sensor(sd, PO1030_INTEGLINES_M, &i2c_data, 1); return err; } static int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; i2c_data = val & 0xff; gspca_dbg(gspca_dev, D_CONF, "Set global gain to %d\n", 
i2c_data); err = m5602_write_sensor(sd, PO1030_GLOBALGAIN, &i2c_data, 1); return err; } static int po1030_set_hvflip(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; gspca_dbg(gspca_dev, D_CONF, "Set hvflip %d %d\n", sd->hflip->val, sd->vflip->val); err = m5602_read_sensor(sd, PO1030_CONTROL2, &i2c_data, 1); if (err < 0) return err; i2c_data = (0x3f & i2c_data) | (sd->hflip->val << 7) | (sd->vflip->val << 6); err = m5602_write_sensor(sd, PO1030_CONTROL2, &i2c_data, 1); return err; } static int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; i2c_data = val & 0xff; gspca_dbg(gspca_dev, D_CONF, "Set red gain to %d\n", i2c_data); err = m5602_write_sensor(sd, PO1030_RED_GAIN, &i2c_data, 1); return err; } static int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; i2c_data = val & 0xff; gspca_dbg(gspca_dev, D_CONF, "Set blue gain to %d\n", i2c_data); err = m5602_write_sensor(sd, PO1030_BLUE_GAIN, &i2c_data, 1); return err; } static int po1030_set_green_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; i2c_data = val & 0xff; gspca_dbg(gspca_dev, D_CONF, "Set green gain to %d\n", i2c_data); err = m5602_write_sensor(sd, PO1030_GREEN_1_GAIN, &i2c_data, 1); if (err < 0) return err; return m5602_write_sensor(sd, PO1030_GREEN_2_GAIN, &i2c_data, 1); } static int po1030_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; err = m5602_read_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1); if (err < 0) return err; gspca_dbg(gspca_dev, D_CONF, "Set auto white balance to %d\n", val); i2c_data = (i2c_data & 0xfe) | (val & 0x01); err = m5602_write_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1); return err; } static int po1030_set_auto_exposure(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 i2c_data; int err; err = m5602_read_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1); if (err < 0) return err; gspca_dbg(gspca_dev, D_CONF, "Set auto exposure to %d\n", val); val = (val == V4L2_EXPOSURE_AUTO); i2c_data = (i2c_data & 0xfd) | ((val & 0x01) << 1); return m5602_write_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1); } void po1030_disconnect(struct sd *sd) { sd->sensor = NULL; } static int po1030_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *) gspca_dev; int err; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_AUTO_WHITE_BALANCE: err = po1030_set_auto_white_balance(gspca_dev, ctrl->val); if (err || ctrl->val) return err; err = po1030_set_green_balance(gspca_dev, sd->green_bal->val); if (err) return err; err = po1030_set_red_balance(gspca_dev, sd->red_bal->val); if (err) return err; err = po1030_set_blue_balance(gspca_dev, sd->blue_bal->val); break; case V4L2_CID_EXPOSURE_AUTO: err = po1030_set_auto_exposure(gspca_dev, ctrl->val); if (err || ctrl->val == V4L2_EXPOSURE_AUTO) return err; err = po1030_set_exposure(gspca_dev, sd->expo->val); break; case V4L2_CID_GAIN: err = po1030_set_gain(gspca_dev, ctrl->val); break; case V4L2_CID_HFLIP: err = po1030_set_hvflip(gspca_dev); break; default: return -EINVAL; } return err; } static void po1030_dump_registers(struct sd *sd) { int address; u8 value = 0; pr_info("Dumping the po1030 sensor core 
registers\n"); for (address = 0; address < 0x7f; address++) { m5602_read_sensor(sd, address, &value, 1); pr_info("register 0x%x contains 0x%x\n", address, value); } pr_info("po1030 register state dump complete\n"); pr_info("Probing which registers are read/write\n"); for (address = 0; address < 0xff; address++) { u8 old_value, ctrl_value; u8 test_value[2] = {0xff, 0xff}; m5602_read_sensor(sd, address, &old_value, 1); m5602_write_sensor(sd, address, test_value, 1); m5602_read_sensor(sd, address, &ctrl_value, 1); if (ctrl_value == test_value[0]) pr_info("register 0x%x is writeable\n", address); else pr_info("register 0x%x is read only\n", address); /* Restore original value */ m5602_write_sensor(sd, address, &old_value, 1); } }
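/*
 * Editor's sketch: a quick illustration of the window-register arithmetic in
 * po1030_start(). For the 640x480 mode the sensor window is programmed as
 * width + 7 and height + 3, split into high/low register bytes. This
 * stand-alone program (hypothetical, not driver code) prints the values that
 * end up in PO1030_WINDOWWIDTH_H/L and PO1030_WINDOWHEIGHT_H/L.
 */
#include <stdio.h>

int main(void)
{
	unsigned int win_w = 640 + 7;	/* 0x0287 */
	unsigned int win_h = 480 + 3;	/* 0x01e3 */

	printf("WINDOWWIDTH_H=0x%02x WINDOWWIDTH_L=0x%02x\n",
	       (win_w >> 8) & 0xff, win_w & 0xff);
	printf("WINDOWHEIGHT_H=0x%02x WINDOWHEIGHT_L=0x%02x\n",
	       (win_h >> 8) & 0xff, win_h & 0xff);
	return 0;
}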
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM spi #if !defined(_TRACE_SPI_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SPI_H #include <linux/ktime.h> #include <linux/tracepoint.h> DECLARE_EVENT_CLASS(spi_controller, TP_PROTO(struct spi_controller *controller), TP_ARGS(controller), TP_STRUCT__entry( __field( int, bus_num ) ), TP_fast_assign( __entry->bus_num = controller->bus_num; ), TP_printk("spi%d", (int)__entry->bus_num) ); DEFINE_EVENT(spi_controller, spi_controller_idle, TP_PROTO(struct spi_controller *controller), TP_ARGS(controller) ); DEFINE_EVENT(spi_controller, spi_controller_busy, TP_PROTO(struct spi_controller *controller), TP_ARGS(controller) ); TRACE_EVENT(spi_setup, TP_PROTO(struct spi_device *spi, int status), TP_ARGS(spi, status), TP_STRUCT__entry( __field(int, bus_num) __field(int, chip_select) __field(unsigned long, mode) __field(unsigned int, bits_per_word) __field(unsigned int, max_speed_hz) __field(int, status) ), TP_fast_assign( __entry->bus_num = spi->controller->bus_num; __entry->chip_select = spi_get_chipselect(spi, 0); __entry->mode = spi->mode; __entry->bits_per_word = spi->bits_per_word; __entry->max_speed_hz = spi->max_speed_hz; __entry->status = status; ), TP_printk("spi%d.%d setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d", __entry->bus_num, __entry->chip_select, (__entry->mode & SPI_MODE_X_MASK), (__entry->mode & SPI_CS_HIGH) ? "cs_high, " : "", (__entry->mode & SPI_LSB_FIRST) ? "lsb, " : "", (__entry->mode & SPI_3WIRE) ? "3wire, " : "", (__entry->mode & SPI_LOOP) ? "loopback, " : "", __entry->bits_per_word, __entry->max_speed_hz, __entry->status) ); TRACE_EVENT(spi_set_cs, TP_PROTO(struct spi_device *spi, bool enable), TP_ARGS(spi, enable), TP_STRUCT__entry( __field(int, bus_num) __field(int, chip_select) __field(unsigned long, mode) __field(bool, enable) ), TP_fast_assign( __entry->bus_num = spi->controller->bus_num; __entry->chip_select = spi_get_chipselect(spi, 0); __entry->mode = spi->mode; __entry->enable = enable; ), TP_printk("spi%d.%d %s%s", __entry->bus_num, __entry->chip_select, __entry->enable ? "activate" : "deactivate", (__entry->mode & SPI_CS_HIGH) ?
", cs_high" : "") ); DECLARE_EVENT_CLASS(spi_message, TP_PROTO(struct spi_message *msg), TP_ARGS(msg), TP_STRUCT__entry( __field( int, bus_num ) __field( int, chip_select ) __field( struct spi_message *, msg ) ), TP_fast_assign( __entry->bus_num = msg->spi->controller->bus_num; __entry->chip_select = spi_get_chipselect(msg->spi, 0); __entry->msg = msg; ), TP_printk("spi%d.%d %p", (int)__entry->bus_num, (int)__entry->chip_select, (struct spi_message *)__entry->msg) ); DEFINE_EVENT(spi_message, spi_message_submit, TP_PROTO(struct spi_message *msg), TP_ARGS(msg) ); DEFINE_EVENT(spi_message, spi_message_start, TP_PROTO(struct spi_message *msg), TP_ARGS(msg) ); TRACE_EVENT(spi_message_done, TP_PROTO(struct spi_message *msg), TP_ARGS(msg), TP_STRUCT__entry( __field( int, bus_num ) __field( int, chip_select ) __field( struct spi_message *, msg ) __field( unsigned, frame ) __field( unsigned, actual ) ), TP_fast_assign( __entry->bus_num = msg->spi->controller->bus_num; __entry->chip_select = spi_get_chipselect(msg->spi, 0); __entry->msg = msg; __entry->frame = msg->frame_length; __entry->actual = msg->actual_length; ), TP_printk("spi%d.%d %p len=%u/%u", (int)__entry->bus_num, (int)__entry->chip_select, (struct spi_message *)__entry->msg, (unsigned)__entry->actual, (unsigned)__entry->frame) ); /* * Consider a buffer valid if non-NULL and if it doesn't match the dummy buffer * that only exist to work with controllers that have SPI_CONTROLLER_MUST_TX or * SPI_CONTROLLER_MUST_RX. */ #define spi_valid_txbuf(msg, xfer) \ (xfer->tx_buf && xfer->tx_buf != msg->spi->controller->dummy_tx) #define spi_valid_rxbuf(msg, xfer) \ (xfer->rx_buf && xfer->rx_buf != msg->spi->controller->dummy_rx) DECLARE_EVENT_CLASS(spi_transfer, TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), TP_ARGS(msg, xfer), TP_STRUCT__entry( __field( int, bus_num ) __field( int, chip_select ) __field( struct spi_transfer *, xfer ) __field( int, len ) __dynamic_array(u8, rx_buf, spi_valid_rxbuf(msg, xfer) ? (xfer->len < 64 ? xfer->len : 64) : 0) __dynamic_array(u8, tx_buf, spi_valid_txbuf(msg, xfer) ? (xfer->len < 64 ? xfer->len : 64) : 0) ), TP_fast_assign( __entry->bus_num = msg->spi->controller->bus_num; __entry->chip_select = spi_get_chipselect(msg->spi, 0); __entry->xfer = xfer; __entry->len = xfer->len; if (spi_valid_txbuf(msg, xfer)) memcpy(__get_dynamic_array(tx_buf), xfer->tx_buf, __get_dynamic_array_len(tx_buf)); if (spi_valid_rxbuf(msg, xfer)) memcpy(__get_dynamic_array(rx_buf), xfer->rx_buf, __get_dynamic_array_len(rx_buf)); ), TP_printk("spi%d.%d %p len=%d tx=[%*phD] rx=[%*phD]", __entry->bus_num, __entry->chip_select, __entry->xfer, __entry->len, __get_dynamic_array_len(tx_buf), __get_dynamic_array(tx_buf), __get_dynamic_array_len(rx_buf), __get_dynamic_array(rx_buf)) ); DEFINE_EVENT(spi_transfer, spi_transfer_start, TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), TP_ARGS(msg, xfer) ); DEFINE_EVENT(spi_transfer, spi_transfer_stop, TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer), TP_ARGS(msg, xfer) ); #endif /* _TRACE_POWER_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
/* * linux/drivers/video/fb_notify.c * * Copyright (C) 2006 Antonino Daplas <adaplas@pol.net> * * 2001 - Documented with DocBook * - Brad Douglas <brad@neruo.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/fb.h> #include <linux/notifier.h> #include <linux/export.h> static BLOCKING_NOTIFIER_HEAD(fb_notifier_list); /** * fb_register_client - register a client notifier * @nb: notifier block to callback on events * * Return: 0 on success, negative error code on failure. */ int fb_register_client(struct notifier_block *nb) { return blocking_notifier_chain_register(&fb_notifier_list, nb); } EXPORT_SYMBOL(fb_register_client); /** * fb_unregister_client - unregister a client notifier * @nb: notifier block to callback on events * * Return: 0 on success, negative error code on failure. */ int fb_unregister_client(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&fb_notifier_list, nb); } EXPORT_SYMBOL(fb_unregister_client); /** * fb_notifier_call_chain - notify clients of fb_events * @val: value passed to callback * @v: pointer passed to callback * * Return: The return value of the last notifier function */ int fb_notifier_call_chain(unsigned long val, void *v) { return blocking_notifier_call_chain(&fb_notifier_list, val, v); } EXPORT_SYMBOL_GPL(fb_notifier_call_chain);
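/*
 * Editor's sketch: usage of the notifier API above. A client registers a
 * notifier_block and its callback then runs for every event posted through
 * fb_notifier_call_chain(). The callback and variable names here are
 * hypothetical; only fb_register_client()/fb_unregister_client() are the
 * real entry points.
 */
#include <linux/fb.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_fb_notify(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	pr_info("fb event %lu\n", action);
	return NOTIFY_OK;
}

static struct notifier_block example_fb_nb = {
	.notifier_call = example_fb_notify,
};

/* fb_register_client(&example_fb_nb) at init time,
 * fb_unregister_client(&example_fb_nb) on teardown. */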
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IEEE802154_CORE_H #define __IEEE802154_CORE_H #include <net/cfg802154.h> struct cfg802154_registered_device { const struct cfg802154_ops *ops; struct list_head list; /* wpan_phy index, internal only */ int wpan_phy_idx; /* also protected by devlist_mtx */ int opencount; wait_queue_head_t dev_wait; /* protected by RTNL only */ int num_running_ifaces; /* associated wpan interfaces, protected by rtnl or RCU */ struct list_head wpan_dev_list; int devlist_generation, wpan_dev_id; /* must be last because of the way we do wpan_phy_priv(), * and it should at least be aligned to NETDEV_ALIGN */ struct wpan_phy wpan_phy __aligned(NETDEV_ALIGN); }; static inline struct cfg802154_registered_device * wpan_phy_to_rdev(struct wpan_phy *wpan_phy) { BUG_ON(!wpan_phy); return container_of(wpan_phy, struct cfg802154_registered_device, wpan_phy); } extern struct list_head cfg802154_rdev_list; extern int cfg802154_rdev_list_generation; int cfg802154_switch_netns(struct cfg802154_registered_device *rdev, struct net *net); /* free object */ void cfg802154_dev_free(struct cfg802154_registered_device *rdev); struct cfg802154_registered_device * cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx); struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx); #endif /* __IEEE802154_CORE_H */
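/*
 * Editor's sketch: the "must be last" constraint on the embedded wpan_phy
 * exists because the driver-private area handed out by wpan_phy_priv()
 * lives directly after the wrapper struct, and the core recovers the
 * wrapper with container_of(), as wpan_phy_to_rdev() does above. A
 * self-contained toy model of that layout (all names illustrative, not
 * kernel API):
 */
#include <stddef.h>
#include <stdio.h>

struct toy_phy { int idx; };

struct toy_rdev {
	int opencount;
	struct toy_phy phy;	/* must be last, as in the real struct */
};

#define toy_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct toy_rdev rdev = { .opencount = 1, .phy = { .idx = 7 } };
	struct toy_phy *phy = &rdev.phy;

	/* Recover the wrapper from a pointer to the embedded member. */
	printf("opencount=%d\n",
	       toy_container_of(phy, struct toy_rdev, phy)->opencount);
	return 0;
}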
// SPDX-License-Identifier: GPL-2.0 #define pr_fmt(fmt) "irq: " fmt #include <linux/acpi.h> #include <linux/debugfs.h> #include <linux/hardirq.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdesc.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/topology.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/fs.h> static LIST_HEAD(irq_domain_list); static DEFINE_MUTEX(irq_domain_mutex); static struct irq_domain *irq_default_domain; static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc, const struct irq_affinity_desc *affinity); static void irq_domain_check_hierarchy(struct irq_domain *domain); static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq); struct irqchip_fwid { struct fwnode_handle fwnode;
unsigned int type; char *name; phys_addr_t *pa; }; #ifdef CONFIG_GENERIC_IRQ_DEBUGFS static void debugfs_add_domain_dir(struct irq_domain *d); static void debugfs_remove_domain_dir(struct irq_domain *d); #else static inline void debugfs_add_domain_dir(struct irq_domain *d) { } static inline void debugfs_remove_domain_dir(struct irq_domain *d) { } #endif static const char *irqchip_fwnode_get_name(const struct fwnode_handle *fwnode) { struct irqchip_fwid *fwid = container_of(fwnode, struct irqchip_fwid, fwnode); return fwid->name; } const struct fwnode_operations irqchip_fwnode_ops = { .get_name = irqchip_fwnode_get_name, }; EXPORT_SYMBOL_GPL(irqchip_fwnode_ops); /** * __irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for * identifying an irq domain * @type: Type of irqchip_fwnode. See linux/irqdomain.h * @id: Optional user provided id if name != NULL * @name: Optional user provided domain name * @pa: Optional user-provided physical address * * Allocate a struct irqchip_fwid, and return a pointer to the embedded * fwnode_handle (or NULL on failure). * * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are * solely to transport name information to irqdomain creation code. The * node is not stored. For other types the pointer is kept in the irq * domain struct. */ struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id, const char *name, phys_addr_t *pa) { struct irqchip_fwid *fwid; char *n; fwid = kzalloc(sizeof(*fwid), GFP_KERNEL); switch (type) { case IRQCHIP_FWNODE_NAMED: n = kasprintf(GFP_KERNEL, "%s", name); break; case IRQCHIP_FWNODE_NAMED_ID: n = kasprintf(GFP_KERNEL, "%s-%d", name, id); break; default: n = kasprintf(GFP_KERNEL, "irqchip@%pa", pa); break; } if (!fwid || !n) { kfree(fwid); kfree(n); return NULL; } fwid->type = type; fwid->name = n; fwid->pa = pa; fwnode_init(&fwid->fwnode, &irqchip_fwnode_ops); return &fwid->fwnode; } EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode); /** * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle * @fwnode: fwnode_handle to free * * Free a fwnode_handle allocated with irq_domain_alloc_fwnode. */ void irq_domain_free_fwnode(struct fwnode_handle *fwnode) { struct irqchip_fwid *fwid; if (!fwnode || WARN_ON(!is_fwnode_irqchip(fwnode))) return; fwid = container_of(fwnode, struct irqchip_fwid, fwnode); kfree(fwid->name); kfree(fwid); } EXPORT_SYMBOL_GPL(irq_domain_free_fwnode); static int alloc_name(struct irq_domain *domain, char *base, enum irq_domain_bus_token bus_token) { if (bus_token == DOMAIN_BUS_ANY) domain->name = kasprintf(GFP_KERNEL, "%s", base); else domain->name = kasprintf(GFP_KERNEL, "%s-%d", base, bus_token); if (!domain->name) return -ENOMEM; domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED; return 0; } static int alloc_fwnode_name(struct irq_domain *domain, const struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token, const char *suffix) { const char *sep = suffix ? "-" : ""; const char *suf = suffix ? : ""; char *name; if (bus_token == DOMAIN_BUS_ANY) name = kasprintf(GFP_KERNEL, "%pfw%s%s", fwnode, sep, suf); else name = kasprintf(GFP_KERNEL, "%pfw%s%s-%d", fwnode, sep, suf, bus_token); if (!name) return -ENOMEM; /* * fwnode paths contain '/', which debugfs is legitimately unhappy * about. Replace them with ':', which does the trick and is not as * offensive as '\'... 
*/ domain->name = strreplace(name, '/', ':'); domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED; return 0; } static int alloc_unknown_name(struct irq_domain *domain, enum irq_domain_bus_token bus_token) { static atomic_t unknown_domains; int id = atomic_inc_return(&unknown_domains); if (bus_token == DOMAIN_BUS_ANY) domain->name = kasprintf(GFP_KERNEL, "unknown-%d", id); else domain->name = kasprintf(GFP_KERNEL, "unknown-%d-%d", id, bus_token); if (!domain->name) return -ENOMEM; domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED; return 0; } static int irq_domain_set_name(struct irq_domain *domain, const struct irq_domain_info *info) { enum irq_domain_bus_token bus_token = info->bus_token; const struct fwnode_handle *fwnode = info->fwnode; if (is_fwnode_irqchip(fwnode)) { struct irqchip_fwid *fwid = container_of(fwnode, struct irqchip_fwid, fwnode); /* * The name_suffix is only intended to be used to avoid a name * collision when multiple domains are created for a single * device and the name is picked using a real device node. * (Typical use-case is regmap-IRQ controllers for devices * providing more than one physical IRQ.) There should be no * need to use name_suffix with irqchip-fwnode. */ if (info->name_suffix) return -EINVAL; switch (fwid->type) { case IRQCHIP_FWNODE_NAMED: case IRQCHIP_FWNODE_NAMED_ID: return alloc_name(domain, fwid->name, bus_token); default: domain->name = fwid->name; if (bus_token != DOMAIN_BUS_ANY) return alloc_name(domain, fwid->name, bus_token); } } else if (is_of_node(fwnode) || is_acpi_device_node(fwnode) || is_software_node(fwnode)) { return alloc_fwnode_name(domain, fwnode, bus_token, info->name_suffix); } if (domain->name) return 0; if (fwnode) pr_err("Invalid fwnode type for irqdomain\n"); return alloc_unknown_name(domain, bus_token); } static struct irq_domain *__irq_domain_create(const struct irq_domain_info *info) { struct irq_domain *domain; int err; if (WARN_ON((info->size && info->direct_max) || (!IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) && info->direct_max) || (info->direct_max && info->direct_max != info->hwirq_max))) return ERR_PTR(-EINVAL); domain = kzalloc_node(struct_size(domain, revmap, info->size), GFP_KERNEL, of_node_to_nid(to_of_node(info->fwnode))); if (!domain) return ERR_PTR(-ENOMEM); err = irq_domain_set_name(domain, info); if (err) { kfree(domain); return ERR_PTR(err); } domain->fwnode = fwnode_handle_get(info->fwnode); fwnode_dev_initialized(domain->fwnode, true); /* Fill structure */ INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); domain->ops = info->ops; domain->host_data = info->host_data; domain->bus_token = info->bus_token; domain->hwirq_max = info->hwirq_max; if (info->direct_max) domain->flags |= IRQ_DOMAIN_FLAG_NO_MAP; domain->revmap_size = info->size; /* * Hierarchical domains use the domain lock of the root domain * (innermost domain). * * For non-hierarchical domains (as for root domains), the root * pointer is set to the domain itself so that &domain->root->mutex * always points to the right lock. 
*/ mutex_init(&domain->mutex); domain->root = domain; irq_domain_check_hierarchy(domain); return domain; } static void __irq_domain_publish(struct irq_domain *domain) { mutex_lock(&irq_domain_mutex); debugfs_add_domain_dir(domain); list_add(&domain->link, &irq_domain_list); mutex_unlock(&irq_domain_mutex); pr_debug("Added domain %s\n", domain->name); } static void irq_domain_free(struct irq_domain *domain) { fwnode_dev_initialized(domain->fwnode, false); fwnode_handle_put(domain->fwnode); if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED) kfree(domain->name); kfree(domain); } static void irq_domain_instantiate_descs(const struct irq_domain_info *info) { if (!IS_ENABLED(CONFIG_SPARSE_IRQ)) return; if (irq_alloc_descs(info->virq_base, info->virq_base, info->size, of_node_to_nid(to_of_node(info->fwnode))) < 0) { pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", info->virq_base); } } static struct irq_domain *__irq_domain_instantiate(const struct irq_domain_info *info, bool cond_alloc_descs, bool force_associate) { struct irq_domain *domain; int err; domain = __irq_domain_create(info); if (IS_ERR(domain)) return domain; domain->flags |= info->domain_flags; domain->exit = info->exit; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY if (info->parent) { domain->root = info->parent->root; domain->parent = info->parent; } #endif if (info->dgc_info) { err = irq_domain_alloc_generic_chips(domain, info->dgc_info); if (err) goto err_domain_free; } if (info->init) { err = info->init(domain); if (err) goto err_domain_gc_remove; } __irq_domain_publish(domain); if (cond_alloc_descs && info->virq_base > 0) irq_domain_instantiate_descs(info); /* * Legacy interrupt domains have a fixed Linux interrupt number * associated. Other interrupt domains can request association by * providing a Linux interrupt number > 0. */ if (force_associate || info->virq_base > 0) { irq_domain_associate_many(domain, info->virq_base, info->hwirq_base, info->size - info->hwirq_base); } return domain; err_domain_gc_remove: if (info->dgc_info) irq_domain_remove_generic_chips(domain); err_domain_free: irq_domain_free(domain); return ERR_PTR(err); } /** * irq_domain_instantiate() - Instantiate a new irq domain data structure * @info: Domain information pointer pointing to the information for this domain * * Return: A pointer to the instantiated irq domain or an ERR_PTR value. */ struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info) { return __irq_domain_instantiate(info, false, false); } EXPORT_SYMBOL_GPL(irq_domain_instantiate); /** * irq_domain_remove() - Remove an irq domain. * @domain: domain to remove * * This routine is used to remove an irq domain. The caller must ensure * that all mappings within the domain have been disposed of prior to * use, depending on the revmap type. */ void irq_domain_remove(struct irq_domain *domain) { if (domain->exit) domain->exit(domain); mutex_lock(&irq_domain_mutex); debugfs_remove_domain_dir(domain); WARN_ON(!radix_tree_empty(&domain->revmap_tree)); list_del(&domain->link); /* * If the going away domain is the default one, reset it. 
*/ if (unlikely(irq_default_domain == domain)) irq_set_default_domain(NULL); mutex_unlock(&irq_domain_mutex); if (domain->flags & IRQ_DOMAIN_FLAG_DESTROY_GC) irq_domain_remove_generic_chips(domain); pr_debug("Removed domain %s\n", domain->name); irq_domain_free(domain); } EXPORT_SYMBOL_GPL(irq_domain_remove); void irq_domain_update_bus_token(struct irq_domain *domain, enum irq_domain_bus_token bus_token) { char *name; if (domain->bus_token == bus_token) return; mutex_lock(&irq_domain_mutex); domain->bus_token = bus_token; name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token); if (!name) { mutex_unlock(&irq_domain_mutex); return; } debugfs_remove_domain_dir(domain); if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED) kfree(domain->name); else domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED; domain->name = name; debugfs_add_domain_dir(domain); mutex_unlock(&irq_domain_mutex); } EXPORT_SYMBOL_GPL(irq_domain_update_bus_token); /** * irq_domain_create_simple() - Register an irq_domain and optionally map a range of irqs * @fwnode: firmware node for the interrupt controller * @size: total number of irqs in mapping * @first_irq: first number of irq block assigned to the domain, * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then * pre-map all of the irqs in the domain to virqs starting at first_irq. * @ops: domain callbacks * @host_data: Controller private data pointer * * Allocates an irq_domain and, if first_irq is positive, also allocates * irq_descs and maps all of the hwirqs to virqs starting at first_irq. * * This is intended to implement the expected behaviour for most * interrupt controllers. If device tree is used, then first_irq will be 0 and * irqs get mapped dynamically on the fly. However, if the controller requires * static virq assignments (non-DT boot) then it will set that up correctly. */ struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, unsigned int size, unsigned int first_irq, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain_info info = { .fwnode = fwnode, .size = size, .hwirq_max = size, .virq_base = first_irq, .ops = ops, .host_data = host_data, }; struct irq_domain *domain = __irq_domain_instantiate(&info, true, false); return IS_ERR(domain) ? NULL : domain; } EXPORT_SYMBOL_GPL(irq_domain_create_simple); /** * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain. * @of_node: pointer to interrupt controller's device tree node. * @size: total number of irqs in legacy mapping * @first_irq: first number of irq block assigned to the domain * @first_hwirq: first hwirq number to use for the translation. Should normally * be '0', but a positive integer can be used if the effective * hwirq numbering does not begin at zero. * @ops: map/unmap domain callbacks * @host_data: Controller private data pointer * * Note: the map() callback will be called before this function returns * for all legacy interrupts except 0 (which is always the invalid irq for * a legacy controller).
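 *
 * A minimal usage sketch (hypothetical legacy PIC driver; my_pic_ops and
 * my_pic are illustrative names, not from this file):
 *
 *	domain = irq_domain_add_legacy(np, 16, 16, 0, &my_pic_ops, my_pic);
 *	if (!domain)
 *		return -ENOMEM;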
*/ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, unsigned int size, unsigned int first_irq, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data) { return irq_domain_create_legacy(of_node_to_fwnode(of_node), size, first_irq, first_hwirq, ops, host_data); } EXPORT_SYMBOL_GPL(irq_domain_add_legacy); struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, unsigned int size, unsigned int first_irq, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain_info info = { .fwnode = fwnode, .size = first_hwirq + size, .hwirq_max = first_hwirq + size, .hwirq_base = first_hwirq, .virq_base = first_irq, .ops = ops, .host_data = host_data, }; struct irq_domain *domain = __irq_domain_instantiate(&info, false, true); return IS_ERR(domain) ? NULL : domain; } EXPORT_SYMBOL_GPL(irq_domain_create_legacy); /** * irq_find_matching_fwspec() - Locates a domain for a given fwspec * @fwspec: FW specifier for an interrupt * @bus_token: domain-specific data */ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token) { struct irq_domain *h, *found = NULL; struct fwnode_handle *fwnode = fwspec->fwnode; int rc; /* * We might want to match the legacy controller last since * it might potentially be set to match all interrupts in * the absence of a device node. This hasn't been a problem * so far, though... * * bus_token == DOMAIN_BUS_ANY matches any domain, any other * values must generate an exact match for the domain to be * selected. */ mutex_lock(&irq_domain_mutex); list_for_each_entry(h, &irq_domain_list, link) { if (h->ops->select && bus_token != DOMAIN_BUS_ANY) rc = h->ops->select(h, fwspec, bus_token); else if (h->ops->match) rc = h->ops->match(h, to_of_node(fwnode), bus_token); else rc = ((fwnode != NULL) && (h->fwnode == fwnode) && ((bus_token == DOMAIN_BUS_ANY) || (h->bus_token == bus_token))); if (rc) { found = h; break; } } mutex_unlock(&irq_domain_mutex); return found; } EXPORT_SYMBOL_GPL(irq_find_matching_fwspec); /** * irq_set_default_domain() - Set a "default" irq domain * @domain: default domain pointer * * For convenience, it's possible to set a "default" domain that will be used * whenever NULL is passed to irq_create_mapping(). It makes life easier for * platforms that want to manipulate a few hard-coded interrupt numbers that * aren't properly represented in the device-tree. */ void irq_set_default_domain(struct irq_domain *domain) { pr_debug("Default domain set to @0x%p\n", domain); irq_default_domain = domain; } EXPORT_SYMBOL_GPL(irq_set_default_domain); /** * irq_get_default_domain() - Retrieve the "default" irq domain * * Returns: the default domain, if any. * * Modern code should never use this. This should only be used on * systems that cannot implement a firmware->fwnode mapping (which * both DT and ACPI provide).
*/ struct irq_domain *irq_get_default_domain(void) { return irq_default_domain; } EXPORT_SYMBOL_GPL(irq_get_default_domain); static bool irq_domain_is_nomap(struct irq_domain *domain) { return IS_ENABLED(CONFIG_IRQ_DOMAIN_NOMAP) && (domain->flags & IRQ_DOMAIN_FLAG_NO_MAP); } static void irq_domain_clear_mapping(struct irq_domain *domain, irq_hw_number_t hwirq) { lockdep_assert_held(&domain->root->mutex); if (irq_domain_is_nomap(domain)) return; if (hwirq < domain->revmap_size) rcu_assign_pointer(domain->revmap[hwirq], NULL); else radix_tree_delete(&domain->revmap_tree, hwirq); } static void irq_domain_set_mapping(struct irq_domain *domain, irq_hw_number_t hwirq, struct irq_data *irq_data) { /* * This also makes sure that all domains point to the same root when * called from irq_domain_insert_irq() for each domain in a hierarchy. */ lockdep_assert_held(&domain->root->mutex); if (irq_domain_is_nomap(domain)) return; if (hwirq < domain->revmap_size) rcu_assign_pointer(domain->revmap[hwirq], irq_data); else radix_tree_insert(&domain->revmap_tree, hwirq, irq_data); } static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) { struct irq_data *irq_data = irq_get_irq_data(irq); irq_hw_number_t hwirq; if (WARN(!irq_data || irq_data->domain != domain, "virq%i doesn't exist; cannot disassociate\n", irq)) return; hwirq = irq_data->hwirq; mutex_lock(&domain->root->mutex); irq_set_status_flags(irq, IRQ_NOREQUEST); /* remove chip and handler */ irq_set_chip_and_handler(irq, NULL, NULL); /* Make sure it's completed */ synchronize_irq(irq); /* Tell the PIC about it */ if (domain->ops->unmap) domain->ops->unmap(domain, irq); smp_mb(); irq_data->domain = NULL; irq_data->hwirq = 0; domain->mapcount--; /* Clear reverse map for this hwirq */ irq_domain_clear_mapping(domain, hwirq); mutex_unlock(&domain->root->mutex); } static int irq_domain_associate_locked(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq) { struct irq_data *irq_data = irq_get_irq_data(virq); int ret; if (WARN(hwirq >= domain->hwirq_max, "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name)) return -EINVAL; if (WARN(!irq_data, "error: virq%i is not allocated", virq)) return -EINVAL; if (WARN(irq_data->domain, "error: virq%i is already associated", virq)) return -EINVAL; irq_data->hwirq = hwirq; irq_data->domain = domain; if (domain->ops->map) { ret = domain->ops->map(domain, virq, hwirq); if (ret != 0) { /* * If map() returns -EPERM, this interrupt is protected * by the firmware or some other service and shall not * be mapped. Don't bother telling the user about it. 
*/ if (ret != -EPERM) { pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n", domain->name, hwirq, virq, ret); } irq_data->domain = NULL; irq_data->hwirq = 0; return ret; } } domain->mapcount++; irq_domain_set_mapping(domain, hwirq, irq_data); irq_clear_status_flags(virq, IRQ_NOREQUEST); return 0; } int irq_domain_associate(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq) { int ret; mutex_lock(&domain->root->mutex); ret = irq_domain_associate_locked(domain, virq, hwirq); mutex_unlock(&domain->root->mutex); return ret; } EXPORT_SYMBOL_GPL(irq_domain_associate); void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, irq_hw_number_t hwirq_base, int count) { struct device_node *of_node; int i; of_node = irq_domain_get_of_node(domain); pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__, of_node_full_name(of_node), irq_base, (int)hwirq_base, count); for (i = 0; i < count; i++) irq_domain_associate(domain, irq_base + i, hwirq_base + i); } EXPORT_SYMBOL_GPL(irq_domain_associate_many); #ifdef CONFIG_IRQ_DOMAIN_NOMAP /** * irq_create_direct_mapping() - Allocate an irq for direct mapping * @domain: domain to allocate the irq for or NULL for default domain * * This routine is used for irq controllers which can choose the hardware * interrupt numbers they generate. In such a case it's simplest to use * the linux irq as the hardware interrupt number. It still uses the linear * or radix tree to store the mapping, but the irq controller can optimize * the revmap path by using the hwirq directly. */ unsigned int irq_create_direct_mapping(struct irq_domain *domain) { struct device_node *of_node; unsigned int virq; if (domain == NULL) domain = irq_default_domain; of_node = irq_domain_get_of_node(domain); virq = irq_alloc_desc_from(1, of_node_to_nid(of_node)); if (!virq) { pr_debug("create_direct virq allocation failed\n"); return 0; } if (virq >= domain->hwirq_max) { pr_err("ERROR: no free irqs available below %lu maximum\n", domain->hwirq_max); irq_free_desc(virq); return 0; } pr_debug("create_direct obtained virq %d\n", virq); if (irq_domain_associate(domain, virq, virq)) { irq_free_desc(virq); return 0; } return virq; } EXPORT_SYMBOL_GPL(irq_create_direct_mapping); #endif static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain, irq_hw_number_t hwirq, const struct irq_affinity_desc *affinity) { struct device_node *of_node = irq_domain_get_of_node(domain); int virq; pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); /* Allocate a virtual interrupt number */ virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), affinity); if (virq <= 0) { pr_debug("-> virq allocation failed\n"); return 0; } if (irq_domain_associate_locked(domain, virq, hwirq)) { irq_free_desc(virq); return 0; } pr_debug("irq %lu on domain %s mapped to virtual irq %u\n", hwirq, of_node_full_name(of_node), virq); return virq; } /** * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space * @domain: domain owning this hardware interrupt or NULL for default domain * @hwirq: hardware irq number in that domain space * @affinity: irq affinity * * Only one mapping per hardware interrupt is permitted. Returns a linux * irq number. * If the sense/trigger is to be specified, set_irq_type() should be called * on the number returned from that call. 
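 *
 * A minimal usage sketch (hypothetical caller; domain and hwirq are assumed
 * valid, and irq_create_mapping() is the wrapper that passes a NULL affinity):
 *
 *	unsigned int virq = irq_create_mapping(domain, hwirq);
 *
 *	if (virq)
 *		irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);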
*/ unsigned int irq_create_mapping_affinity(struct irq_domain *domain, irq_hw_number_t hwirq, const struct irq_affinity_desc *affinity) { int virq; /* Look for default domain if necessary */ if (domain == NULL) domain = irq_default_domain; if (domain == NULL) { WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq); return 0; } mutex_lock(&domain->root->mutex); /* Check if mapping already exists */ virq = irq_find_mapping(domain, hwirq); if (virq) { pr_debug("existing mapping on virq %d\n", virq); goto out; } virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity); out: mutex_unlock(&domain->root->mutex); return virq; } EXPORT_SYMBOL_GPL(irq_create_mapping_affinity); static int irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, irq_hw_number_t *hwirq, unsigned int *type) { #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY if (d->ops->translate) return d->ops->translate(d, fwspec, hwirq, type); #endif if (d->ops->xlate) return d->ops->xlate(d, to_of_node(fwspec->fwnode), fwspec->param, fwspec->param_count, hwirq, type); /* If domain has no translation, then we assume interrupt line */ *hwirq = fwspec->param[0]; return 0; } void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args, unsigned int count, struct irq_fwspec *fwspec) { int i; fwspec->fwnode = of_node_to_fwnode(np); fwspec->param_count = count; for (i = 0; i < count; i++) fwspec->param[i] = args[i]; } EXPORT_SYMBOL_GPL(of_phandle_args_to_fwspec); unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) { struct irq_domain *domain; struct irq_data *irq_data; irq_hw_number_t hwirq; unsigned int type = IRQ_TYPE_NONE; int virq; if (fwspec->fwnode) { domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED); if (!domain) domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY); } else { domain = irq_default_domain; } if (!domain) { pr_warn("no irq domain found for %s !\n", of_node_full_name(to_of_node(fwspec->fwnode))); return 0; } if (irq_domain_translate(domain, fwspec, &hwirq, &type)) return 0; /* * WARN if the irqchip returns a type with bits * outside the sense mask set and clear these bits. */ if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK)) type &= IRQ_TYPE_SENSE_MASK; mutex_lock(&domain->root->mutex); /* * If we've already configured this interrupt, * don't do it again, or hell will break loose. */ virq = irq_find_mapping(domain, hwirq); if (virq) { /* * If the trigger type is not specified or matches the * current trigger type then we are done so return the * interrupt number. */ if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq)) goto out; /* * If the trigger type has not been set yet, then set * it now and return the interrupt number. 
*/ if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) { irq_data = irq_get_irq_data(virq); if (!irq_data) { virq = 0; goto out; } irqd_set_trigger_type(irq_data, type); goto out; } pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n", hwirq, of_node_full_name(to_of_node(fwspec->fwnode))); virq = 0; goto out; } if (irq_domain_is_hierarchy(domain)) { if (irq_domain_is_msi_device(domain)) { mutex_unlock(&domain->root->mutex); virq = msi_device_domain_alloc_wired(domain, hwirq, type); mutex_lock(&domain->root->mutex); } else virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE, fwspec, false, NULL); if (virq <= 0) { virq = 0; goto out; } } else { /* Create mapping */ virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL); if (!virq) goto out; } irq_data = irq_get_irq_data(virq); if (WARN_ON(!irq_data)) { virq = 0; goto out; } /* Store trigger type */ irqd_set_trigger_type(irq_data, type); out: mutex_unlock(&domain->root->mutex); return virq; } EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping); unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) { struct irq_fwspec fwspec; of_phandle_args_to_fwspec(irq_data->np, irq_data->args, irq_data->args_count, &fwspec); return irq_create_fwspec_mapping(&fwspec); } EXPORT_SYMBOL_GPL(irq_create_of_mapping); /** * irq_dispose_mapping() - Unmap an interrupt * @virq: linux irq number of the interrupt to unmap */ void irq_dispose_mapping(unsigned int virq) { struct irq_data *irq_data; struct irq_domain *domain; irq_data = virq ? irq_get_irq_data(virq) : NULL; if (!irq_data) return; domain = irq_data->domain; if (WARN_ON(domain == NULL)) return; if (irq_domain_is_hierarchy(domain)) { irq_domain_free_one_irq(domain, virq); } else { irq_domain_disassociate(domain, virq); irq_free_desc(virq); } } EXPORT_SYMBOL_GPL(irq_dispose_mapping); /** * __irq_resolve_mapping() - Find a linux irq from a hw irq number. * @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space * @irq: optional pointer to return the Linux irq if required * * Returns the interrupt descriptor. */ struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain, irq_hw_number_t hwirq, unsigned int *irq) { struct irq_desc *desc = NULL; struct irq_data *data; /* Look for default domain if necessary */ if (domain == NULL) domain = irq_default_domain; if (domain == NULL) return desc; if (irq_domain_is_nomap(domain)) { if (hwirq < domain->hwirq_max) { data = irq_domain_get_irq_data(domain, hwirq); if (data && data->hwirq == hwirq) desc = irq_data_to_desc(data); if (irq && desc) *irq = hwirq; } return desc; } rcu_read_lock(); /* Check if the hwirq is in the linear revmap. 
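 * Larger hwirqs are looked up in the radix tree instead.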
*/ if (hwirq < domain->revmap_size) data = rcu_dereference(domain->revmap[hwirq]); else data = radix_tree_lookup(&domain->revmap_tree, hwirq); if (likely(data)) { desc = irq_data_to_desc(data); if (irq) *irq = data->irq; } rcu_read_unlock(); return desc; } EXPORT_SYMBOL_GPL(__irq_resolve_mapping); /** * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings * @d: Interrupt domain involved in the translation * @ctrlr: The device tree node for the device whose interrupt is translated * @intspec: The interrupt specifier data from the device tree * @intsize: The number of entries in @intspec * @out_hwirq: Pointer to storage for the hardware interrupt number * @out_type: Pointer to storage for the interrupt type * * Device Tree IRQ specifier translation function which works with one cell * bindings where the cell value maps directly to the hwirq number. */ int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type) { if (WARN_ON(intsize < 1)) return -EINVAL; *out_hwirq = intspec[0]; *out_type = IRQ_TYPE_NONE; return 0; } EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell); /** * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings * @d: Interrupt domain involved in the translation * @ctrlr: The device tree node for the device whose interrupt is translated * @intspec: The interrupt specifier data from the device tree * @intsize: The number of entries in @intspec * @out_hwirq: Pointer to storage for the hardware interrupt number * @out_type: Pointer to storage for the interrupt type * * Device Tree IRQ specifier translation function which works with two cell * bindings where the cell values map directly to the hwirq number * and linux irq flags. */ int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type) { struct irq_fwspec fwspec; of_phandle_args_to_fwspec(ctrlr, intspec, intsize, &fwspec); return irq_domain_translate_twocell(d, &fwspec, out_hwirq, out_type); } EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell); /** * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings * @d: Interrupt domain involved in the translation * @ctrlr: The device tree node for the device whose interrupt is translated * @intspec: The interrupt specifier data from the device tree * @intsize: The number of entries in @intspec * @out_hwirq: Pointer to storage for the hardware interrupt number * @out_type: Pointer to storage for the interrupt type * * Device Tree IRQ specifier translation function which works with either one * or two cell bindings where the cell values map directly to the hwirq number * and linux irq flags. * * Note: don't use this function unless your interrupt controller explicitly * supports both one and two cell bindings. For the majority of controllers * the _onecell() or _twocell() variants above should be used. 
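 *
 * For example (sketch), a one-cell specifier <34> yields hwirq 34 with type
 * IRQ_TYPE_NONE, while a two-cell specifier <34 1> yields hwirq 34 with type
 * IRQ_TYPE_EDGE_RISING (the second cell is masked with IRQ_TYPE_SENSE_MASK).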
*/ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type) { if (WARN_ON(intsize < 1)) return -EINVAL; *out_hwirq = intspec[0]; if (intsize > 1) *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK; else *out_type = IRQ_TYPE_NONE; return 0; } EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell); const struct irq_domain_ops irq_domain_simple_ops = { .xlate = irq_domain_xlate_onetwocell, }; EXPORT_SYMBOL_GPL(irq_domain_simple_ops); /** * irq_domain_translate_onecell() - Generic translate for direct one cell * bindings * @d: Interrupt domain involved in the translation * @fwspec: The firmware interrupt specifier to translate * @out_hwirq: Pointer to storage for the hardware interrupt number * @out_type: Pointer to storage for the interrupt type */ int irq_domain_translate_onecell(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *out_hwirq, unsigned int *out_type) { if (WARN_ON(fwspec->param_count < 1)) return -EINVAL; *out_hwirq = fwspec->param[0]; *out_type = IRQ_TYPE_NONE; return 0; } EXPORT_SYMBOL_GPL(irq_domain_translate_onecell); /** * irq_domain_translate_twocell() - Generic translate for direct two cell * bindings * @d: Interrupt domain involved in the translation * @fwspec: The firmware interrupt specifier to translate * @out_hwirq: Pointer to storage for the hardware interrupt number * @out_type: Pointer to storage for the interrupt type * * Device Tree IRQ specifier translation function which works with two cell * bindings where the cell values map directly to the hwirq number * and linux irq flags. */ int irq_domain_translate_twocell(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *out_hwirq, unsigned int *out_type) { if (WARN_ON(fwspec->param_count < 2)) return -EINVAL; *out_hwirq = fwspec->param[0]; *out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; return 0; } EXPORT_SYMBOL_GPL(irq_domain_translate_twocell); int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq, int node, const struct irq_affinity_desc *affinity) { unsigned int hint; if (virq >= 0) { virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE, affinity); } else { hint = hwirq % irq_get_nr_irqs(); if (hint == 0) hint++; virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE, affinity); if (virq <= 0 && hint > 1) { virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE, affinity); } } return virq; } /** * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data * @irq_data: The pointer to irq_data */ void irq_domain_reset_irq_data(struct irq_data *irq_data) { irq_data->hwirq = 0; irq_data->chip = &no_irq_chip; irq_data->chip_data = NULL; } EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /** * irq_domain_create_hierarchy - Add an irqdomain into the hierarchy * @parent: Parent irq domain to associate with the new domain * @flags: Irq domain flags associated to the domain * @size: Size of the domain. See below * @fwnode: Optional fwnode of the interrupt controller * @ops: Pointer to the interrupt domain callbacks * @host_data: Controller private data pointer * * If @size is 0 a tree domain is created, otherwise a linear domain. * * If successful the parent is associated with the new domain and the * domain flags are set. * Returns pointer to IRQ domain, or NULL on failure.
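 *
 * A minimal usage sketch (hypothetical stacked irqchip; my_domain_ops is an
 * illustrative ops structure that must provide at least an alloc() callback):
 *
 *	d = irq_domain_create_hierarchy(parent, 0, 64, fwnode,
 *					&my_domain_ops, host_data);
 *	if (!d)
 *		return -ENOMEM;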
*/ struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, unsigned int flags, unsigned int size, struct fwnode_handle *fwnode, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain_info info = { .fwnode = fwnode, .size = size, .hwirq_max = size, .ops = ops, .host_data = host_data, .domain_flags = flags, .parent = parent, }; struct irq_domain *d; if (!info.size) info.hwirq_max = ~0U; d = irq_domain_instantiate(&info); return IS_ERR(d) ? NULL : d; } EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy); static void irq_domain_insert_irq(int virq) { struct irq_data *data; for (data = irq_get_irq_data(virq); data; data = data->parent_data) { struct irq_domain *domain = data->domain; domain->mapcount++; irq_domain_set_mapping(domain, data->hwirq, data); } irq_clear_status_flags(virq, IRQ_NOREQUEST); } static void irq_domain_remove_irq(int virq) { struct irq_data *data; irq_set_status_flags(virq, IRQ_NOREQUEST); irq_set_chip_and_handler(virq, NULL, NULL); synchronize_irq(virq); smp_mb(); for (data = irq_get_irq_data(virq); data; data = data->parent_data) { struct irq_domain *domain = data->domain; irq_hw_number_t hwirq = data->hwirq; domain->mapcount--; irq_domain_clear_mapping(domain, hwirq); } } static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain, struct irq_data *child) { struct irq_data *irq_data; irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL, irq_data_get_node(child)); if (irq_data) { child->parent_data = irq_data; irq_data->irq = child->irq; irq_data->common = child->common; irq_data->domain = domain; } return irq_data; } static void __irq_domain_free_hierarchy(struct irq_data *irq_data) { struct irq_data *tmp; while (irq_data) { tmp = irq_data; irq_data = irq_data->parent_data; kfree(tmp); } } static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs) { struct irq_data *irq_data, *tmp; int i; for (i = 0; i < nr_irqs; i++) { irq_data = irq_get_irq_data(virq + i); tmp = irq_data->parent_data; irq_data->parent_data = NULL; irq_data->domain = NULL; __irq_domain_free_hierarchy(tmp); } } /** * irq_domain_disconnect_hierarchy - Mark the first unused level of a hierarchy * @domain: IRQ domain from which the hierarchy is to be disconnected * @virq: IRQ number where the hierarchy is to be trimmed * * Marks the @virq level belonging to @domain as disconnected. * Returns -EINVAL if @virq doesn't have a valid irq_data pointing * to @domain. * * Its only use is to be able to trim levels of hierarchy that do not * have any real meaning for this interrupt, and that the driver marks * as such from its .alloc() callback. */ int irq_domain_disconnect_hierarchy(struct irq_domain *domain, unsigned int virq) { struct irq_data *irqd; irqd = irq_domain_get_irq_data(domain, virq); if (!irqd) return -EINVAL; irqd->chip = ERR_PTR(-ENOTCONN); return 0; } EXPORT_SYMBOL_GPL(irq_domain_disconnect_hierarchy); static int irq_domain_trim_hierarchy(unsigned int virq) { struct irq_data *tail, *irqd, *irq_data; irq_data = irq_get_irq_data(virq); tail = NULL; /* The first entry must have a valid irqchip */ if (IS_ERR_OR_NULL(irq_data->chip)) return -EINVAL; /* * Validate that the irq_data chain is sane in the presence of * a hierarchy trimming marker. 
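 *
 * That is, walking inwards from the outermost level: first only levels
 * with a valid irqchip, then at most one ERR_PTR(-ENOTCONN) trim marker,
 * then only levels without an irqchip.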
*/ for (irqd = irq_data->parent_data; irqd; irq_data = irqd, irqd = irqd->parent_data) { /* Can't have a valid irqchip after a trim marker */ if (irqd->chip && tail) return -EINVAL; /* Can't have an empty irqchip before a trim marker */ if (!irqd->chip && !tail) return -EINVAL; if (IS_ERR(irqd->chip)) { /* Only -ENOTCONN is a valid trim marker */ if (PTR_ERR(irqd->chip) != -ENOTCONN) return -EINVAL; tail = irq_data; } } /* No trim marker, nothing to do */ if (!tail) return 0; pr_info("IRQ%d: trimming hierarchy from %s\n", virq, tail->parent_data->domain->name); /* Sever the inner part of the hierarchy... */ irqd = tail; tail = tail->parent_data; irqd->parent_data = NULL; __irq_domain_free_hierarchy(tail); return 0; } static int irq_domain_alloc_irq_data(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *irq_data; struct irq_domain *parent; int i; /* The outermost irq_data is embedded in struct irq_desc */ for (i = 0; i < nr_irqs; i++) { irq_data = irq_get_irq_data(virq + i); irq_data->domain = domain; for (parent = domain->parent; parent; parent = parent->parent) { irq_data = irq_domain_insert_irq_data(parent, irq_data); if (!irq_data) { irq_domain_free_irq_data(virq, i + 1); return -ENOMEM; } } } return 0; } /** * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain * @domain: domain to match * @virq: IRQ number to get irq_data */ struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq) { struct irq_data *irq_data; for (irq_data = irq_get_irq_data(virq); irq_data; irq_data = irq_data->parent_data) if (irq_data->domain == domain) return irq_data; return NULL; } EXPORT_SYMBOL_GPL(irq_domain_get_irq_data); /** * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain * @domain: Interrupt domain to match * @virq: IRQ number * @hwirq: The hwirq number * @chip: The associated interrupt chip * @chip_data: The associated chip data */ int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, const struct irq_chip *chip, void *chip_data) { struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); if (!irq_data) return -ENOENT; irq_data->hwirq = hwirq; irq_data->chip = (struct irq_chip *)(chip ? 
chip : &no_irq_chip); irq_data->chip_data = chip_data; return 0; } EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip); /** * irq_domain_set_info - Set the complete data for a @virq in @domain * @domain: Interrupt domain to match * @virq: IRQ number * @hwirq: The hardware interrupt number * @chip: The associated interrupt chip * @chip_data: The associated interrupt chip data * @handler: The interrupt flow handler * @handler_data: The interrupt flow handler data * @handler_name: The interrupt handler name */ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name) { irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data); __irq_set_handler(virq, handler, 0, handler_name); irq_set_handler_data(virq, handler_data); } EXPORT_SYMBOL(irq_domain_set_info); /** * irq_domain_free_irqs_common - Clear irq_data and free the parent * @domain: Interrupt domain to match * @virq: IRQ number to start with * @nr_irqs: The number of irqs to free */ void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *irq_data; int i; for (i = 0; i < nr_irqs; i++) { irq_data = irq_domain_get_irq_data(domain, virq + i); if (irq_data) irq_domain_reset_irq_data(irq_data); } irq_domain_free_irqs_parent(domain, virq, nr_irqs); } EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common); /** * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent * @domain: Interrupt domain to match * @virq: IRQ number to start with * @nr_irqs: The number of irqs to free */ void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { int i; for (i = 0; i < nr_irqs; i++) { irq_set_handler_data(virq + i, NULL); irq_set_handler(virq + i, NULL); } irq_domain_free_irqs_common(domain, virq, nr_irqs); } static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs) { unsigned int i; if (!domain->ops->free) return; for (i = 0; i < nr_irqs; i++) { if (irq_domain_get_irq_data(domain, irq_base + i)) domain->ops->free(domain, irq_base + i, 1); } } static int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs, void *arg) { if (!domain->ops->alloc) { pr_debug("domain->ops->alloc() is NULL\n"); return -ENOSYS; } return domain->ops->alloc(domain, irq_base, nr_irqs, arg); } static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc, const struct irq_affinity_desc *affinity) { int i, ret, virq; if (realloc && irq_base >= 0) { virq = irq_base; } else { virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node, affinity); if (virq < 0) { pr_debug("cannot allocate IRQ(base %d, count %d)\n", irq_base, nr_irqs); return virq; } } if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) { pr_debug("cannot allocate memory for IRQ%d\n", virq); ret = -ENOMEM; goto out_free_desc; } ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg); if (ret < 0) goto out_free_irq_data; for (i = 0; i < nr_irqs; i++) { ret = irq_domain_trim_hierarchy(virq + i); if (ret) goto out_free_irq_data; } for (i = 0; i < nr_irqs; i++) irq_domain_insert_irq(virq + i); return virq; out_free_irq_data: irq_domain_free_irq_data(virq, nr_irqs); out_free_desc: irq_free_descs(virq, nr_irqs); return ret; } /** * 
__irq_domain_alloc_irqs - Allocate IRQs from domain * @domain: domain to allocate from * @irq_base: allocate specified IRQ number if irq_base >= 0 * @nr_irqs: number of IRQs to allocate * @node: NUMA node id for memory allocation * @arg: domain specific argument * @realloc: IRQ descriptors have already been allocated if true * @affinity: Optional irq affinity mask for multiqueue devices * * Allocate IRQ numbers and initialize all data structures to support * hierarchy IRQ domains. * Parameter @realloc is mainly to support legacy IRQs. * Returns error code or allocated IRQ number * * The whole process to setup an IRQ has been split into two steps. * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ * descriptors and required hardware resources. The second step, * irq_domain_activate_irq(), is to program the hardware with preallocated * resources. In this way, it's easier to roll back when failing to * allocate resources. */ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc, const struct irq_affinity_desc *affinity) { int ret; if (domain == NULL) { domain = irq_default_domain; if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n")) return -EINVAL; } mutex_lock(&domain->root->mutex); ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg, realloc, affinity); mutex_unlock(&domain->root->mutex); return ret; } EXPORT_SYMBOL_GPL(__irq_domain_alloc_irqs); /* The irq_data was moved, fix the revmap to refer to the new location */ static void irq_domain_fix_revmap(struct irq_data *d) { void __rcu **slot; lockdep_assert_held(&d->domain->root->mutex); if (irq_domain_is_nomap(d->domain)) return; /* Fix up the revmap. */ if (d->hwirq < d->domain->revmap_size) { /* Not using radix tree */ rcu_assign_pointer(d->domain->revmap[d->hwirq], d); } else { slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq); if (slot) radix_tree_replace_slot(&d->domain->revmap_tree, slot, d); } } /** * irq_domain_push_irq() - Push a domain into the top of a hierarchy. * @domain: Domain to push. * @virq: Irq to push the domain into. * @arg: Passed to the irq_domain_ops alloc() function. * * For an already existing irqdomain hierarchy, as might be obtained * via a call to pci_enable_msix(), add an additional domain to the * head of the processing chain. Must be called before request_irq() * has been called. */ int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg) { struct irq_data *irq_data = irq_get_irq_data(virq); struct irq_data *parent_irq_data; struct irq_desc *desc; int rv = 0; /* * Check that no action has been set, which indicates the virq * is in a state where this function doesn't have to deal with * races between interrupt handling and maintaining the * hierarchy. This will catch gross misuse. Attempting to * make the check race free would require holding locks across * calls to struct irq_domain_ops->alloc(), which could lead * to deadlock, so we just do a simple check before starting. */ desc = irq_to_desc(virq); if (!desc) return -EINVAL; if (WARN_ON(desc->action)) return -EBUSY; if (domain == NULL) return -EINVAL; if (WARN_ON(!irq_domain_is_hierarchy(domain))) return -EINVAL; if (!irq_data) return -EINVAL; if (domain->parent != irq_data->domain) return -EINVAL; parent_irq_data = kzalloc_node(sizeof(*parent_irq_data), GFP_KERNEL, irq_data_get_node(irq_data)); if (!parent_irq_data) return -ENOMEM; mutex_lock(&domain->root->mutex); /* Copy the original irq_data.
*/ *parent_irq_data = *irq_data; /* * Overwrite the irq_data, which is embedded in struct irq_desc, with * values for this domain. */ irq_data->parent_data = parent_irq_data; irq_data->domain = domain; irq_data->mask = 0; irq_data->hwirq = 0; irq_data->chip = NULL; irq_data->chip_data = NULL; /* May (probably does) set hwirq, chip, etc. */ rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg); if (rv) { /* Restore the original irq_data. */ *irq_data = *parent_irq_data; kfree(parent_irq_data); goto error; } irq_domain_fix_revmap(parent_irq_data); irq_domain_set_mapping(domain, irq_data->hwirq, irq_data); error: mutex_unlock(&domain->root->mutex); return rv; } EXPORT_SYMBOL_GPL(irq_domain_push_irq); /** * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy. * @domain: Domain to remove. * @virq: Irq to remove the domain from. * * Undo the effects of a call to irq_domain_push_irq(). Must be * called either before request_irq() or after free_irq(). */ int irq_domain_pop_irq(struct irq_domain *domain, int virq) { struct irq_data *irq_data = irq_get_irq_data(virq); struct irq_data *parent_irq_data; struct irq_data *tmp_irq_data; struct irq_desc *desc; /* * Check that no action is set, which indicates the virq is in * a state where this function doesn't have to deal with races * between interrupt handling and maintaining the hierarchy. * This will catch gross misuse. Attempting to make the check * race free would require holding locks across calls to * struct irq_domain_ops->free(), which could lead to * deadlock, so we just do a simple check before starting. */ desc = irq_to_desc(virq); if (!desc) return -EINVAL; if (WARN_ON(desc->action)) return -EBUSY; if (domain == NULL) return -EINVAL; if (!irq_data) return -EINVAL; tmp_irq_data = irq_domain_get_irq_data(domain, virq); /* We can only "pop" if this domain is at the top of the list */ if (WARN_ON(irq_data != tmp_irq_data)) return -EINVAL; if (WARN_ON(irq_data->domain != domain)) return -EINVAL; parent_irq_data = irq_data->parent_data; if (WARN_ON(!parent_irq_data)) return -EINVAL; mutex_lock(&domain->root->mutex); irq_data->parent_data = NULL; irq_domain_clear_mapping(domain, irq_data->hwirq); irq_domain_free_irqs_hierarchy(domain, virq, 1); /* Restore the original irq_data. 
*/ *irq_data = *parent_irq_data; irq_domain_fix_revmap(irq_data); mutex_unlock(&domain->root->mutex); kfree(parent_irq_data); return 0; } EXPORT_SYMBOL_GPL(irq_domain_pop_irq); /** * irq_domain_free_irqs - Free IRQ number and associated data structures * @virq: base IRQ number * @nr_irqs: number of IRQs to free */ void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) { struct irq_data *data = irq_get_irq_data(virq); struct irq_domain *domain; int i; if (WARN(!data || !data->domain || !data->domain->ops->free, "NULL pointer, cannot free irq\n")) return; domain = data->domain; mutex_lock(&domain->root->mutex); for (i = 0; i < nr_irqs; i++) irq_domain_remove_irq(virq + i); irq_domain_free_irqs_hierarchy(domain, virq, nr_irqs); mutex_unlock(&domain->root->mutex); irq_domain_free_irq_data(virq, nr_irqs); irq_free_descs(virq, nr_irqs); } static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq) { if (irq_domain_is_msi_device(domain)) msi_device_domain_free_wired(domain, virq); else irq_domain_free_irqs(virq, 1); } /** * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain * @domain: Domain below which interrupts must be allocated * @irq_base: Base IRQ number * @nr_irqs: Number of IRQs to allocate * @arg: Allocation data (arch/domain specific) */ int irq_domain_alloc_irqs_parent(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs, void *arg) { if (!domain->parent) return -ENOSYS; return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base, nr_irqs, arg); } EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent); /** * irq_domain_free_irqs_parent - Free interrupts from parent domain * @domain: Domain below which interrupts must be freed * @irq_base: Base IRQ number * @nr_irqs: Number of IRQs to free */ void irq_domain_free_irqs_parent(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs) { if (!domain->parent) return; irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs); } EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); static void __irq_domain_deactivate_irq(struct irq_data *irq_data) { if (irq_data && irq_data->domain) { struct irq_domain *domain = irq_data->domain; if (domain->ops->deactivate) domain->ops->deactivate(domain, irq_data); if (irq_data->parent_data) __irq_domain_deactivate_irq(irq_data->parent_data); } } static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve) { int ret = 0; if (irqd && irqd->domain) { struct irq_domain *domain = irqd->domain; if (irqd->parent_data) ret = __irq_domain_activate_irq(irqd->parent_data, reserve); if (!ret && domain->ops->activate) { ret = domain->ops->activate(domain, irqd, reserve); /* Rollback in case of error */ if (ret && irqd->parent_data) __irq_domain_deactivate_irq(irqd->parent_data); } } return ret; } /** * irq_domain_activate_irq - Call domain_ops->activate recursively to activate * interrupt * @irq_data: Outermost irq_data associated with interrupt * @reserve: If set only reserve an interrupt vector instead of assigning one * * This is the second step to call domain_ops->activate to program interrupt * controllers, so the interrupt could actually get delivered. 
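 *
 * A sketch of the two-step sequence (hypothetical caller; domain and arg are
 * assumed valid):
 *
 *	virq = __irq_domain_alloc_irqs(domain, -1, 1, NUMA_NO_NODE,
 *				       arg, false, NULL);
 *	if (virq > 0)
 *		ret = irq_domain_activate_irq(irq_get_irq_data(virq), false);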
*/ int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve) { int ret = 0; if (!irqd_is_activated(irq_data)) ret = __irq_domain_activate_irq(irq_data, reserve); if (!ret) irqd_set_activated(irq_data); return ret; } /** * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to * deactivate interrupt * @irq_data: outermost irq_data associated with interrupt * * It calls domain_ops->deactivate to program interrupt controllers to disable * interrupt delivery. */ void irq_domain_deactivate_irq(struct irq_data *irq_data) { if (irqd_is_activated(irq_data)) { __irq_domain_deactivate_irq(irq_data); irqd_clr_activated(irq_data); } } static void irq_domain_check_hierarchy(struct irq_domain *domain) { /* Hierarchy irq_domains must implement the alloc() callback */ if (domain->ops->alloc) domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; } #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ /** * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain * @domain: domain to match * @virq: IRQ number to get irq_data */ struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq) { struct irq_data *irq_data = irq_get_irq_data(virq); return (irq_data && irq_data->domain == domain) ? irq_data : NULL; } EXPORT_SYMBOL_GPL(irq_domain_get_irq_data); /** * irq_domain_set_info - Set the complete data for a @virq in @domain * @domain: Interrupt domain to match * @virq: IRQ number * @hwirq: The hardware interrupt number * @chip: The associated interrupt chip * @chip_data: The associated interrupt chip data * @handler: The interrupt flow handler * @handler_data: The interrupt flow handler data * @handler_name: The interrupt handler name */ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name) { irq_set_chip_and_handler_name(virq, chip, handler, handler_name); irq_set_chip_data(virq, chip_data); irq_set_handler_data(virq, handler_data); } static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc, const struct irq_affinity_desc *affinity) { return -EINVAL; } static void irq_domain_check_hierarchy(struct irq_domain *domain) { } static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq) { } #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ #ifdef CONFIG_GENERIC_IRQ_DEBUGFS #include "internals.h" static struct dentry *domain_dir; static const struct irq_bit_descr irqdomain_flags[] = { BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_HIERARCHY), BIT_MASK_DESCR(IRQ_DOMAIN_NAME_ALLOCATED), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_IPI_PER_CPU), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_IPI_SINGLE), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_MSI), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_ISOLATED_MSI), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_NO_MAP), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_MSI_PARENT), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_MSI_DEVICE), BIT_MASK_DESCR(IRQ_DOMAIN_FLAG_NONCORE), }; static void irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind) { seq_printf(m, "%*sname: %s\n", ind, "", d->name); seq_printf(m, "%*ssize: %u\n", ind + 1, "", d->revmap_size); seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount); seq_printf(m, "%*sflags: 0x%08x\n", ind + 1, "", d->flags); irq_debug_show_bits(m, ind, d->flags, irqdomain_flags, ARRAY_SIZE(irqdomain_flags)); if (d->ops && d->ops->debug_show) d->ops->debug_show(m, d, NULL, ind + 1); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY if
(!d->parent) return; seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name); irq_domain_debug_show_one(m, d->parent, ind + 4); #endif } static int irq_domain_debug_show(struct seq_file *m, void *p) { struct irq_domain *d = m->private; /* Default domain? Might be NULL */ if (!d) { if (!irq_default_domain) return 0; d = irq_default_domain; } irq_domain_debug_show_one(m, d, 0); return 0; } DEFINE_SHOW_ATTRIBUTE(irq_domain_debug); static void debugfs_add_domain_dir(struct irq_domain *d) { if (!d->name || !domain_dir) return; debugfs_create_file(d->name, 0444, domain_dir, d, &irq_domain_debug_fops); } static void debugfs_remove_domain_dir(struct irq_domain *d) { debugfs_lookup_and_remove(d->name, domain_dir); } void __init irq_domain_debugfs_init(struct dentry *root) { struct irq_domain *d; domain_dir = debugfs_create_dir("domains", root); debugfs_create_file("default", 0444, domain_dir, NULL, &irq_domain_debug_fops); mutex_lock(&irq_domain_mutex); list_for_each_entry(d, &irq_domain_list, link) debugfs_add_domain_dir(d); mutex_unlock(&irq_domain_mutex); } #endif
// SPDX-License-Identifier: GPL-2.0-or-later /* * SQ905C subdriver * * Copyright (C) 2009 Theodore Kilgore */ /* * * This driver uses work done in * libgphoto2/camlibs/digigr8, Copyright (C) Theodore Kilgore. * * This driver also used the sq905 driver as a base * and may contain code fragments from it. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "sq905c" #include <linux/workqueue.h> #include <linux/slab.h> #include "gspca.h" MODULE_AUTHOR("Theodore Kilgore <kilgota@auburn.edu>"); MODULE_DESCRIPTION("GSPCA/SQ905C USB Camera Driver"); MODULE_LICENSE("GPL"); /* Default timeouts, in ms */ #define SQ905C_CMD_TIMEOUT 500 #define SQ905C_DATA_TIMEOUT 1000 /* Maximum transfer size to use. */ #define SQ905C_MAX_TRANSFER 0x8000 #define FRAME_HEADER_LEN 0x50 /* Commands. These go in the "value" slot. */ #define SQ905C_CLEAR 0xa0 /* clear everything */ #define SQ905C_GET_ID 0x14f4 /* Read version number */ #define SQ905C_CAPTURE_LOW 0xa040 /* Starts capture at 160x120 */ #define SQ905C_CAPTURE_MED 0x1440 /* Starts capture at 320x240 */ #define SQ905C_CAPTURE_HI 0x2840 /* Starts capture at 640x480 */ /* For capture, this must go in the "index" slot. */ #define SQ905C_CAPTURE_INDEX 0x110f /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ const struct v4l2_pix_format *cap_mode; /* Driver stuff */ struct work_struct work_struct; struct workqueue_struct *work_thread; }; /* * Most of these cameras will do 640x480 and 320x240. 160x120 works * in theory but gives very poor output. Therefore, it is not supported. * The 0x2770:0x9050 cameras have a max resolution of 320x240. */ static struct v4l2_pix_format sq905c_mode[] = { { 320, 240, V4L2_PIX_FMT_SQ905C, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, { 640, 480, V4L2_PIX_FMT_SQ905C, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0} }; /* Send a command to the camera.
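 * The command goes in the "value" slot of a vendor control request and
 * @index in the "index" slot, as with the capture start in sd_start() below:
 *
 *	sq905c_command(gspca_dev, SQ905C_CAPTURE_MED, SQ905C_CAPTURE_INDEX);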
*/ static int sq905c_command(struct gspca_dev *gspca_dev, u16 command, u16 index) { int ret; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, command, index, NULL, 0, SQ905C_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } return 0; } static int sq905c_read(struct gspca_dev *gspca_dev, u16 command, u16 index, int size) { int ret; ret = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, command, index, gspca_dev->usb_buf, size, SQ905C_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } return 0; } /* * This function is called as a workqueue function and runs whenever the camera * is streaming data. Because it is a workqueue function it is allowed to sleep * so we can use synchronous USB calls. To avoid possible collisions with other * threads attempting to use gspca_dev->usb_buf we take the usb_lock when * performing USB operations using it. In practice we don't really need this * as the camera doesn't provide any controls. */ static void sq905c_dostream(struct work_struct *work) { struct sd *dev = container_of(work, struct sd, work_struct); struct gspca_dev *gspca_dev = &dev->gspca_dev; int bytes_left; /* bytes remaining in current frame. */ int data_len; /* size to use for the next read. */ int act_len; int packet_type; int ret; u8 *buffer; buffer = kmalloc(SQ905C_MAX_TRANSFER, GFP_KERNEL); if (!buffer) { pr_err("Couldn't allocate USB buffer\n"); goto quit_stream; } while (gspca_dev->present && gspca_dev->streaming) { #ifdef CONFIG_PM if (gspca_dev->frozen) break; #endif /* Request the header, which tells the size to download */ ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x81), buffer, FRAME_HEADER_LEN, &act_len, SQ905C_DATA_TIMEOUT); gspca_dbg(gspca_dev, D_STREAM, "Got %d bytes out of %d for header\n", act_len, FRAME_HEADER_LEN); if (ret < 0 || act_len < FRAME_HEADER_LEN) goto quit_stream; /* size is read from 4 bytes starting 0x40, little endian */ bytes_left = buffer[0x40]|(buffer[0x41]<<8)|(buffer[0x42]<<16) |(buffer[0x43]<<24); gspca_dbg(gspca_dev, D_STREAM, "bytes_left = 0x%x\n", bytes_left); /* We keep the header. It has other information, too. */ packet_type = FIRST_PACKET; gspca_frame_add(gspca_dev, packet_type, buffer, FRAME_HEADER_LEN); while (bytes_left > 0 && gspca_dev->present) { data_len = bytes_left > SQ905C_MAX_TRANSFER ? 
SQ905C_MAX_TRANSFER : bytes_left; ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x81), buffer, data_len, &act_len, SQ905C_DATA_TIMEOUT); if (ret < 0 || act_len < data_len) goto quit_stream; gspca_dbg(gspca_dev, D_STREAM, "Got %d bytes out of %d for frame\n", data_len, bytes_left); bytes_left -= data_len; if (bytes_left == 0) packet_type = LAST_PACKET; else packet_type = INTER_PACKET; gspca_frame_add(gspca_dev, packet_type, buffer, data_len); } } quit_stream: if (gspca_dev->present) { mutex_lock(&gspca_dev->usb_lock); sq905c_command(gspca_dev, SQ905C_CLEAR, 0); mutex_unlock(&gspca_dev->usb_lock); } kfree(buffer); } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; struct sd *dev = (struct sd *) gspca_dev; int ret; gspca_dbg(gspca_dev, D_PROBE, "SQ9050 camera detected (vid/pid 0x%04X:0x%04X)\n", id->idVendor, id->idProduct); ret = sq905c_command(gspca_dev, SQ905C_GET_ID, 0); if (ret < 0) { gspca_err(gspca_dev, "Get version command failed\n"); return ret; } ret = sq905c_read(gspca_dev, 0xf5, 0, 20); if (ret < 0) { gspca_err(gspca_dev, "Reading version command failed\n"); return ret; } /* Note we leave out the usb id and the manufacturing date */ gspca_dbg(gspca_dev, D_PROBE, "SQ9050 ID string: %02x - %*ph\n", gspca_dev->usb_buf[3], 6, gspca_dev->usb_buf + 14); cam->cam_mode = sq905c_mode; cam->nmodes = 2; if (gspca_dev->usb_buf[15] == 0) cam->nmodes = 1; /* We don't use the buffer gspca allocates so make it small. */ cam->bulk_size = 32; cam->bulk = 1; INIT_WORK(&dev->work_struct, sq905c_dostream); return 0; } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* wait for the work queue to terminate */ mutex_unlock(&gspca_dev->usb_lock); /* This waits for sq905c_dostream to finish */ destroy_workqueue(dev->work_thread); dev->work_thread = NULL; mutex_lock(&gspca_dev->usb_lock); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { /* connect to the camera and reset it. */ return sq905c_command(gspca_dev, SQ905C_CLEAR, 0); } /* Set up for getting frames. 
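 * The capture command selects the resolution: SQ905C_CAPTURE_HI for
 * 640x480 and SQ905C_CAPTURE_MED for 320x240, each sent with
 * SQ905C_CAPTURE_INDEX in the "index" slot.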
*/ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; int ret; dev->cap_mode = gspca_dev->cam.cam_mode; /* "Open the shutter" and set size, to start capture */ switch (gspca_dev->pixfmt.width) { case 640: gspca_dbg(gspca_dev, D_STREAM, "Start streaming at high resolution\n"); dev->cap_mode++; ret = sq905c_command(gspca_dev, SQ905C_CAPTURE_HI, SQ905C_CAPTURE_INDEX); break; default: /* 320 */ gspca_dbg(gspca_dev, D_STREAM, "Start streaming at medium resolution\n"); ret = sq905c_command(gspca_dev, SQ905C_CAPTURE_MED, SQ905C_CAPTURE_INDEX); } if (ret < 0) { gspca_err(gspca_dev, "Start streaming command failed\n"); return ret; } /* Start the workqueue function to do the streaming */ dev->work_thread = create_singlethread_workqueue(MODULE_NAME); if (!dev->work_thread) return -ENOMEM; queue_work(dev->work_thread, &dev->work_struct); return 0; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x2770, 0x905c)}, {USB_DEVICE(0x2770, 0x9050)}, {USB_DEVICE(0x2770, 0x9051)}, {USB_DEVICE(0x2770, 0x9052)}, {USB_DEVICE(0x2770, 0x913d)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stop0 = sd_stop0, }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hauppauge HD PVR USB driver - video 4 linux 2 interface
 *
 * Copyright (C) 2008 Janne Grunau (j@jannau.net)
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-common.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#include "hdpvr.h"

#define BULK_URB_TIMEOUT	90 /* 0.09 seconds */

#define print_buffer_status() { \
		v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,	\
			 "%s:%d buffer stat: %d free, %d proc\n",	\
			 __func__, __LINE__,				\
			 list_size(&dev->free_buff_list),		\
			 list_size(&dev->rec_buff_list)); }

static const struct v4l2_dv_timings hdpvr_dv_timings[] = {
	V4L2_DV_BT_CEA_720X480I59_94,
	V4L2_DV_BT_CEA_720X576I50,
	V4L2_DV_BT_CEA_720X480P59_94,
	V4L2_DV_BT_CEA_720X576P50,
	V4L2_DV_BT_CEA_1280X720P50,
	V4L2_DV_BT_CEA_1280X720P60,
	V4L2_DV_BT_CEA_1920X1080I50,
	V4L2_DV_BT_CEA_1920X1080I60,
};

/* Use 480i59 as the default timings */
#define HDPVR_DEF_DV_TIMINGS_IDX (0)

struct hdpvr_fh {
	struct v4l2_fh fh;
	bool legacy_mode;
};

static uint list_size(struct list_head *list)
{
	struct list_head *tmp;
	uint count = 0;

	list_for_each(tmp, list) {
		count++;
	}

	return count;
}

/*=========================================================================*/
/* urb callback */
static void hdpvr_read_bulk_callback(struct urb *urb)
{
	struct hdpvr_buffer *buf = (struct hdpvr_buffer *)urb->context;
	struct hdpvr_device *dev = buf->dev;

	/* marking buffer as received and wake waiting */
	buf->status = BUFSTAT_READY;
	wake_up_interruptible(&dev->wait_data);
}
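For orientation (added commentary, not from the original source): the helpers in the next section cycle each buffer through three BUFSTAT_* states, and every transition below maps onto one of these steps.

/*
 * Buffer lifecycle overview (summary added for clarity; the state names
 * are the driver's own BUFSTAT_* values):
 *
 *   BUFSTAT_AVAILABLE  -- buffer sits on dev->free_buff_list
 *        |  hdpvr_submit_buffers(): usb_submit_urb(), buffer moved to
 *        v  dev->rec_buff_list
 *   BUFSTAT_INPROGRESS -- bulk URB in flight
 *        |  hdpvr_read_bulk_callback(): data arrived,
 *        v  wake_up_interruptible(&dev->wait_data)
 *   BUFSTAT_READY      -- hdpvr_read() copies the data to user space,
 *                         then recycles the buffer to the free list
 */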
/*=========================================================================*/
/* buffer bits */

/* function expects dev->io_mutex to be held by caller */
int hdpvr_cancel_queue(struct hdpvr_device *dev)
{
	struct hdpvr_buffer *buf;

	list_for_each_entry(buf, &dev->rec_buff_list, buff_list) {
		usb_kill_urb(buf->urb);
		buf->status = BUFSTAT_AVAILABLE;
	}

	list_splice_init(&dev->rec_buff_list, dev->free_buff_list.prev);

	return 0;
}

static int hdpvr_free_queue(struct list_head *q)
{
	struct list_head *tmp;
	struct list_head *p;
	struct hdpvr_buffer *buf;
	struct urb *urb;

	for (p = q->next; p != q;) {
		buf = list_entry(p, struct hdpvr_buffer, buff_list);

		urb = buf->urb;
		usb_free_coherent(urb->dev, urb->transfer_buffer_length,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		tmp = p->next;
		list_del(p);
		kfree(buf);
		p = tmp;
	}

	return 0;
}

/* function expects dev->io_mutex to be held by caller */
int hdpvr_free_buffers(struct hdpvr_device *dev)
{
	hdpvr_cancel_queue(dev);

	hdpvr_free_queue(&dev->free_buff_list);
	hdpvr_free_queue(&dev->rec_buff_list);

	return 0;
}

/* function expects dev->io_mutex to be held by caller */
int hdpvr_alloc_buffers(struct hdpvr_device *dev, uint count)
{
	uint i;
	int retval = -ENOMEM;
	u8 *mem;
	struct hdpvr_buffer *buf;
	struct urb *urb;

	v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
		 "allocating %u buffers\n", count);

	for (i = 0; i < count; i++) {
		buf = kzalloc(sizeof(struct hdpvr_buffer), GFP_KERNEL);
		if (!buf) {
			v4l2_err(&dev->v4l2_dev, "cannot allocate buffer\n");
			goto exit;
		}
		buf->dev = dev;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb)
			goto exit_urb;
		buf->urb = urb;

		mem = usb_alloc_coherent(dev->udev, dev->bulk_in_size,
					 GFP_KERNEL, &urb->transfer_dma);
		if (!mem) {
			v4l2_err(&dev->v4l2_dev,
				 "cannot allocate usb transfer buffer\n");
			goto exit_urb_buffer;
		}

		usb_fill_bulk_urb(buf->urb, dev->udev,
				  usb_rcvbulkpipe(dev->udev,
						  dev->bulk_in_endpointAddr),
				  mem, dev->bulk_in_size,
				  hdpvr_read_bulk_callback, buf);

		buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		buf->status = BUFSTAT_AVAILABLE;
		list_add_tail(&buf->buff_list, &dev->free_buff_list);
	}
	return 0;

exit_urb_buffer:
	usb_free_urb(urb);
exit_urb:
	kfree(buf);
exit:
	hdpvr_free_buffers(dev);
	return retval;
}

static int hdpvr_submit_buffers(struct hdpvr_device *dev)
{
	struct hdpvr_buffer *buf;
	struct urb *urb;
	int ret = 0, err_count = 0;

	mutex_lock(&dev->io_mutex);

	while (dev->status == STATUS_STREAMING &&
	       !list_empty(&dev->free_buff_list)) {

		buf = list_entry(dev->free_buff_list.next, struct hdpvr_buffer,
				 buff_list);
		if (buf->status != BUFSTAT_AVAILABLE) {
			v4l2_err(&dev->v4l2_dev,
				 "buffer not marked as available\n");
			ret = -EFAULT;
			goto err;
		}

		urb = buf->urb;
		urb->status = 0;
		urb->actual_length = 0;
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			v4l2_err(&dev->v4l2_dev,
				 "usb_submit_urb in %s returned %d\n",
				 __func__, ret);
			if (++err_count > 2)
				break;
			continue;
		}
		buf->status = BUFSTAT_INPROGRESS;
		list_move_tail(&buf->buff_list, &dev->rec_buff_list);
	}
err:
	print_buffer_status();
	mutex_unlock(&dev->io_mutex);
	return ret;
}

static struct hdpvr_buffer *hdpvr_get_next_buffer(struct hdpvr_device *dev)
{
	struct hdpvr_buffer *buf;

	mutex_lock(&dev->io_mutex);

	if (list_empty(&dev->rec_buff_list)) {
		mutex_unlock(&dev->io_mutex);
		return NULL;
	}

	buf = list_entry(dev->rec_buff_list.next, struct hdpvr_buffer,
			 buff_list);
	mutex_unlock(&dev->io_mutex);

	return buf;
}

static void hdpvr_transmit_buffers(struct work_struct *work)
{
	struct hdpvr_device *dev = container_of(work, struct hdpvr_device,
						worker);

	while (dev->status == STATUS_STREAMING) {

		if (hdpvr_submit_buffers(dev)) {
			v4l2_err(&dev->v4l2_dev, "couldn't submit buffers\n");
			goto error;
		}
		if (wait_event_interruptible(dev->wait_buffer,
				!list_empty(&dev->free_buff_list) ||
					     dev->status != STATUS_STREAMING))
			goto error;
	}

	v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
		 "transmit worker exited\n");
	return;
error:
	v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
		 "transmit buffers errored\n");
	dev->status = STATUS_ERROR;
}

/* function expects dev->io_mutex to be held by caller */
static int hdpvr_start_streaming(struct hdpvr_device *dev)
{
	int ret;
	struct hdpvr_video_info vidinf;

	if (dev->status == STATUS_STREAMING)
		return 0;

	if (dev->status != STATUS_IDLE)
		return -EAGAIN;

	ret = get_video_info(dev, &vidinf);
	if (ret < 0)
		return ret;

	if (!vidinf.valid) {
		msleep(250);
		v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
			 "no video signal at input %d\n",
			 dev->options.video_input);
		return -EAGAIN;
	}

	v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
		 "video signal: %dx%d@%dhz\n",
		 vidinf.width, vidinf.height, vidinf.fps);

	/* start streaming 2 request */
	ret = usb_control_msg(dev->udev,
			      usb_sndctrlpipe(dev->udev, 0),
			      0xb8, 0x38, 0x1, 0, NULL, 0, 8000);
	v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
		 "encoder start control request returned %d\n", ret);
	if (ret < 0)
		return ret;

	ret = hdpvr_config_call(dev, CTRL_START_STREAMING_VALUE, 0x00);
	if (ret)
		return ret;

	dev->status = STATUS_STREAMING;

	schedule_work(&dev->worker);

	v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
		 "streaming started\n");

	return 0;
}

/* function expects dev->io_mutex to be held by caller */
static int hdpvr_stop_streaming(struct hdpvr_device *dev)
{
	int actual_length;
	uint c = 0;
	u8 *buf;

	if (dev->status == STATUS_IDLE)
		return 0;
	else if (dev->status != STATUS_STREAMING)
		return -EAGAIN;

	buf = kmalloc(dev->bulk_in_size, GFP_KERNEL);
	if (!buf)
		v4l2_err(&dev->v4l2_dev,
			 "failed to allocate temporary buffer for emptying the internal device buffer. Next capture start will be slow\n");

	dev->status = STATUS_SHUTTING_DOWN;
	hdpvr_config_call(dev, CTRL_STOP_STREAMING_VALUE, 0x00);
	mutex_unlock(&dev->io_mutex);

	wake_up_interruptible(&dev->wait_buffer);
	msleep(50);

	flush_work(&dev->worker);

	mutex_lock(&dev->io_mutex);
	/* kill the still outstanding urbs */
	hdpvr_cancel_queue(dev);

	/* emptying the device buffer before shutting it down */
	while (buf && ++c < 500 &&
	       !usb_bulk_msg(dev->udev,
			     usb_rcvbulkpipe(dev->udev,
					     dev->bulk_in_endpointAddr),
			     buf, dev->bulk_in_size, &actual_length,
			     BULK_URB_TIMEOUT)) {
		v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
			 "%2d: got %d bytes\n", c, actual_length);
	}
	kfree(buf);
	v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
		 "used %d urbs to empty device buffers\n", c-1);
	msleep(10);

	dev->status = STATUS_IDLE;

	return 0;
}

/*=======================================================================*/
/*
 * video 4 linux 2 file operations
 */

static int hdpvr_open(struct file *file)
{
	struct hdpvr_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);

	if (fh == NULL)
		return -ENOMEM;
	fh->legacy_mode = true;
	v4l2_fh_init(&fh->fh, video_devdata(file));
	v4l2_fh_add(&fh->fh);
	file->private_data = fh;
	return 0;
}

static int hdpvr_release(struct file *file)
{
	struct hdpvr_device *dev = video_drvdata(file);

	mutex_lock(&dev->io_mutex);
	if (file->private_data == dev->owner) {
		hdpvr_stop_streaming(dev);
		dev->owner = NULL;
	}
	mutex_unlock(&dev->io_mutex);

	return v4l2_fh_release(file);
}

/*
 * hdpvr_v4l2_read()
 * will allocate buffers when called for the first time
 */
static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
			  loff_t *pos)
{
	struct hdpvr_device *dev = video_drvdata(file);
	struct hdpvr_buffer *buf = NULL;
	struct urb *urb;
	int ret = 0;
	int rem, cnt;

	if (*pos)
		return -ESPIPE;

	mutex_lock(&dev->io_mutex);
	if (dev->status == STATUS_IDLE) {
		if (hdpvr_start_streaming(dev)) {
			v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
				 "start_streaming failed\n");
			ret = -EIO;
			msleep(200);
			dev->status = STATUS_IDLE;
			mutex_unlock(&dev->io_mutex);
			goto err;
		}
		dev->owner = file->private_data;
		print_buffer_status();
	}
	mutex_unlock(&dev->io_mutex);

	/* wait for the first buffer */
	if (!(file->f_flags & O_NONBLOCK)) {
		if (wait_event_interruptible(dev->wait_data,
					     !list_empty_careful(&dev->rec_buff_list)))
			return -ERESTARTSYS;
	}

	buf = hdpvr_get_next_buffer(dev);

	while (count > 0 && buf) {

		if (buf->status != BUFSTAT_READY &&
		    dev->status != STATUS_DISCONNECTED) {
			int err;
			/* return nonblocking */
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				goto err;
			}

			err = wait_event_interruptible_timeout(dev->wait_data,
					buf->status == BUFSTAT_READY,
					msecs_to_jiffies(1000));
			if (err < 0) {
				ret = err;
				goto err;
			}
			if (!err) {
				v4l2_info(&dev->v4l2_dev,
					  "timeout: restart streaming\n");
				mutex_lock(&dev->io_mutex);
				hdpvr_stop_streaming(dev);
				mutex_unlock(&dev->io_mutex);
				/*
				 * The FW needs about 4 seconds after streaming
				 * stopped before it is ready to restart
				 * streaming.
				 */
				msleep(4000);
				err = hdpvr_start_streaming(dev);
				if (err) {
					ret = err;
					goto err;
				}
			}
		}

		if (buf->status != BUFSTAT_READY)
			break;

		/* set remaining bytes to copy */
		urb = buf->urb;
		rem = urb->actual_length - buf->pos;
		cnt = rem > count ?
count : rem; if (copy_to_user(buffer, urb->transfer_buffer + buf->pos, cnt)) { v4l2_err(&dev->v4l2_dev, "read: copy_to_user failed\n"); if (!ret) ret = -EFAULT; goto err; } buf->pos += cnt; count -= cnt; buffer += cnt; ret += cnt; /* finished, take next buffer */ if (buf->pos == urb->actual_length) { mutex_lock(&dev->io_mutex); buf->pos = 0; buf->status = BUFSTAT_AVAILABLE; list_move_tail(&buf->buff_list, &dev->free_buff_list); print_buffer_status(); mutex_unlock(&dev->io_mutex); wake_up_interruptible(&dev->wait_buffer); buf = hdpvr_get_next_buffer(dev); } } err: if (!ret && !buf) ret = -EAGAIN; return ret; } static __poll_t hdpvr_poll(struct file *filp, poll_table *wait) { __poll_t req_events = poll_requested_events(wait); struct hdpvr_buffer *buf = NULL; struct hdpvr_device *dev = video_drvdata(filp); __poll_t mask = v4l2_ctrl_poll(filp, wait); if (!(req_events & (EPOLLIN | EPOLLRDNORM))) return mask; mutex_lock(&dev->io_mutex); if (dev->status == STATUS_IDLE) { if (hdpvr_start_streaming(dev)) { v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, "start_streaming failed\n"); dev->status = STATUS_IDLE; } else { dev->owner = filp->private_data; } print_buffer_status(); } mutex_unlock(&dev->io_mutex); buf = hdpvr_get_next_buffer(dev); /* only wait if no data is available */ if (!buf || buf->status != BUFSTAT_READY) { poll_wait(filp, &dev->wait_data, wait); buf = hdpvr_get_next_buffer(dev); } if (buf && buf->status == BUFSTAT_READY) mask |= EPOLLIN | EPOLLRDNORM; return mask; } static const struct v4l2_file_operations hdpvr_fops = { .owner = THIS_MODULE, .open = hdpvr_open, .release = hdpvr_release, .read = hdpvr_read, .poll = hdpvr_poll, .unlocked_ioctl = video_ioctl2, }; /*=======================================================================*/ /* * V4L2 ioctl handling */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct hdpvr_device *dev = video_drvdata(file); strscpy(cap->driver, "hdpvr", sizeof(cap->driver)); strscpy(cap->card, "Hauppauge HD PVR", sizeof(cap->card)); usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); return 0; } static int vidioc_s_std(struct file *file, void *_fh, v4l2_std_id std) { struct hdpvr_device *dev = video_drvdata(file); struct hdpvr_fh *fh = _fh; u8 std_type = 1; if (!fh->legacy_mode && dev->options.video_input == HDPVR_COMPONENT) return -ENODATA; if (dev->status != STATUS_IDLE) return -EBUSY; if (std & V4L2_STD_525_60) std_type = 0; dev->cur_std = std; dev->width = 720; dev->height = std_type ? 576 : 480; return hdpvr_config_call(dev, CTRL_VIDEO_STD_TYPE, std_type); } static int vidioc_g_std(struct file *file, void *_fh, v4l2_std_id *std) { struct hdpvr_device *dev = video_drvdata(file); struct hdpvr_fh *fh = _fh; if (!fh->legacy_mode && dev->options.video_input == HDPVR_COMPONENT) return -ENODATA; *std = dev->cur_std; return 0; } static int vidioc_querystd(struct file *file, void *_fh, v4l2_std_id *a) { struct hdpvr_device *dev = video_drvdata(file); struct hdpvr_video_info vid_info; struct hdpvr_fh *fh = _fh; int ret; *a = V4L2_STD_UNKNOWN; if (dev->options.video_input == HDPVR_COMPONENT) return fh->legacy_mode ? 0 : -ENODATA; ret = get_video_info(dev, &vid_info); if (vid_info.valid && vid_info.width == 720 && (vid_info.height == 480 || vid_info.height == 576)) { *a = (vid_info.height == 480) ? 
V4L2_STD_525_60 : V4L2_STD_625_50; } return ret; } static int vidioc_s_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings) { struct hdpvr_device *dev = video_drvdata(file); struct hdpvr_fh *fh = _fh; int i; fh->legacy_mode = false; if (dev->options.video_input) return -ENODATA; if (dev->status != STATUS_IDLE) return -EBUSY; for (i = 0; i < ARRAY_SIZE(hdpvr_dv_timings); i++) if (v4l2_match_dv_timings(timings, hdpvr_dv_timings + i, 0, false)) break; if (i == ARRAY_SIZE(hdpvr_dv_timings)) return -EINVAL; dev->cur_dv_timings = hdpvr_dv_timings[i]; dev->width = hdpvr_dv_timings[i].bt.width; dev->height = hdpvr_dv_timings[i].bt.height; return 0; } static int vidioc_g_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings) { struct hdpvr_device *dev = video_drvdata(file); struct hdpvr_fh *fh = _fh; fh->legacy_mode = false; if (dev->options.video_input) return -ENODATA; *timings = dev->cur_dv_timings; return 0; } static int vidioc_query_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings) { struct hdpvr_device *dev = video_drvdata(file); struct hdpvr_fh *fh = _fh; struct hdpvr_video_info vid_info; bool interlaced; int ret = 0; int i; fh->legacy_mode = false; if (dev->options.video_input) return -ENODATA; ret = get_video_info(dev, &vid_info); if (ret) return ret; if (!vid_info.valid) return -ENOLCK; interlaced = vid_info.fps <= 30; for (i = 0; i < ARRAY_SIZE(hdpvr_dv_timings); i++) { const struct v4l2_bt_timings *bt = &hdpvr_dv_timings[i].bt; unsigned hsize; unsigned vsize; unsigned fps; hsize = V4L2_DV_BT_FRAME_WIDTH(bt); vsize = V4L2_DV_BT_FRAME_HEIGHT(bt); fps = (unsigned)bt->pixelclock / (hsize * vsize); if (bt->width != vid_info.width || bt->height != vid_info.height || bt->interlaced != interlaced || (fps != vid_info.fps && fps + 1 != vid_info.fps)) continue; *timings = hdpvr_dv_timings[i]; break; } if (i == ARRAY_SIZE(hdpvr_dv_timings)) ret = -ERANGE; return ret; } static int vidioc_enum_dv_timings(struct file *file, void *_fh, struct v4l2_enum_dv_timings *timings) { struct hdpvr_device *dev = video_drvdata(file); struct hdpvr_fh *fh = _fh; fh->legacy_mode = false; memset(timings->reserved, 0, sizeof(timings->reserved)); if (dev->options.video_input) return -ENODATA; if (timings->index >= ARRAY_SIZE(hdpvr_dv_timings)) return -EINVAL; timings->timings = hdpvr_dv_timings[timings->index]; return 0; } static int vidioc_dv_timings_cap(struct file *file, void *_fh, struct v4l2_dv_timings_cap *cap) { struct hdpvr_device *dev = video_drvdata(file); struct hdpvr_fh *fh = _fh; fh->legacy_mode = false; if (dev->options.video_input) return -ENODATA; cap->type = V4L2_DV_BT_656_1120; cap->bt.min_width = 720; cap->bt.max_width = 1920; cap->bt.min_height = 480; cap->bt.max_height = 1080; cap->bt.min_pixelclock = 27000000; cap->bt.max_pixelclock = 74250000; cap->bt.standards = V4L2_DV_BT_STD_CEA861; cap->bt.capabilities = V4L2_DV_BT_CAP_INTERLACED | V4L2_DV_BT_CAP_PROGRESSIVE; return 0; } static const char *iname[] = { [HDPVR_COMPONENT] = "Component", [HDPVR_SVIDEO] = "S-Video", [HDPVR_COMPOSITE] = "Composite", }; static int vidioc_enum_input(struct file *file, void *_fh, struct v4l2_input *i) { unsigned int n; n = i->index; if (n >= HDPVR_VIDEO_INPUTS) return -EINVAL; i->type = V4L2_INPUT_TYPE_CAMERA; strscpy(i->name, iname[n], sizeof(i->name)); i->audioset = 1<<HDPVR_RCA_FRONT | 1<<HDPVR_RCA_BACK | 1<<HDPVR_SPDIF; i->capabilities = n ? V4L2_IN_CAP_STD : V4L2_IN_CAP_DV_TIMINGS; i->std = n ? 
V4L2_STD_ALL : 0; return 0; } static int vidioc_s_input(struct file *file, void *_fh, unsigned int index) { struct hdpvr_device *dev = video_drvdata(file); int retval; if (index >= HDPVR_VIDEO_INPUTS) return -EINVAL; if (dev->status != STATUS_IDLE) return -EBUSY; retval = hdpvr_config_call(dev, CTRL_VIDEO_INPUT_VALUE, index+1); if (!retval) { dev->options.video_input = index; /* * Unfortunately gstreamer calls ENUMSTD and bails out if it * won't find any formats, even though component input is * selected. This means that we have to leave tvnorms at * V4L2_STD_ALL. We cannot use the 'legacy' trick since * tvnorms is set at the device node level and not at the * filehandle level. * * Comment this out for now, but if the legacy mode can be * removed in the future, then this code should be enabled * again. dev->video_dev.tvnorms = (index != HDPVR_COMPONENT) ? V4L2_STD_ALL : 0; */ } return retval; } static int vidioc_g_input(struct file *file, void *private_data, unsigned int *index) { struct hdpvr_device *dev = video_drvdata(file); *index = dev->options.video_input; return 0; } static const char *audio_iname[] = { [HDPVR_RCA_FRONT] = "RCA front", [HDPVR_RCA_BACK] = "RCA back", [HDPVR_SPDIF] = "SPDIF", }; static int vidioc_enumaudio(struct file *file, void *priv, struct v4l2_audio *audio) { unsigned int n; n = audio->index; if (n >= HDPVR_AUDIO_INPUTS) return -EINVAL; audio->capability = V4L2_AUDCAP_STEREO; strscpy(audio->name, audio_iname[n], sizeof(audio->name)); return 0; } static int vidioc_s_audio(struct file *file, void *private_data, const struct v4l2_audio *audio) { struct hdpvr_device *dev = video_drvdata(file); int retval; if (audio->index >= HDPVR_AUDIO_INPUTS) return -EINVAL; if (dev->status != STATUS_IDLE) return -EBUSY; retval = hdpvr_set_audio(dev, audio->index+1, dev->options.audio_codec); if (!retval) dev->options.audio_input = audio->index; return retval; } static int vidioc_g_audio(struct file *file, void *private_data, struct v4l2_audio *audio) { struct hdpvr_device *dev = video_drvdata(file); audio->index = dev->options.audio_input; audio->capability = V4L2_AUDCAP_STEREO; strscpy(audio->name, audio_iname[audio->index], sizeof(audio->name)); return 0; } static int hdpvr_try_ctrl(struct v4l2_ctrl *ctrl) { struct hdpvr_device *dev = container_of(ctrl->handler, struct hdpvr_device, hdl); switch (ctrl->id) { case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: if (ctrl->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR && dev->video_bitrate->val >= dev->video_bitrate_peak->val) dev->video_bitrate_peak->val = dev->video_bitrate->val + 100000; break; } return 0; } static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl) { struct hdpvr_device *dev = container_of(ctrl->handler, struct hdpvr_device, hdl); struct hdpvr_options *opt = &dev->options; int ret = -EINVAL; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: ret = hdpvr_config_call(dev, CTRL_BRIGHTNESS, ctrl->val); if (ret) break; dev->options.brightness = ctrl->val; return 0; case V4L2_CID_CONTRAST: ret = hdpvr_config_call(dev, CTRL_CONTRAST, ctrl->val); if (ret) break; dev->options.contrast = ctrl->val; return 0; case V4L2_CID_SATURATION: ret = hdpvr_config_call(dev, CTRL_SATURATION, ctrl->val); if (ret) break; dev->options.saturation = ctrl->val; return 0; case V4L2_CID_HUE: ret = hdpvr_config_call(dev, CTRL_HUE, ctrl->val); if (ret) break; dev->options.hue = ctrl->val; return 0; case V4L2_CID_SHARPNESS: ret = hdpvr_config_call(dev, CTRL_SHARPNESS, ctrl->val); if (ret) break; dev->options.sharpness = ctrl->val; return 0; case V4L2_CID_MPEG_AUDIO_ENCODING: if 
		    (dev->flags & HDPVR_FLAG_AC3_CAP) {
			opt->audio_codec = ctrl->val;
			return hdpvr_set_audio(dev, opt->audio_input + 1,
					       opt->audio_codec);
		}
		return 0;
	case V4L2_CID_MPEG_VIDEO_ENCODING:
		return 0;
/*	case V4L2_CID_MPEG_VIDEO_B_FRAMES: */
/*		if (ctrl->value == 0 && !(opt->gop_mode & 0x2)) { */
/*			opt->gop_mode |= 0x2; */
/*			hdpvr_config_call(dev, CTRL_GOP_MODE_VALUE, */
/*					  opt->gop_mode); */
/*		} */
/*		if (ctrl->value == 128 && opt->gop_mode & 0x2) { */
/*			opt->gop_mode &= ~0x2; */
/*			hdpvr_config_call(dev, CTRL_GOP_MODE_VALUE, */
/*					  opt->gop_mode); */
/*		} */
/*		break; */
	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: {
		uint peak_bitrate = dev->video_bitrate_peak->val / 100000;
		uint bitrate = dev->video_bitrate->val / 100000;

		if (ctrl->is_new) {
			if (ctrl->val == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
				opt->bitrate_mode = HDPVR_CONSTANT;
			else
				opt->bitrate_mode = HDPVR_VARIABLE_AVERAGE;
			hdpvr_config_call(dev, CTRL_BITRATE_MODE_VALUE,
					  opt->bitrate_mode);
			v4l2_ctrl_activate(dev->video_bitrate_peak,
					   ctrl->val !=
					   V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
		}

		if (dev->video_bitrate_peak->is_new ||
		    dev->video_bitrate->is_new) {
			opt->bitrate = bitrate;
			opt->peak_bitrate = peak_bitrate;
			hdpvr_set_bitrate(dev);
		}
		return 0;
	}
	case V4L2_CID_MPEG_STREAM_TYPE:
		return 0;
	default:
		break;
	}
	return ret;
}

static int vidioc_enum_fmt_vid_cap(struct file *file, void *private_data,
				   struct v4l2_fmtdesc *f)
{
	if (f->index != 0)
		return -EINVAL;

	f->pixelformat = V4L2_PIX_FMT_MPEG;
	return 0;
}

static int vidioc_g_fmt_vid_cap(struct file *file, void *_fh,
				struct v4l2_format *f)
{
	struct hdpvr_device *dev = video_drvdata(file);
	struct hdpvr_fh *fh = _fh;
	int ret;

	/*
	 * The original driver would always return the current detected
	 * resolution as the format (and EFAULT if it couldn't be detected).
	 * With the introduction of VIDIOC_QUERY_DV_TIMINGS there is now a
	 * better way of doing this, but to stay compatible with existing
	 * applications we assume legacy mode every time an application opens
	 * the device. Only if one of the new DV_TIMINGS ioctls is called
	 * will the filehandle go into 'normal' mode where g_fmt returns the
	 * last set format.
*/ if (fh->legacy_mode) { struct hdpvr_video_info vid_info; ret = get_video_info(dev, &vid_info); if (ret < 0) return ret; if (!vid_info.valid) return -EFAULT; f->fmt.pix.width = vid_info.width; f->fmt.pix.height = vid_info.height; } else { f->fmt.pix.width = dev->width; f->fmt.pix.height = dev->height; } f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; f->fmt.pix.sizeimage = dev->bulk_in_size; f->fmt.pix.bytesperline = 0; if (f->fmt.pix.width == 720) { /* SDTV formats */ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; f->fmt.pix.field = V4L2_FIELD_INTERLACED; } else { /* HDTV formats */ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709; f->fmt.pix.field = V4L2_FIELD_NONE; } return 0; } static int vidioc_encoder_cmd(struct file *filp, void *priv, struct v4l2_encoder_cmd *a) { struct hdpvr_device *dev = video_drvdata(filp); int res = 0; mutex_lock(&dev->io_mutex); a->flags = 0; switch (a->cmd) { case V4L2_ENC_CMD_START: if (dev->owner && filp->private_data != dev->owner) { res = -EBUSY; break; } if (dev->status == STATUS_STREAMING) break; res = hdpvr_start_streaming(dev); if (!res) dev->owner = filp->private_data; else dev->status = STATUS_IDLE; break; case V4L2_ENC_CMD_STOP: if (dev->owner && filp->private_data != dev->owner) { res = -EBUSY; break; } if (dev->status == STATUS_IDLE) break; res = hdpvr_stop_streaming(dev); if (!res) dev->owner = NULL; break; default: v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "Unsupported encoder cmd %d\n", a->cmd); res = -EINVAL; break; } mutex_unlock(&dev->io_mutex); return res; } static int vidioc_try_encoder_cmd(struct file *filp, void *priv, struct v4l2_encoder_cmd *a) { a->flags = 0; switch (a->cmd) { case V4L2_ENC_CMD_START: case V4L2_ENC_CMD_STOP: return 0; default: return -EINVAL; } } static const struct v4l2_ioctl_ops hdpvr_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_s_std = vidioc_s_std, .vidioc_g_std = vidioc_g_std, .vidioc_querystd = vidioc_querystd, .vidioc_s_dv_timings = vidioc_s_dv_timings, .vidioc_g_dv_timings = vidioc_g_dv_timings, .vidioc_query_dv_timings= vidioc_query_dv_timings, .vidioc_enum_dv_timings = vidioc_enum_dv_timings, .vidioc_dv_timings_cap = vidioc_dv_timings_cap, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_enumaudio = vidioc_enumaudio, .vidioc_g_audio = vidioc_g_audio, .vidioc_s_audio = vidioc_s_audio, .vidioc_enum_fmt_vid_cap= vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_encoder_cmd = vidioc_encoder_cmd, .vidioc_try_encoder_cmd = vidioc_try_encoder_cmd, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; static void hdpvr_device_release(struct video_device *vdev) { struct hdpvr_device *dev = video_get_drvdata(vdev); hdpvr_delete(dev); flush_work(&dev->worker); v4l2_device_unregister(&dev->v4l2_dev); v4l2_ctrl_handler_free(&dev->hdl); /* deregister I2C adapter */ #if IS_ENABLED(CONFIG_I2C) mutex_lock(&dev->i2c_mutex); i2c_del_adapter(&dev->i2c_adapter); mutex_unlock(&dev->i2c_mutex); #endif /* CONFIG_I2C */ kfree(dev->usbc_buf); kfree(dev); } static const struct video_device hdpvr_video_template = { .fops = &hdpvr_fops, .release = hdpvr_device_release, .ioctl_ops = &hdpvr_ioctl_ops, .tvnorms = V4L2_STD_ALL, .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE, }; static const struct v4l2_ctrl_ops 
hdpvr_ctrl_ops = { .try_ctrl = hdpvr_try_ctrl, .s_ctrl = hdpvr_s_ctrl, }; int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent, int devnum) { struct v4l2_ctrl_handler *hdl = &dev->hdl; bool ac3 = dev->flags & HDPVR_FLAG_AC3_CAP; int res; // initialize dev->worker INIT_WORK(&dev->worker, hdpvr_transmit_buffers); dev->cur_std = V4L2_STD_525_60; dev->width = 720; dev->height = 480; dev->cur_dv_timings = hdpvr_dv_timings[HDPVR_DEF_DV_TIMINGS_IDX]; v4l2_ctrl_handler_init(hdl, 11); if (dev->fw_ver > 0x15) { v4l2_ctrl_new_std(hdl, &hdpvr_ctrl_ops, V4L2_CID_BRIGHTNESS, 0x0, 0xff, 1, 0x80); v4l2_ctrl_new_std(hdl, &hdpvr_ctrl_ops, V4L2_CID_CONTRAST, 0x0, 0xff, 1, 0x40); v4l2_ctrl_new_std(hdl, &hdpvr_ctrl_ops, V4L2_CID_SATURATION, 0x0, 0xff, 1, 0x40); v4l2_ctrl_new_std(hdl, &hdpvr_ctrl_ops, V4L2_CID_HUE, 0x0, 0x1e, 1, 0xf); v4l2_ctrl_new_std(hdl, &hdpvr_ctrl_ops, V4L2_CID_SHARPNESS, 0x0, 0xff, 1, 0x80); } else { v4l2_ctrl_new_std(hdl, &hdpvr_ctrl_ops, V4L2_CID_BRIGHTNESS, 0x0, 0xff, 1, 0x86); v4l2_ctrl_new_std(hdl, &hdpvr_ctrl_ops, V4L2_CID_CONTRAST, 0x0, 0xff, 1, 0x80); v4l2_ctrl_new_std(hdl, &hdpvr_ctrl_ops, V4L2_CID_SATURATION, 0x0, 0xff, 1, 0x80); v4l2_ctrl_new_std(hdl, &hdpvr_ctrl_ops, V4L2_CID_HUE, 0x0, 0xff, 1, 0x80); v4l2_ctrl_new_std(hdl, &hdpvr_ctrl_ops, V4L2_CID_SHARPNESS, 0x0, 0xff, 1, 0x80); } v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, V4L2_CID_MPEG_STREAM_TYPE, V4L2_MPEG_STREAM_TYPE_MPEG2_TS, 0x1, V4L2_MPEG_STREAM_TYPE_MPEG2_TS); v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, V4L2_CID_MPEG_AUDIO_ENCODING, ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC, 0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC); v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, V4L2_CID_MPEG_VIDEO_ENCODING, V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3, V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC); dev->video_mode = v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE_MODE, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR); dev->video_bitrate = v4l2_ctrl_new_std(hdl, &hdpvr_ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE, 1000000, 13500000, 100000, 6500000); dev->video_bitrate_peak = v4l2_ctrl_new_std(hdl, &hdpvr_ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, 1100000, 20200000, 100000, 9000000); dev->v4l2_dev.ctrl_handler = hdl; if (hdl->error) { res = hdl->error; v4l2_err(&dev->v4l2_dev, "Could not register controls\n"); goto error; } v4l2_ctrl_cluster(3, &dev->video_mode); res = v4l2_ctrl_handler_setup(hdl); if (res < 0) { v4l2_err(&dev->v4l2_dev, "Could not setup controls\n"); goto error; } /* setup and register video device */ dev->video_dev = hdpvr_video_template; strscpy(dev->video_dev.name, "Hauppauge HD PVR", sizeof(dev->video_dev.name)); dev->video_dev.v4l2_dev = &dev->v4l2_dev; video_set_drvdata(&dev->video_dev, dev); res = video_register_device(&dev->video_dev, VFL_TYPE_VIDEO, devnum); if (res < 0) { v4l2_err(&dev->v4l2_dev, "video_device registration failed\n"); goto error; } return 0; error: v4l2_ctrl_handler_free(hdl); return res; }
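Since this driver exposes capture exclusively through read() (the device_caps advertise V4L2_CAP_READWRITE, and no streaming I/O buffers are registered), a user-space consumer only needs open() and read(). A minimal hedged sketch follows; the /dev/video0 path is an assumption, as the node number depends on what video_register_device() was given:

/* Hedged user-space sketch: save the MPEG-TS stream via plain read().
 * The first read() triggers hdpvr_start_streaming(); closing the fd
 * runs hdpvr_release(), which stops streaming for the owning handle. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[65536];
	int fd = open("/dev/video0", O_RDONLY);	/* node number assumed */
	FILE *out = fopen("capture.ts", "w");
	ssize_t n;

	if (fd < 0 || !out)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, out);
	fclose(out);
	close(fd);
	return 0;
}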
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	bmd->iter = *data;
	if (iter_is_iovec(data)) {
memcpy(bmd->iov, iter_iov(data), sizeof(struct iovec) * data->nr_segs); bmd->iter.__iov = bmd->iov; } return bmd; } /** * bio_copy_from_iter - copy all pages from iov_iter to bio * @bio: The &struct bio which describes the I/O as destination * @iter: iov_iter as source * * Copy all pages from iov_iter to bio. * Returns 0 on success, or error on failure. */ static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter) { struct bio_vec *bvec; struct bvec_iter_all iter_all; bio_for_each_segment_all(bvec, bio, iter_all) { ssize_t ret; ret = copy_page_from_iter(bvec->bv_page, bvec->bv_offset, bvec->bv_len, iter); if (!iov_iter_count(iter)) break; if (ret < bvec->bv_len) return -EFAULT; } return 0; } /** * bio_copy_to_iter - copy all pages from bio to iov_iter * @bio: The &struct bio which describes the I/O as source * @iter: iov_iter as destination * * Copy all pages from bio to iov_iter. * Returns 0 on success, or error on failure. */ static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) { struct bio_vec *bvec; struct bvec_iter_all iter_all; bio_for_each_segment_all(bvec, bio, iter_all) { ssize_t ret; ret = copy_page_to_iter(bvec->bv_page, bvec->bv_offset, bvec->bv_len, &iter); if (!iov_iter_count(&iter)) break; if (ret < bvec->bv_len) return -EFAULT; } return 0; } /** * bio_uncopy_user - finish previously mapped bio * @bio: bio being terminated * * Free pages allocated from bio_copy_user_iov() and write back data * to user space in case of a read. */ static int bio_uncopy_user(struct bio *bio) { struct bio_map_data *bmd = bio->bi_private; int ret = 0; if (!bmd->is_null_mapped) { /* * if we're in a workqueue, the request is orphaned, so * don't copy into a random user address space, just free * and return -EINTR so user space doesn't expect any data. */ if (!current->mm) ret = -EINTR; else if (bio_data_dir(bio) == READ) ret = bio_copy_to_iter(bio, bmd->iter); if (bmd->is_our_pages) bio_free_pages(bio); } kfree(bmd); return ret; } static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data, struct iov_iter *iter, gfp_t gfp_mask) { struct bio_map_data *bmd; struct page *page; struct bio *bio; int i = 0, ret; int nr_pages; unsigned int len = iter->count; unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0; bmd = bio_alloc_map_data(iter, gfp_mask); if (!bmd) return -ENOMEM; /* * We need to do a deep copy of the iov_iter including the iovecs. * The caller provided iov might point to an on-stack or otherwise * shortlived one. 
*/ bmd->is_our_pages = !map_data; bmd->is_null_mapped = (map_data && map_data->null_mapped); nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE)); ret = -ENOMEM; bio = bio_kmalloc(nr_pages, gfp_mask); if (!bio) goto out_bmd; bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq)); if (map_data) { nr_pages = 1U << map_data->page_order; i = map_data->offset / PAGE_SIZE; } while (len) { unsigned int bytes = PAGE_SIZE; bytes -= offset; if (bytes > len) bytes = len; if (map_data) { if (i == map_data->nr_entries * nr_pages) { ret = -ENOMEM; goto cleanup; } page = map_data->pages[i / nr_pages]; page += (i % nr_pages); i++; } else { page = alloc_page(GFP_NOIO | gfp_mask); if (!page) { ret = -ENOMEM; goto cleanup; } } if (bio_add_page(bio, page, bytes, offset) < bytes) { if (!map_data) __free_page(page); break; } len -= bytes; offset = 0; } if (map_data) map_data->offset += bio->bi_iter.bi_size; /* * success */ if (iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) { ret = bio_copy_from_iter(bio, iter); if (ret) goto cleanup; } else if (map_data && map_data->from_user) { struct iov_iter iter2 = *iter; /* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */ iter2.data_source = ITER_SOURCE; ret = bio_copy_from_iter(bio, &iter2); if (ret) goto cleanup; } else { if (bmd->is_our_pages) zero_fill_bio(bio); iov_iter_advance(iter, bio->bi_iter.bi_size); } bio->bi_private = bmd; ret = blk_rq_append_bio(rq, bio); if (ret) goto cleanup; return 0; cleanup: if (!map_data) bio_free_pages(bio); bio_uninit(bio); kfree(bio); out_bmd: kfree(bmd); return ret; } static void blk_mq_map_bio_put(struct bio *bio) { if (bio->bi_opf & REQ_ALLOC_CACHE) { bio_put(bio); } else { bio_uninit(bio); kfree(bio); } } static struct bio *blk_rq_map_bio_alloc(struct request *rq, unsigned int nr_vecs, gfp_t gfp_mask) { struct bio *bio; if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) { bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask, &fs_bio_set); if (!bio) return NULL; } else { bio = bio_kmalloc(nr_vecs, gfp_mask); if (!bio) return NULL; bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq)); } return bio; } static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, gfp_t gfp_mask) { unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS); struct bio *bio; int ret; if (!iov_iter_count(iter)) return -EINVAL; bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask); if (!bio) return -ENOMEM; ret = bio_iov_iter_get_pages(bio, iter); if (ret) goto out_put; ret = blk_rq_append_bio(rq, bio); if (ret) goto out_release; return 0; out_release: bio_release_pages(bio, false); out_put: blk_mq_map_bio_put(bio); return ret; } static void bio_invalidate_vmalloc_pages(struct bio *bio) { #ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE if (bio->bi_private && !op_is_write(bio_op(bio))) { unsigned long i, len = 0; for (i = 0; i < bio->bi_vcnt; i++) len += bio->bi_io_vec[i].bv_len; invalidate_kernel_vmap_range(bio->bi_private, len); } #endif } static void bio_map_kern_endio(struct bio *bio) { bio_invalidate_vmalloc_pages(bio); bio_uninit(bio); kfree(bio); } /** * bio_map_kern - map kernel address into bio * @q: the struct request_queue for the bio * @data: pointer to buffer to map * @len: length in bytes * @gfp_mask: allocation flags for bio allocation * * Map the kernel address into a bio suitable for io to a block * device. Returns an error pointer in case of error. 
*/ static struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, gfp_t gfp_mask) { unsigned long kaddr = (unsigned long)data; unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned long start = kaddr >> PAGE_SHIFT; const int nr_pages = end - start; bool is_vmalloc = is_vmalloc_addr(data); struct page *page; int offset, i; struct bio *bio; bio = bio_kmalloc(nr_pages, gfp_mask); if (!bio) return ERR_PTR(-ENOMEM); bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0); if (is_vmalloc) { flush_kernel_vmap_range(data, len); bio->bi_private = data; } offset = offset_in_page(kaddr); for (i = 0; i < nr_pages; i++) { unsigned int bytes = PAGE_SIZE - offset; if (len <= 0) break; if (bytes > len) bytes = len; if (!is_vmalloc) page = virt_to_page(data); else page = vmalloc_to_page(data); if (bio_add_page(bio, page, bytes, offset) < bytes) { /* we don't support partial mappings */ bio_uninit(bio); kfree(bio); return ERR_PTR(-EINVAL); } data += bytes; len -= bytes; offset = 0; } bio->bi_end_io = bio_map_kern_endio; return bio; } static void bio_copy_kern_endio(struct bio *bio) { bio_free_pages(bio); bio_uninit(bio); kfree(bio); } static void bio_copy_kern_endio_read(struct bio *bio) { char *p = bio->bi_private; struct bio_vec *bvec; struct bvec_iter_all iter_all; bio_for_each_segment_all(bvec, bio, iter_all) { memcpy_from_bvec(p, bvec); p += bvec->bv_len; } bio_copy_kern_endio(bio); } /** * bio_copy_kern - copy kernel address into bio * @q: the struct request_queue for the bio * @data: pointer to buffer to copy * @len: length in bytes * @gfp_mask: allocation flags for bio and page allocation * @reading: data direction is READ * * copy the kernel address into a bio suitable for io to a block * device. Returns an error pointer in case of error. */ static struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, gfp_t gfp_mask, int reading) { unsigned long kaddr = (unsigned long)data; unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned long start = kaddr >> PAGE_SHIFT; struct bio *bio; void *p = data; int nr_pages = 0; /* * Overflow, abort */ if (end < start) return ERR_PTR(-EINVAL); nr_pages = end - start; bio = bio_kmalloc(nr_pages, gfp_mask); if (!bio) return ERR_PTR(-ENOMEM); bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0); while (len) { struct page *page; unsigned int bytes = PAGE_SIZE; if (bytes > len) bytes = len; page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask); if (!page) goto cleanup; if (!reading) memcpy(page_address(page), p, bytes); if (bio_add_page(bio, page, bytes, 0) < bytes) break; len -= bytes; p += bytes; } if (reading) { bio->bi_end_io = bio_copy_kern_endio_read; bio->bi_private = data; } else { bio->bi_end_io = bio_copy_kern_endio; } return bio; cleanup: bio_free_pages(bio); bio_uninit(bio); kfree(bio); return ERR_PTR(-ENOMEM); } /* * Append a bio to a passthrough request. Only works if the bio can be merged * into the request based on the driver constraints. 
*/ int blk_rq_append_bio(struct request *rq, struct bio *bio) { const struct queue_limits *lim = &rq->q->limits; unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT; unsigned int nr_segs = 0; int ret; /* check that the data layout matches the hardware restrictions */ ret = bio_split_rw_at(bio, lim, &nr_segs, max_bytes); if (ret) { /* if we would have to split the bio, copy instead */ if (ret > 0) ret = -EREMOTEIO; return ret; } if (rq->bio) { if (!ll_back_merge_fn(rq, bio, nr_segs)) return -EINVAL; rq->biotail->bi_next = bio; rq->biotail = bio; rq->__data_len += bio->bi_iter.bi_size; bio_crypt_free_ctx(bio); return 0; } rq->nr_phys_segments = nr_segs; rq->bio = rq->biotail = bio; rq->__data_len = bio->bi_iter.bi_size; return 0; } EXPORT_SYMBOL(blk_rq_append_bio); /* Prepare bio for passthrough IO given ITER_BVEC iter */ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter) { unsigned int max_bytes = rq->q->limits.max_hw_sectors << SECTOR_SHIFT; struct bio *bio; int ret; if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes) return -EINVAL; /* reuse the bvecs from the iterator instead of allocating new ones */ bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL); if (!bio) return -ENOMEM; bio_iov_bvec_set(bio, iter); ret = blk_rq_append_bio(rq, bio); if (ret) blk_mq_map_bio_put(bio); return ret; } /** * blk_rq_map_user_iov - map user data to a request, for passthrough requests * @q: request queue where request should be inserted * @rq: request to map data to * @map_data: pointer to the rq_map_data holding pages (if necessary) * @iter: iovec iterator * @gfp_mask: memory allocation flags * * Description: * Data will be mapped directly for zero copy I/O, if possible. Otherwise * a kernel bounce buffer is used. * * A matching blk_rq_unmap_user() must be issued at the end of I/O, while * still in process context. 
*/ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, const struct iov_iter *iter, gfp_t gfp_mask) { bool copy = false, map_bvec = false; unsigned long align = blk_lim_dma_alignment_and_pad(&q->limits); struct bio *bio = NULL; struct iov_iter i; int ret = -EINVAL; if (map_data) copy = true; else if (blk_queue_may_bounce(q)) copy = true; else if (iov_iter_alignment(iter) & align) copy = true; else if (iov_iter_is_bvec(iter)) map_bvec = true; else if (!user_backed_iter(iter)) copy = true; else if (queue_virt_boundary(q)) copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter); if (map_bvec) { ret = blk_rq_map_user_bvec(rq, iter); if (!ret) return 0; if (ret != -EREMOTEIO) goto fail; /* fall back to copying the data on limits mismatches */ copy = true; } i = *iter; do { if (copy) ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask); else ret = bio_map_user_iov(rq, &i, gfp_mask); if (ret) { if (ret == -EREMOTEIO) ret = -EINVAL; goto unmap_rq; } if (!bio) bio = rq->bio; } while (iov_iter_count(&i)); return 0; unmap_rq: blk_rq_unmap_user(bio); fail: rq->bio = NULL; return ret; } EXPORT_SYMBOL(blk_rq_map_user_iov); int blk_rq_map_user(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, void __user *ubuf, unsigned long len, gfp_t gfp_mask) { struct iov_iter i; int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i); if (unlikely(ret < 0)) return ret; return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask); } EXPORT_SYMBOL(blk_rq_map_user); int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data, void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask, bool vec, int iov_count, bool check_iter_count, int rw) { int ret = 0; if (vec) { struct iovec fast_iov[UIO_FASTIOV]; struct iovec *iov = fast_iov; struct iov_iter iter; ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len, UIO_FASTIOV, &iov, &iter); if (ret < 0) return ret; if (iov_count) { /* SG_IO howto says that the shorter of the two wins */ iov_iter_truncate(&iter, buf_len); if (check_iter_count && !iov_iter_count(&iter)) { kfree(iov); return -EINVAL; } } ret = blk_rq_map_user_iov(req->q, req, map_data, &iter, gfp_mask); kfree(iov); } else if (buf_len) { ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len, gfp_mask); } return ret; } EXPORT_SYMBOL(blk_rq_map_user_io); /** * blk_rq_unmap_user - unmap a request with user data * @bio: start of bio list * * Description: * Unmap a rq previously mapped by blk_rq_map_user(). The caller must * supply the original rq->bio from the blk_rq_map_user() return, since * the I/O completion may have changed rq->bio. */ int blk_rq_unmap_user(struct bio *bio) { struct bio *next_bio; int ret = 0, ret2; while (bio) { if (bio->bi_private) { ret2 = bio_uncopy_user(bio); if (ret2 && !ret) ret = ret2; } else { bio_release_pages(bio, bio_data_dir(bio) == READ); } if (bio_integrity(bio)) bio_integrity_unmap_user(bio); next_bio = bio; bio = bio->bi_next; blk_mq_map_bio_put(next_bio); } return ret; } EXPORT_SYMBOL(blk_rq_unmap_user); /** * blk_rq_map_kern - map kernel data to a request, for passthrough requests * @q: request queue where request should be inserted * @rq: request to fill * @kbuf: the kernel buffer * @len: length of user data * @gfp_mask: memory allocation flags * * Description: * Data will be mapped directly if possible. Otherwise a bounce * buffer is used. Can be called multiple times to append multiple * buffers. 
*/ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, unsigned int len, gfp_t gfp_mask) { int reading = rq_data_dir(rq) == READ; unsigned long addr = (unsigned long) kbuf; struct bio *bio; int ret; if (len > (queue_max_hw_sectors(q) << 9)) return -EINVAL; if (!len || !kbuf) return -EINVAL; if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) || blk_queue_may_bounce(q)) bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); else bio = bio_map_kern(q, kbuf, len, gfp_mask); if (IS_ERR(bio)) return PTR_ERR(bio); bio->bi_opf &= ~REQ_OP_MASK; bio->bi_opf |= req_op(rq); ret = blk_rq_append_bio(rq, bio); if (unlikely(ret)) { bio_uninit(bio); kfree(bio); } return ret; } EXPORT_SYMBOL(blk_rq_map_kern);
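The kernel-doc for blk_rq_unmap_user() above stresses that the caller must hand back the original rq->bio, because completion may advance rq->bio. A hedged in-kernel sketch of the usual map/execute/unmap pattern follows; my_passthrough_read and its arguments are invented for illustration, and real users (sg, nvme passthrough) add command setup, timeouts, and fuller error handling:

/* Hedged sketch of a passthrough submitter built on blk_rq_map_user(). */
static int my_passthrough_read(struct request_queue *q,
			       void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_free;

	bio = rq->bio;		/* save: completion may change rq->bio */
	blk_execute_rq(rq, false);

	/* must run in process context, as required by the comment above */
	ret = blk_rq_unmap_user(bio);
out_free:
	blk_mq_free_request(rq);
	return ret;
}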
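The file that follows (lib/nlattr.c) validates netlink attributes against nla_policy tables supplied by each netlink family. As a hedged illustration of the consumer side, a family might define a table like this, using the NLA_POLICY_* helpers from <net/netlink.h>; the MY_ATTR_* names and bounds are invented for the example:

/* Hedged example policy; the attributes and limits are illustrative only. */
enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_PORT,		/* u16, range-validated */
	MY_ATTR_NAME,		/* NUL-terminated string, max 15 chars */
	MY_ATTR_FLAGS,		/* u32 with reserved upper bits */
	__MY_ATTR_MAX,
};
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_PORT]	= NLA_POLICY_RANGE(NLA_U16, 1, 1024),
	[MY_ATTR_NAME]	= { .type = NLA_NUL_STRING, .len = 15 },
	[MY_ATTR_FLAGS]	= NLA_POLICY_MASK(NLA_U32, 0x0000ffff),
};

When a message is parsed against such a table, out-of-range integers are rejected by nla_validate_int_range() below, and masked types by nla_validate_mask().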
// SPDX-License-Identifier: GPL-2.0
/*
 * NETLINK	Netlink attributes
 *
 *		Authors:	Thomas Graf <tgraf@suug.ch>
 *				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/nospec.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/netlink.h>

/* For these data types, attribute length should be exactly the given
 * size. However, to maintain compatibility with broken commands, if the
 * attribute length does not match the expected size a warning is emitted
 * to the user that the command is sending invalid data and needs to be fixed.
 */
static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
	[NLA_U8]	= sizeof(u8),
	[NLA_U16]	= sizeof(u16),
	[NLA_U32]	= sizeof(u32),
	[NLA_U64]	= sizeof(u64),
	[NLA_S8]	= sizeof(s8),
	[NLA_S16]	= sizeof(s16),
	[NLA_S32]	= sizeof(s32),
	[NLA_S64]	= sizeof(s64),
	[NLA_BE16]	= sizeof(__be16),
	[NLA_BE32]	= sizeof(__be32),
};

static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
	[NLA_U8]	= sizeof(u8),
	[NLA_U16]	= sizeof(u16),
	[NLA_U32]	= sizeof(u32),
	[NLA_U64]	= sizeof(u64),
	[NLA_MSECS]	= sizeof(u64),
	[NLA_NESTED]	= NLA_HDRLEN,
	[NLA_S8]	= sizeof(s8),
	[NLA_S16]	= sizeof(s16),
	[NLA_S32]	= sizeof(s32),
	[NLA_S64]	= sizeof(s64),
	[NLA_BE16]	= sizeof(__be16),
	[NLA_BE32]	= sizeof(__be32),
};

/*
 * Nested policies might refer back to the original
 * policy in some cases, and userspace could try to
 * abuse that and recurse by nesting in the right
 * ways. Limit recursion to avoid this problem.
*/ #define MAX_POLICY_RECURSION_DEPTH 10 static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, unsigned int validate, struct netlink_ext_ack *extack, struct nlattr **tb, unsigned int depth); static int validate_nla_bitfield32(const struct nlattr *nla, const u32 valid_flags_mask) { const struct nla_bitfield32 *bf = nla_data(nla); if (!valid_flags_mask) return -EINVAL; /*disallow invalid bit selector */ if (bf->selector & ~valid_flags_mask) return -EINVAL; /*disallow invalid bit values */ if (bf->value & ~valid_flags_mask) return -EINVAL; /*disallow valid bit values that are not selected*/ if (bf->value & ~bf->selector) return -EINVAL; return 0; } static int nla_validate_array(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack, unsigned int validate, unsigned int depth) { const struct nlattr *entry; int rem; nla_for_each_attr(entry, head, len, rem) { int ret; if (nla_len(entry) == 0) continue; if (nla_len(entry) < NLA_HDRLEN) { NL_SET_ERR_MSG_ATTR_POL(extack, entry, policy, "Array element too short"); return -ERANGE; } ret = __nla_validate_parse(nla_data(entry), nla_len(entry), maxtype, policy, validate, extack, NULL, depth + 1); if (ret < 0) return ret; } return 0; } void nla_get_range_unsigned(const struct nla_policy *pt, struct netlink_range_validation *range) { WARN_ON_ONCE(pt->validation_type != NLA_VALIDATE_RANGE_PTR && (pt->min < 0 || pt->max < 0)); range->min = 0; switch (pt->type) { case NLA_U8: range->max = U8_MAX; break; case NLA_U16: case NLA_BE16: case NLA_BINARY: range->max = U16_MAX; break; case NLA_U32: case NLA_BE32: range->max = U32_MAX; break; case NLA_U64: case NLA_UINT: case NLA_MSECS: range->max = U64_MAX; break; default: WARN_ON_ONCE(1); return; } switch (pt->validation_type) { case NLA_VALIDATE_RANGE: case NLA_VALIDATE_RANGE_WARN_TOO_LONG: range->min = pt->min; range->max = pt->max; break; case NLA_VALIDATE_RANGE_PTR: *range = *pt->range; break; case NLA_VALIDATE_MIN: range->min = pt->min; break; case NLA_VALIDATE_MAX: range->max = pt->max; break; default: break; } } static int nla_validate_range_unsigned(const struct nla_policy *pt, const struct nlattr *nla, struct netlink_ext_ack *extack, unsigned int validate) { struct netlink_range_validation range; u64 value; switch (pt->type) { case NLA_U8: value = nla_get_u8(nla); break; case NLA_U16: value = nla_get_u16(nla); break; case NLA_U32: value = nla_get_u32(nla); break; case NLA_U64: value = nla_get_u64(nla); break; case NLA_UINT: value = nla_get_uint(nla); break; case NLA_MSECS: value = nla_get_u64(nla); break; case NLA_BINARY: value = nla_len(nla); break; case NLA_BE16: value = ntohs(nla_get_be16(nla)); break; case NLA_BE32: value = ntohl(nla_get_be32(nla)); break; default: return -EINVAL; } nla_get_range_unsigned(pt, &range); if (pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG && pt->type == NLA_BINARY && value > range.max) { pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n", current->comm, pt->type); if (validate & NL_VALIDATE_STRICT_ATTRS) { NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "invalid attribute length"); return -EINVAL; } /* this assumes min <= max (don't validate against min) */ return 0; } if (value < range.min || value > range.max) { bool binary = pt->type == NLA_BINARY; if (binary) NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "binary attribute size out of range"); else NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "integer out of range"); return -ERANGE; 
} return 0; } void nla_get_range_signed(const struct nla_policy *pt, struct netlink_range_validation_signed *range) { switch (pt->type) { case NLA_S8: range->min = S8_MIN; range->max = S8_MAX; break; case NLA_S16: range->min = S16_MIN; range->max = S16_MAX; break; case NLA_S32: range->min = S32_MIN; range->max = S32_MAX; break; case NLA_S64: case NLA_SINT: range->min = S64_MIN; range->max = S64_MAX; break; default: WARN_ON_ONCE(1); return; } switch (pt->validation_type) { case NLA_VALIDATE_RANGE: range->min = pt->min; range->max = pt->max; break; case NLA_VALIDATE_RANGE_PTR: *range = *pt->range_signed; break; case NLA_VALIDATE_MIN: range->min = pt->min; break; case NLA_VALIDATE_MAX: range->max = pt->max; break; default: break; } } static int nla_validate_int_range_signed(const struct nla_policy *pt, const struct nlattr *nla, struct netlink_ext_ack *extack) { struct netlink_range_validation_signed range; s64 value; switch (pt->type) { case NLA_S8: value = nla_get_s8(nla); break; case NLA_S16: value = nla_get_s16(nla); break; case NLA_S32: value = nla_get_s32(nla); break; case NLA_S64: value = nla_get_s64(nla); break; case NLA_SINT: value = nla_get_sint(nla); break; default: return -EINVAL; } nla_get_range_signed(pt, &range); if (value < range.min || value > range.max) { NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "integer out of range"); return -ERANGE; } return 0; } static int nla_validate_int_range(const struct nla_policy *pt, const struct nlattr *nla, struct netlink_ext_ack *extack, unsigned int validate) { switch (pt->type) { case NLA_U8: case NLA_U16: case NLA_U32: case NLA_U64: case NLA_UINT: case NLA_MSECS: case NLA_BINARY: case NLA_BE16: case NLA_BE32: return nla_validate_range_unsigned(pt, nla, extack, validate); case NLA_S8: case NLA_S16: case NLA_S32: case NLA_S64: case NLA_SINT: return nla_validate_int_range_signed(pt, nla, extack); default: WARN_ON(1); return -EINVAL; } } static int nla_validate_mask(const struct nla_policy *pt, const struct nlattr *nla, struct netlink_ext_ack *extack) { u64 value; switch (pt->type) { case NLA_U8: value = nla_get_u8(nla); break; case NLA_U16: value = nla_get_u16(nla); break; case NLA_U32: value = nla_get_u32(nla); break; case NLA_U64: value = nla_get_u64(nla); break; case NLA_UINT: value = nla_get_uint(nla); break; case NLA_BE16: value = ntohs(nla_get_be16(nla)); break; case NLA_BE32: value = ntohl(nla_get_be32(nla)); break; default: return -EINVAL; } if (value & ~(u64)pt->mask) { NL_SET_ERR_MSG_ATTR(extack, nla, "reserved bit set"); return -EINVAL; } return 0; } static int validate_nla(const struct nlattr *nla, int maxtype, const struct nla_policy *policy, unsigned int validate, struct netlink_ext_ack *extack, unsigned int depth) { u16 strict_start_type = policy[0].strict_start_type; const struct nla_policy *pt; int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla); int err = -ERANGE; if (strict_start_type && type >= strict_start_type) validate |= NL_VALIDATE_STRICT; if (type <= 0 || type > maxtype) return 0; type = array_index_nospec(type, maxtype + 1); pt = &policy[type]; BUG_ON(pt->type > NLA_TYPE_MAX); if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) { pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n", current->comm, type); if (validate & NL_VALIDATE_STRICT_ATTRS) { NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "invalid attribute length"); return -EINVAL; } } if (validate & NL_VALIDATE_NESTED) { if ((pt->type == NLA_NESTED || pt->type == NLA_NESTED_ARRAY) && !(nla->nla_type & NLA_F_NESTED)) { 
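/*
 * Strict nesting check: attributes the policy declares as
 * NLA_NESTED/NLA_NESTED_ARRAY must carry NLA_F_NESTED in nla_type,
 * and (below) non-nested types must not, so sender and parser agree
 * on the payload layout.
 */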
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "NLA_F_NESTED is missing"); return -EINVAL; } if (pt->type != NLA_NESTED && pt->type != NLA_NESTED_ARRAY && pt->type != NLA_UNSPEC && (nla->nla_type & NLA_F_NESTED)) { NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "NLA_F_NESTED not expected"); return -EINVAL; } } switch (pt->type) { case NLA_REJECT: if (extack && pt->reject_message) { NL_SET_BAD_ATTR(extack, nla); extack->_msg = pt->reject_message; return -EINVAL; } err = -EINVAL; goto out_err; case NLA_FLAG: if (attrlen > 0) goto out_err; break; case NLA_SINT: case NLA_UINT: if (attrlen != sizeof(u32) && attrlen != sizeof(u64)) { NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "invalid attribute length"); return -EINVAL; } break; case NLA_BITFIELD32: if (attrlen != sizeof(struct nla_bitfield32)) goto out_err; err = validate_nla_bitfield32(nla, pt->bitfield32_valid); if (err) goto out_err; break; case NLA_NUL_STRING: if (pt->len) minlen = min_t(int, attrlen, pt->len + 1); else minlen = attrlen; if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) { err = -EINVAL; goto out_err; } fallthrough; case NLA_STRING: if (attrlen < 1) goto out_err; if (pt->len) { char *buf = nla_data(nla); if (buf[attrlen - 1] == '\0') attrlen--; if (attrlen > pt->len) goto out_err; } break; case NLA_BINARY: if (pt->len && attrlen > pt->len) goto out_err; break; case NLA_NESTED: /* a nested attribute is allowed to be empty; if it's not, * it must have a size of at least NLA_HDRLEN. */ if (attrlen == 0) break; if (attrlen < NLA_HDRLEN) goto out_err; if (pt->nested_policy) { err = __nla_validate_parse(nla_data(nla), nla_len(nla), pt->len, pt->nested_policy, validate, extack, NULL, depth + 1); if (err < 0) { /* * return directly to preserve the inner * error message/attribute pointer */ return err; } } break; case NLA_NESTED_ARRAY: /* a nested array attribute is allowed to be empty; if it's not, * it must have a size of at least NLA_HDRLEN. */
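/*
 * Array elements are parsed as nested attribute streams of their own;
 * nla_validate_array() re-enters __nla_validate_parse() with depth + 1,
 * so MAX_POLICY_RECURSION_DEPTH bounds this recursion as well.
 */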
if (attrlen == 0) break; if (attrlen < NLA_HDRLEN) goto out_err; if (pt->nested_policy) { int err; err = nla_validate_array(nla_data(nla), nla_len(nla), pt->len, pt->nested_policy, extack, validate, depth); if (err < 0) { /* * return directly to preserve the inner * error message/attribute pointer */ return err; } } break; case NLA_UNSPEC: if (validate & NL_VALIDATE_UNSPEC) { NL_SET_ERR_MSG_ATTR(extack, nla, "Unsupported attribute"); return -EINVAL; } if (attrlen < pt->len) goto out_err; break; default: if (pt->len) minlen = pt->len; else minlen = nla_attr_minlen[pt->type]; if (attrlen < minlen) goto out_err; } /* further validation */ switch (pt->validation_type) { case NLA_VALIDATE_NONE: /* nothing to do */ break; case NLA_VALIDATE_RANGE_PTR: case NLA_VALIDATE_RANGE: case NLA_VALIDATE_RANGE_WARN_TOO_LONG: case NLA_VALIDATE_MIN: case NLA_VALIDATE_MAX: err = nla_validate_int_range(pt, nla, extack, validate); if (err) return err; break; case NLA_VALIDATE_MASK: err = nla_validate_mask(pt, nla, extack); if (err) return err; break; case NLA_VALIDATE_FUNCTION: if (pt->validate) { err = pt->validate(nla, extack); if (err) return err; } break; } return 0; out_err: NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt, "Attribute failed policy validation"); return err; } static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, unsigned int validate, struct netlink_ext_ack *extack, struct nlattr **tb, unsigned int depth) { const struct nlattr *nla; int rem; if (depth >= MAX_POLICY_RECURSION_DEPTH) { NL_SET_ERR_MSG(extack, "allowed policy recursion depth exceeded"); return -EINVAL; } if (tb) memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); nla_for_each_attr(nla, head, len, rem) { u16 type = nla_type(nla); if (type == 0 || type > maxtype) { if (validate & NL_VALIDATE_MAXTYPE) { NL_SET_ERR_MSG_ATTR(extack, nla, "Unknown attribute type"); return -EINVAL; } continue; } type = array_index_nospec(type, maxtype + 1); if (policy) { int err = validate_nla(nla, maxtype, policy, validate, extack, depth); if (err < 0) return err; } if (tb) tb[type] = (struct nlattr *)nla; } if (unlikely(rem > 0)) { pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n", rem, current->comm); NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes"); if (validate & NL_VALIDATE_TRAILING) return -EINVAL; } return 0; } /** * __nla_validate - Validate a stream of attributes * @head: head of attribute stream * @len: length of attribute stream * @maxtype: maximum attribute type to be expected * @policy: validation policy * @validate: validation strictness * @extack: extended ACK report struct * * Validates all attributes in the specified attribute stream against the * specified policy. Validation depends on the validate flags passed, see * &enum netlink_validation for more details on that. * See documentation of struct nla_policy for more details. * * Returns 0 on success or a negative error code. */ int __nla_validate(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy, unsigned int validate, struct netlink_ext_ack *extack) { return __nla_validate_parse(head, len, maxtype, policy, validate, extack, NULL, 0); } EXPORT_SYMBOL(__nla_validate);
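For reference, a minimal sketch of how a caller typically drives this parser. The DEMO_* attribute set and demo_parse() below are invented for illustration; nlmsg_parse(), NLA_POLICY_MIN() and nla_get_u32() are the standard <net/netlink.h> interfaces layered on __nla_parse()/__nla_validate_parse() above:

/* Hypothetical attribute set, for illustration only. */
enum {
	DEMO_ATTR_UNSPEC,
	DEMO_ATTR_IFINDEX,	/* u32, must be >= 1 */
	DEMO_ATTR_NAME,		/* NUL-terminated string, at most 15 chars */
	__DEMO_ATTR_MAX,
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_IFINDEX]	= NLA_POLICY_MIN(NLA_U32, 1),
	[DEMO_ATTR_NAME]	= { .type = NLA_NUL_STRING, .len = 15 },
};

static int demo_parse(const struct nlmsghdr *nlh, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[DEMO_ATTR_MAX + 1];
	int err;

	/* Strict parsing: unknown types, bad lengths and trailing bytes all fail. */
	err = nlmsg_parse(nlh, 0, tb, DEMO_ATTR_MAX, demo_policy, extack);
	if (err)
		return err;

	if (tb[DEMO_ATTR_IFINDEX])
		pr_info("ifindex %u\n", nla_get_u32(tb[DEMO_ATTR_IFINDEX]));
	return 0;
}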
/** * nla_policy_len - Determine the max. length of a policy * @p: policy to use * @n: number of policies * * Determines the max. length of the policy. It is currently used * to allocate Netlink buffers roughly the size of the actual * message. * * Returns the max. total length of the attributes, in bytes. */ int nla_policy_len(const struct nla_policy *p, int n) { int i, len = 0; for (i = 0; i < n; i++, p++) { if (p->len) len += nla_total_size(p->len); else if (nla_attr_len[p->type]) len += nla_total_size(nla_attr_len[p->type]); else if (nla_attr_minlen[p->type]) len += nla_total_size(nla_attr_minlen[p->type]); } return len; } EXPORT_SYMBOL(nla_policy_len); /** * __nla_parse - Parse a stream of attributes into a tb buffer * @tb: destination array with maxtype+1 elements * @maxtype: maximum attribute type to be expected * @head: head of attribute stream * @len: length of attribute stream * @policy: validation policy * @validate: validation strictness * @extack: extended ACK pointer * * Parses a stream of attributes and stores a pointer to each attribute in * the tb array accessible via the attribute type. * Validation is controlled by the @validate parameter. * * Returns 0 on success or a negative error code. */ int __nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, int len, const struct nla_policy *policy, unsigned int validate, struct netlink_ext_ack *extack) { return __nla_validate_parse(head, len, maxtype, policy, validate, extack, tb, 0); } EXPORT_SYMBOL(__nla_parse); /** * nla_find - Find a specific attribute in a stream of attributes * @head: head of attribute stream * @len: length of attribute stream * @attrtype: type of attribute to look for * * Returns the first attribute in the stream matching the specified type. */ struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype) { const struct nlattr *nla; int rem; nla_for_each_attr(nla, head, len, rem) if (nla_type(nla) == attrtype) return (struct nlattr *)nla; return NULL; } EXPORT_SYMBOL(nla_find); /** * nla_strscpy - Copy string attribute payload into a sized buffer * @dst: Where to copy the string to. * @nla: Attribute to copy the string from. * @dstsize: Size of destination buffer. * * Copies at most dstsize - 1 bytes into the destination buffer. * Unlike strscpy() the destination buffer is always padded out. * * Return: * * srclen - Returns @nla length (not including the trailing %NUL). * * -E2BIG - If @dstsize is 0 or greater than U16_MAX or @nla length greater * than @dstsize. */ ssize_t nla_strscpy(char *dst, const struct nlattr *nla, size_t dstsize) { size_t srclen = nla_len(nla); char *src = nla_data(nla); ssize_t ret; size_t len; if (dstsize == 0 || WARN_ON_ONCE(dstsize > U16_MAX)) return -E2BIG; if (srclen > 0 && src[srclen - 1] == '\0') srclen--; if (srclen >= dstsize) { len = dstsize - 1; ret = -E2BIG; } else { len = srclen; ret = len; } memcpy(dst, src, len); /* Zero pad end of dst. */ memset(dst + len, 0, dstsize - len); return ret; } EXPORT_SYMBOL(nla_strscpy);
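A short usage sketch for nla_strscpy(), assuming a validated string attribute; demo_get_name() and its parameters are hypothetical:

static int demo_get_name(const struct nlattr *attr, char *buf, size_t buflen)
{
	ssize_t n;

	/*
	 * nla_strscpy() always NUL-terminates and zero-pads the whole
	 * destination buffer; it returns -E2BIG when the payload would
	 * not fit (the copy is still truncated safely).
	 */
	n = nla_strscpy(buf, attr, buflen);
	return n < 0 ? n : 0;
}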
/** * nla_strdup - Copy string attribute payload into a newly allocated buffer * @nla: attribute to copy the string from * @flags: the type of memory to allocate (see kmalloc). * * Returns a pointer to the allocated buffer or NULL on error. */ char *nla_strdup(const struct nlattr *nla, gfp_t flags) { size_t srclen = nla_len(nla); char *src = nla_data(nla), *dst; if (srclen > 0 && src[srclen - 1] == '\0') srclen--; dst = kmalloc(srclen + 1, flags); if (dst != NULL) { memcpy(dst, src, srclen); dst[srclen] = '\0'; } return dst; } EXPORT_SYMBOL(nla_strdup); /** * nla_memcpy - Copy a netlink attribute into another memory area * @dest: where to copy to * @src: netlink attribute to copy from * @count: size of the destination area * * Note: The number of bytes copied is limited by the length of * attribute's payload. * * Returns the number of bytes copied. */ int nla_memcpy(void *dest, const struct nlattr *src, int count) { int minlen = min_t(int, count, nla_len(src)); memcpy(dest, nla_data(src), minlen); if (count > minlen) memset(dest + minlen, 0, count - minlen); return minlen; } EXPORT_SYMBOL(nla_memcpy); /** * nla_memcmp - Compare an attribute with sized memory area * @nla: netlink attribute * @data: memory area * @size: size of memory area */ int nla_memcmp(const struct nlattr *nla, const void *data, size_t size) { int d = nla_len(nla) - size; if (d == 0) d = memcmp(nla_data(nla), data, size); return d; } EXPORT_SYMBOL(nla_memcmp); /** * nla_strcmp - Compare a string attribute against a string * @nla: netlink string attribute * @str: another string */ int nla_strcmp(const struct nlattr *nla, const char *str) { int len = strlen(str); char *buf = nla_data(nla); int attrlen = nla_len(nla); int d; while (attrlen > 0 && buf[attrlen - 1] == '\0') attrlen--; d = attrlen - len; if (d == 0) d = memcmp(nla_data(nla), str, len); return d; } EXPORT_SYMBOL(nla_strcmp);
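And a sketch combining the string helpers just defined; demo_dup_if_match() and the "eth0" literal are illustrative only:

static char *demo_dup_if_match(const struct nlattr *attr)
{
	/* nla_strcmp() tolerates a trailing NUL in the attribute payload. */
	if (nla_strcmp(attr, "eth0") == 0)
		return nla_strdup(attr, GFP_KERNEL); /* caller must kfree() */
	return NULL;
}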
#ifdef CONFIG_NET /** * __nla_reserve - reserve room for attribute on the skb * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload. */ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen) { struct nlattr *nla; nla = skb_put(skb, nla_total_size(attrlen)); nla->nla_type = attrtype; nla->nla_len = nla_attr_size(attrlen); memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen)); return nla; } EXPORT_SYMBOL(__nla_reserve); /** * __nla_reserve_64bit - reserve room for attribute on the skb and align it * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload * @padattr: attribute type for the padding * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. It also ensures that this * attribute will have a 64-bit aligned nla_data() area. * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload. */ struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen, int padattr) { nla_align_64bit(skb, padattr); return __nla_reserve(skb, attrtype, attrlen); } EXPORT_SYMBOL(__nla_reserve_64bit); /** * __nla_reserve_nohdr - reserve room for attribute without header * @skb: socket buffer to reserve room on * @attrlen: length of attribute payload * * Reserves room for attribute payload without a header. * * The caller is responsible to ensure that the skb provides enough * tailroom for the payload. */ void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen) { return skb_put_zero(skb, NLA_ALIGN(attrlen)); } EXPORT_SYMBOL(__nla_reserve_nohdr); /** * nla_reserve - reserve room for attribute on the skb * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. * * Returns NULL if the tailroom of the skb is insufficient to store * the attribute header and payload. */ struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen) { if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen))) return NULL; return __nla_reserve(skb, attrtype, attrlen); } EXPORT_SYMBOL(nla_reserve); /** * nla_reserve_64bit - reserve room for attribute on the skb and align it * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload * @padattr: attribute type for the padding * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. It also ensures that this * attribute will have a 64-bit aligned nla_data() area. * * Returns NULL if the tailroom of the skb is insufficient to store * the attribute header and payload. */ struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen, int padattr) { size_t len; if (nla_need_padding_for_64bit(skb)) len = nla_total_size_64bit(attrlen); else len = nla_total_size(attrlen); if (unlikely(skb_tailroom(skb) < len)) return NULL; return __nla_reserve_64bit(skb, attrtype, attrlen, padattr); } EXPORT_SYMBOL(nla_reserve_64bit); /** * nla_reserve_nohdr - reserve room for attribute without header * @skb: socket buffer to reserve room on * @attrlen: length of attribute payload * * Reserves room for attribute payload without a header. * * Returns NULL if the tailroom of the skb is insufficient to store * the attribute payload. */ void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen) { if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) return NULL; return __nla_reserve_nohdr(skb, attrlen); } EXPORT_SYMBOL(nla_reserve_nohdr); /** * __nla_put - Add a netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload. */ void __nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data) { struct nlattr *nla; nla = __nla_reserve(skb, attrtype, attrlen); memcpy(nla_data(nla), data, attrlen); } EXPORT_SYMBOL(__nla_put); /** * __nla_put_64bit - Add a netlink attribute to a socket buffer and align it * @skb: socket buffer to add attribute to * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload * @padattr: attribute type for the padding * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload.
*/ void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen, const void *data, int padattr) { struct nlattr *nla; nla = __nla_reserve_64bit(skb, attrtype, attrlen, padattr); memcpy(nla_data(nla), data, attrlen); } EXPORT_SYMBOL(__nla_put_64bit); /** * __nla_put_nohdr - Add a netlink attribute without header * @skb: socket buffer to add attribute to * @attrlen: length of attribute payload * @data: head of attribute payload * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute payload. */ void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data) { void *start; start = __nla_reserve_nohdr(skb, attrlen); memcpy(start, data, attrlen); } EXPORT_SYMBOL(__nla_put_nohdr); /** * nla_put - Add a netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload * * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store * the attribute header and payload. */ int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data) { if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen))) return -EMSGSIZE; __nla_put(skb, attrtype, attrlen, data); return 0; } EXPORT_SYMBOL(nla_put); /** * nla_put_64bit - Add a netlink attribute to a socket buffer and align it * @skb: socket buffer to add attribute to * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload * @padattr: attribute type for the padding * * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store * the attribute header and payload. */ int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen, const void *data, int padattr) { size_t len; if (nla_need_padding_for_64bit(skb)) len = nla_total_size_64bit(attrlen); else len = nla_total_size(attrlen); if (unlikely(skb_tailroom(skb) < len)) return -EMSGSIZE; __nla_put_64bit(skb, attrtype, attrlen, data, padattr); return 0; } EXPORT_SYMBOL(nla_put_64bit); /** * nla_put_nohdr - Add a netlink attribute without header * @skb: socket buffer to add attribute to * @attrlen: length of attribute payload * @data: head of attribute payload * * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store * the attribute payload. */ int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data) { if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) return -EMSGSIZE; __nla_put_nohdr(skb, attrlen, data); return 0; } EXPORT_SYMBOL(nla_put_nohdr); /** * nla_append - Add a netlink attribute without header or padding * @skb: socket buffer to add attribute to * @attrlen: length of attribute payload * @data: head of attribute payload * * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store * the attribute payload. */ int nla_append(struct sk_buff *skb, int attrlen, const void *data) { if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) return -EMSGSIZE; skb_put_data(skb, data, attrlen); return 0; } EXPORT_SYMBOL(nla_append); #endif
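Rounding out the fill side with a hedged sketch, reusing the hypothetical DEMO_* attributes from the parsing sketch above (DEMO_ATTR_STAT and DEMO_ATTR_PAD are likewise invented); nla_put_u32(), nla_put_string() and nla_put_u64_64bit() are the usual <net/netlink.h> wrappers over the nla_put()/nla_put_64bit() implementations in this file:

static int demo_fill(struct sk_buff *skb, u32 ifindex, const char *name,
		     u64 stat)
{
	/* Each put fails with -EMSGSIZE when skb tailroom runs out. */
	if (nla_put_u32(skb, DEMO_ATTR_IFINDEX, ifindex) ||
	    nla_put_string(skb, DEMO_ATTR_NAME, name))
		return -EMSGSIZE;

	/*
	 * 64-bit values take an explicit pad attribute type so that
	 * nla_data() ends up 8-byte aligned on architectures that
	 * require it (DEMO_ATTR_STAT/DEMO_ATTR_PAD are hypothetical).
	 */
	if (nla_put_u64_64bit(skb, DEMO_ATTR_STAT, stat, DEMO_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}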
// SPDX-License-Identifier: GPL-2.0-or-later /* cx231xx-core.c - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> Based on em28xx driver */ #include "cx231xx.h" #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <media/v4l2-common.h> #include <media/tuner.h> #include "cx231xx-reg.h" /* #define ENABLE_DEBUG_ISOC_FRAMES */ static unsigned int core_debug; module_param(core_debug, int, 0644); MODULE_PARM_DESC(core_debug, "enable debug messages [core]"); #define cx231xx_coredbg(fmt, arg...) do {\ if (core_debug) \ printk(KERN_INFO "%s %s :"fmt, \ dev->name, __func__ , ##arg); } while (0) static unsigned int reg_debug; module_param(reg_debug, int, 0644); MODULE_PARM_DESC(reg_debug, "enable debug messages [URB reg]"); static int alt = CX231XX_PINOUT; module_param(alt, int, 0644); MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint"); #define cx231xx_isocdbg(fmt, arg...) do {\ if (core_debug) \ printk(KERN_INFO "%s %s :"fmt, \ dev->name, __func__ , ##arg); } while (0)
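/* The debug knobs above are module parameters with mode 0644, so they can be
 * set at load time (e.g. "modprobe cx231xx core_debug=1 reg_debug=1") or
 * toggled at runtime through /sys/module/cx231xx/parameters/. */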
/***************************************************************** * Device control list functions * ******************************************************************/ LIST_HEAD(cx231xx_devlist); static DEFINE_MUTEX(cx231xx_devlist_mutex); /* * cx231xx_release_resources() * unregisters the v4l2, i2c and usb devices * called when the device gets disconnected or at module unload */ void cx231xx_remove_from_devlist(struct cx231xx *dev) { if (dev == NULL) return; if (dev->udev == NULL) return; if (atomic_read(&dev->devlist_count) > 0) { mutex_lock(&cx231xx_devlist_mutex); list_del(&dev->devlist); atomic_dec(&dev->devlist_count); mutex_unlock(&cx231xx_devlist_mutex); } }; void cx231xx_add_into_devlist(struct cx231xx *dev) { mutex_lock(&cx231xx_devlist_mutex); list_add_tail(&dev->devlist, &cx231xx_devlist); atomic_inc(&dev->devlist_count); mutex_unlock(&cx231xx_devlist_mutex); }; static LIST_HEAD(cx231xx_extension_devlist); int cx231xx_register_extension(struct cx231xx_ops *ops) { struct cx231xx *dev = NULL; mutex_lock(&cx231xx_devlist_mutex); list_add_tail(&ops->next, &cx231xx_extension_devlist); list_for_each_entry(dev, &cx231xx_devlist, devlist) { ops->init(dev); dev_info(dev->dev, "%s initialized\n", ops->name); } mutex_unlock(&cx231xx_devlist_mutex); return 0; } EXPORT_SYMBOL(cx231xx_register_extension); void cx231xx_unregister_extension(struct cx231xx_ops *ops) { struct cx231xx *dev = NULL; mutex_lock(&cx231xx_devlist_mutex); list_for_each_entry(dev, &cx231xx_devlist, devlist) { ops->fini(dev); dev_info(dev->dev, "%s removed\n", ops->name); } list_del(&ops->next); mutex_unlock(&cx231xx_devlist_mutex); } EXPORT_SYMBOL(cx231xx_unregister_extension); void cx231xx_init_extension(struct cx231xx *dev) { struct cx231xx_ops *ops = NULL; mutex_lock(&cx231xx_devlist_mutex); list_for_each_entry(ops, &cx231xx_extension_devlist, next) { if (ops->init) ops->init(dev); } mutex_unlock(&cx231xx_devlist_mutex); } void cx231xx_close_extension(struct cx231xx *dev) { struct cx231xx_ops *ops = NULL; mutex_lock(&cx231xx_devlist_mutex); list_for_each_entry(ops, &cx231xx_extension_devlist, next) { if (ops->fini) ops->fini(dev); } mutex_unlock(&cx231xx_devlist_mutex); } /**************************************************************** * U S B related functions * *****************************************************************/ int cx231xx_send_usb_command(struct cx231xx_i2c *i2c_bus, struct cx231xx_i2c_xfer_data *req_data) { int status = 0; struct cx231xx *dev = i2c_bus->dev; struct VENDOR_REQUEST_IN ven_req; u8 saddr_len = 0; u8 _i2c_period = 0; u8 _i2c_nostop = 0; u8 _i2c_reserve = 0; if (dev->state & DEV_DISCONNECTED) return -ENODEV; /* Get the I2C period, nostop and reserve parameters */ _i2c_period = i2c_bus->i2c_period; _i2c_nostop = i2c_bus->i2c_nostop; _i2c_reserve = i2c_bus->i2c_reserve; saddr_len = req_data->saddr_len; /* Set wValue */ ven_req.wValue = (req_data->dev_addr << 9 | _i2c_period << 4 | saddr_len << 2 | _i2c_nostop << 1 | I2C_SYNC | _i2c_reserve << 6); /* set channel number */ if (req_data->direction & I2C_M_RD) { /* channel number; for reads, the spec requires channel_num + 4 */ ven_req.bRequest = i2c_bus->nr + 4; } else ven_req.bRequest = i2c_bus->nr; /* channel number */ /* set index value */ switch (saddr_len) { case 0: ven_req.wIndex = 0; /* need check */ break; case 1: ven_req.wIndex = (req_data->saddr_dat & 0xff); break; case 2: ven_req.wIndex = req_data->saddr_dat; break; } /* set wLength
value */ ven_req.wLength = req_data->buf_size; /* set bData value */ ven_req.bData = 0; /* set the direction */ if (req_data->direction) { ven_req.direction = USB_DIR_IN; memset(req_data->p_buffer, 0x00, ven_req.wLength); } else ven_req.direction = USB_DIR_OUT; /* set the buffer for read / write */ ven_req.pBuff = req_data->p_buffer; /* call common vendor command request */ status = cx231xx_send_vendor_cmd(dev, &ven_req); if (status < 0 && !dev->i2c_scan_running) { dev_err(dev->dev, "%s: failed with status -%d\n", __func__, status); } return status; } EXPORT_SYMBOL_GPL(cx231xx_send_usb_command); /* * Sends/Receives URB control messages, assuring to use a kalloced buffer * for all operations (dev->urb_buf), to avoid using stacked buffers, as * they aren't safe for usage with USB, due to DMA restrictions. * Also implements the debug code for control URB's. */ static int __usb_control_msg(struct cx231xx *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout) { int rc, i; if (reg_debug) { printk(KERN_DEBUG "%s: (pipe 0x%08x): %s: %02x %02x %02x %02x %02x %02x %02x %02x ", dev->name, pipe, (requesttype & USB_DIR_IN) ? "IN" : "OUT", requesttype, request, value & 0xff, value >> 8, index & 0xff, index >> 8, size & 0xff, size >> 8); if (!(requesttype & USB_DIR_IN)) { printk(KERN_CONT ">>>"); for (i = 0; i < size; i++) printk(KERN_CONT " %02x", ((unsigned char *)data)[i]); } } /* Do the real call to usb_control_msg */ mutex_lock(&dev->ctrl_urb_lock); if (!(requesttype & USB_DIR_IN) && size) memcpy(dev->urb_buf, data, size); rc = usb_control_msg(dev->udev, pipe, request, requesttype, value, index, dev->urb_buf, size, timeout); if ((requesttype & USB_DIR_IN) && size) memcpy(data, dev->urb_buf, size); mutex_unlock(&dev->ctrl_urb_lock); if (reg_debug) { if (unlikely(rc < 0)) { printk(KERN_CONT "FAILED!\n"); return rc; } if ((requesttype & USB_DIR_IN)) { printk(KERN_CONT "<<<"); for (i = 0; i < size; i++) printk(KERN_CONT " %02x", ((unsigned char *)data)[i]); } printk(KERN_CONT "\n"); } return rc; } /* * cx231xx_read_ctrl_reg() * reads data from the usb device specifying bRequest and wValue */ int cx231xx_read_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf, int len) { u8 val = 0; int ret; int pipe = usb_rcvctrlpipe(dev->udev, 0); if (dev->state & DEV_DISCONNECTED) return -ENODEV; if (len > URB_MAX_CTRL_SIZE) return -EINVAL; switch (len) { case 1: val = ENABLE_ONE_BYTE; break; case 2: val = ENABLE_TWE_BYTE; break; case 3: val = ENABLE_THREE_BYTE; break; case 4: val = ENABLE_FOUR_BYTE; break; default: val = 0xFF; /* invalid option */ } if (val == 0xFF) return -EINVAL; ret = __usb_control_msg(dev, pipe, req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, reg, buf, len, HZ); return ret; } int cx231xx_send_vendor_cmd(struct cx231xx *dev, struct VENDOR_REQUEST_IN *ven_req) { int ret; int pipe = 0; int unsend_size = 0; u8 *pdata; if (dev->state & DEV_DISCONNECTED) return -ENODEV; if ((ven_req->wLength > URB_MAX_CTRL_SIZE)) return -EINVAL; if (ven_req->direction) pipe = usb_rcvctrlpipe(dev->udev, 0); else pipe = usb_sndctrlpipe(dev->udev, 0); /* * If the cx23102 read more than 4 bytes with i2c bus, * need chop to 4 byte per request */ if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) || (ven_req->bRequest == 0x5) || (ven_req->bRequest == 0x6) || /* Internal Master 3 Bus can send * and receive only 4 bytes per time */ (ven_req->bRequest == 0x2))) { unsend_size = 0; pdata = ven_req->pBuff; unsend_size = ven_req->wLength; /* 
the first package */ ven_req->wValue = ven_req->wValue & 0xFFFB; ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x2; ret = __usb_control_msg(dev, pipe, ven_req->bRequest, ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ven_req->wValue, ven_req->wIndex, pdata, 0x0004, HZ); unsend_size = unsend_size - 4; /* the middle package */ ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x42; while (unsend_size - 4 > 0) { pdata = pdata + 4; ret = __usb_control_msg(dev, pipe, ven_req->bRequest, ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ven_req->wValue, ven_req->wIndex, pdata, 0x0004, HZ); unsend_size = unsend_size - 4; } /* the last package */ ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x40; pdata = pdata + 4; ret = __usb_control_msg(dev, pipe, ven_req->bRequest, ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ven_req->wValue, ven_req->wIndex, pdata, unsend_size, HZ); } else { ret = __usb_control_msg(dev, pipe, ven_req->bRequest, ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ven_req->wValue, ven_req->wIndex, ven_req->pBuff, ven_req->wLength, HZ); } return ret; } /* * cx231xx_write_ctrl_reg() * sends data to the usb device, specifying bRequest */ int cx231xx_write_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf, int len) { u8 val = 0; int ret; int pipe = usb_sndctrlpipe(dev->udev, 0); if (dev->state & DEV_DISCONNECTED) return -ENODEV; if ((len < 1) || (len > URB_MAX_CTRL_SIZE)) return -EINVAL; switch (len) { case 1: val = ENABLE_ONE_BYTE; break; case 2: val = ENABLE_TWE_BYTE; break; case 3: val = ENABLE_THREE_BYTE; break; case 4: val = ENABLE_FOUR_BYTE; break; default: val = 0xFF; /* invalid option */ } if (val == 0xFF) return -EINVAL; if (reg_debug) { int byte; cx231xx_isocdbg("(pipe 0x%08x): OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>", pipe, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, req, 0, val, reg & 0xff, reg >> 8, len & 0xff, len >> 8); for (byte = 0; byte < len; byte++) cx231xx_isocdbg(" %02x", (unsigned char)buf[byte]); cx231xx_isocdbg("\n"); } ret = __usb_control_msg(dev, pipe, req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, reg, buf, len, HZ); return ret; } /**************************************************************** * USB Alternate Setting functions * *****************************************************************/ int cx231xx_set_video_alternate(struct cx231xx *dev) { int errCode, prev_alt = dev->video_mode.alt; unsigned int min_pkt_size = dev->width * 2 + 4; u32 usb_interface_index = 0; /* When image size is bigger than a certain value, the frame size should be increased, otherwise, only green screen will be received. */ if (dev->width * 2 * dev->height > 720 * 240 * 2) min_pkt_size *= 2; if (dev->width > 360) { /* resolutions: 720,704,640 */ dev->video_mode.alt = 3; } else if (dev->width > 180) { /* resolutions: 360,352,320,240 */ dev->video_mode.alt = 2; } else if (dev->width > 0) { /* resolutions: 180,176,160,128,88 */ dev->video_mode.alt = 1; } else { /* Change to alt0 BULK to release USB bandwidth */ dev->video_mode.alt = 0; } if (dev->USE_ISO == 0) dev->video_mode.alt = 0; cx231xx_coredbg("dev->video_mode.alt= %d\n", dev->video_mode.alt); /* Get the correct video interface Index */ usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. 
video_index + 1; if (dev->video_mode.alt != prev_alt) { cx231xx_coredbg("minimum isoc packet size: %u (alt=%d)\n", min_pkt_size, dev->video_mode.alt); if (dev->video_mode.alt_max_pkt_size != NULL) dev->video_mode.max_pkt_size = dev->video_mode.alt_max_pkt_size[dev->video_mode.alt]; cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n", dev->video_mode.alt, dev->video_mode.max_pkt_size); errCode = usb_set_interface(dev->udev, usb_interface_index, dev->video_mode.alt); if (errCode < 0) { dev_err(dev->dev, "cannot change alt number to %d (error=%i)\n", dev->video_mode.alt, errCode); return errCode; } } return 0; } int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt) { int status = 0; u32 usb_interface_index = 0; u32 max_pkt_size = 0; switch (index) { case INDEX_TS1: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. ts1_index + 1; dev->ts1_mode.alt = alt; if (dev->ts1_mode.alt_max_pkt_size != NULL) max_pkt_size = dev->ts1_mode.max_pkt_size = dev->ts1_mode.alt_max_pkt_size[dev->ts1_mode.alt]; break; case INDEX_TS2: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. ts2_index + 1; break; case INDEX_AUDIO: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. audio_index + 1; dev->adev.alt = alt; if (dev->adev.alt_max_pkt_size != NULL) max_pkt_size = dev->adev.max_pkt_size = dev->adev.alt_max_pkt_size[dev->adev.alt]; break; case INDEX_VIDEO: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. video_index + 1; dev->video_mode.alt = alt; if (dev->video_mode.alt_max_pkt_size != NULL) max_pkt_size = dev->video_mode.max_pkt_size = dev->video_mode.alt_max_pkt_size[dev->video_mode. alt]; break; case INDEX_VANC: if (dev->board.no_alt_vanc) return 0; usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. vanc_index + 1; dev->vbi_mode.alt = alt; if (dev->vbi_mode.alt_max_pkt_size != NULL) max_pkt_size = dev->vbi_mode.max_pkt_size = dev->vbi_mode.alt_max_pkt_size[dev->vbi_mode.alt]; break; case INDEX_HANC: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. hanc_index + 1; dev->sliced_cc_mode.alt = alt; if (dev->sliced_cc_mode.alt_max_pkt_size != NULL) max_pkt_size = dev->sliced_cc_mode.max_pkt_size = dev->sliced_cc_mode.alt_max_pkt_size[dev-> sliced_cc_mode. alt]; break; default: break; } if (alt > 0 && max_pkt_size == 0) { dev_err(dev->dev, "can't change interface %d alt no. to %d: Max. Pkt size = 0\n", usb_interface_index, alt); /*To workaround error number=-71 on EP0 for videograbber, need add following codes.*/ if (dev->board.no_alt_vanc) return -1; } cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u,Interface = %d\n", alt, max_pkt_size, usb_interface_index); if (usb_interface_index > 0) { status = usb_set_interface(dev->udev, usb_interface_index, alt); if (status < 0) { dev_err(dev->dev, "can't change interface %d alt no. 
to %d (err=%i)\n", usb_interface_index, alt, status); return status; } } return status; } EXPORT_SYMBOL_GPL(cx231xx_set_alt_setting); int cx231xx_gpio_set(struct cx231xx *dev, struct cx231xx_reg_seq *gpio) { int rc = 0; if (!gpio) return rc; /* Send GPIO reset sequences specified at board entry */ while (gpio->sleep >= 0) { rc = cx231xx_set_gpio_value(dev, gpio->bit, gpio->val); if (rc < 0) return rc; if (gpio->sleep > 0) msleep(gpio->sleep); gpio++; } return rc; } int cx231xx_demod_reset(struct cx231xx *dev) { u8 status = 0; u8 value[4] = { 0, 0, 0, 0 }; status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); cx231xx_coredbg("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0], value[1], value[2], value[3]); cx231xx_coredbg("Enter cx231xx_demod_reset()\n"); value[1] = (u8) 0x3; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(10); value[1] = (u8) 0x0; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(10); value[1] = (u8) 0x3; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(10); status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); cx231xx_coredbg("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0], value[1], value[2], value[3]); return status; } EXPORT_SYMBOL_GPL(cx231xx_demod_reset); int is_fw_load(struct cx231xx *dev) { return cx231xx_check_fw(dev); } EXPORT_SYMBOL_GPL(is_fw_load); int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode) { int errCode = 0; if (dev->mode == set_mode) return 0; if (set_mode == CX231XX_SUSPEND) { /* Set the chip in power saving mode */ dev->mode = set_mode; } /* Resource is locked */ if (dev->mode != CX231XX_SUSPEND) return -EINVAL; dev->mode = set_mode; if (dev->mode == CX231XX_DIGITAL_MODE)/* Set Digital power mode */ { /* set AGC mode to Digital */ switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); break; case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1); break; case CX231XX_BOARD_HAUPPAUGE_EXETER: case CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx: errCode = cx231xx_set_power_mode(dev, POLARIS_AVMODE_DIGITAL); break; default: break; } } else/* Set Analog Power mode */ { /* set AGC mode to Analog */ switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1); break; case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: case CX231XX_BOARD_HAUPPAUGE_EXETER: case CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx: case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); break; default: break; } } if (errCode < 0) { dev_err(dev->dev, "Failed to set devmode to %s: error: %i", dev->mode == CX231XX_DIGITAL_MODE ? 
"digital" : "analog", errCode); return errCode; } return 0; } EXPORT_SYMBOL_GPL(cx231xx_set_mode); int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size) { int errCode = 0; int actlen = -1; int ret = -ENOMEM; u32 *buffer; buffer = kmemdup(firmware, EP5_BUF_SIZE, GFP_KERNEL); if (buffer == NULL) return -ENOMEM; ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 5), buffer, EP5_BUF_SIZE, &actlen, EP5_TIMEOUT_MS); if (ret) dev_err(dev->dev, "bulk message failed: %d (%d/%d)", ret, size, actlen); else { errCode = actlen != size ? -1 : 0; } kfree(buffer); return errCode; } /***************************************************************** * URB Streaming functions * ******************************************************************/ /* * IRQ callback, called by URB callback */ static void cx231xx_isoc_irq_callback(struct urb *urb) { struct cx231xx_dmaqueue *dma_q = urb->context; struct cx231xx_video_mode *vmode = container_of(dma_q, struct cx231xx_video_mode, vidq); struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode); unsigned long flags; int i; switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ cx231xx_isocdbg("urb completion error %d.\n", urb->status); break; } /* Copy data from URB */ spin_lock_irqsave(&dev->video_mode.slock, flags); dev->video_mode.isoc_ctl.isoc_copy(dev, urb); spin_unlock_irqrestore(&dev->video_mode.slock, flags); /* Reset urb buffers */ for (i = 0; i < urb->number_of_packets; i++) { urb->iso_frame_desc[i].status = 0; urb->iso_frame_desc[i].actual_length = 0; } urb->status = usb_submit_urb(urb, GFP_ATOMIC); if (urb->status) { cx231xx_isocdbg("urb resubmit failed (error=%i)\n", urb->status); } } /***************************************************************** * URB Streaming functions * ******************************************************************/ /* * IRQ callback, called by URB callback */ static void cx231xx_bulk_irq_callback(struct urb *urb) { struct cx231xx_dmaqueue *dma_q = urb->context; struct cx231xx_video_mode *vmode = container_of(dma_q, struct cx231xx_video_mode, vidq); struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode); unsigned long flags; switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; case -EPIPE: /* stall */ cx231xx_isocdbg("urb completion error - device is stalled.\n"); return; default: /* error */ cx231xx_isocdbg("urb completion error %d.\n", urb->status); break; } /* Copy data from URB */ spin_lock_irqsave(&dev->video_mode.slock, flags); dev->video_mode.bulk_ctl.bulk_copy(dev, urb); spin_unlock_irqrestore(&dev->video_mode.slock, flags); /* Reset urb buffers */ urb->status = usb_submit_urb(urb, GFP_ATOMIC); if (urb->status) { cx231xx_isocdbg("urb resubmit failed (error=%i)\n", urb->status); } } /* * Stop and Deallocate URBs */ void cx231xx_uninit_isoc(struct cx231xx *dev) { struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq; struct urb *urb; int i; bool broken_pipe = false; cx231xx_isocdbg("cx231xx: called cx231xx_uninit_isoc\n"); dev->video_mode.isoc_ctl.nfields = -1; for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) { urb = dev->video_mode.isoc_ctl.urb[i]; if (urb) { if (!irqs_disabled()) usb_kill_urb(urb); else usb_unlink_urb(urb); if (dev->video_mode.isoc_ctl.transfer_buffer[i]) { usb_free_coherent(dev->udev, urb->transfer_buffer_length, dev->video_mode.isoc_ctl. 
transfer_buffer[i], urb->transfer_dma); } if (urb->status == -EPIPE) { broken_pipe = true; } usb_free_urb(urb); dev->video_mode.isoc_ctl.urb[i] = NULL; } dev->video_mode.isoc_ctl.transfer_buffer[i] = NULL; } if (broken_pipe) { cx231xx_isocdbg("Reset endpoint to recover broken pipe."); usb_reset_endpoint(dev->udev, dev->video_mode.end_point_addr); } kfree(dev->video_mode.isoc_ctl.urb); kfree(dev->video_mode.isoc_ctl.transfer_buffer); kfree(dma_q->p_left_data); dev->video_mode.isoc_ctl.urb = NULL; dev->video_mode.isoc_ctl.transfer_buffer = NULL; dev->video_mode.isoc_ctl.num_bufs = 0; dma_q->p_left_data = NULL; if (dev->mode_tv == 0) cx231xx_capture_start(dev, 0, Raw_Video); else cx231xx_capture_start(dev, 0, TS1_serial_mode); } EXPORT_SYMBOL_GPL(cx231xx_uninit_isoc); /* * Stop and Deallocate URBs */ void cx231xx_uninit_bulk(struct cx231xx *dev) { struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq; struct urb *urb; int i; bool broken_pipe = false; cx231xx_isocdbg("cx231xx: called cx231xx_uninit_bulk\n"); dev->video_mode.bulk_ctl.nfields = -1; for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) { urb = dev->video_mode.bulk_ctl.urb[i]; if (urb) { if (!irqs_disabled()) usb_kill_urb(urb); else usb_unlink_urb(urb); if (dev->video_mode.bulk_ctl.transfer_buffer[i]) { usb_free_coherent(dev->udev, urb->transfer_buffer_length, dev->video_mode.bulk_ctl. transfer_buffer[i], urb->transfer_dma); } if (urb->status == -EPIPE) { broken_pipe = true; } usb_free_urb(urb); dev->video_mode.bulk_ctl.urb[i] = NULL; } dev->video_mode.bulk_ctl.transfer_buffer[i] = NULL; } if (broken_pipe) { cx231xx_isocdbg("Reset endpoint to recover broken pipe."); usb_reset_endpoint(dev->udev, dev->video_mode.end_point_addr); } kfree(dev->video_mode.bulk_ctl.urb); kfree(dev->video_mode.bulk_ctl.transfer_buffer); kfree(dma_q->p_left_data); dev->video_mode.bulk_ctl.urb = NULL; dev->video_mode.bulk_ctl.transfer_buffer = NULL; dev->video_mode.bulk_ctl.num_bufs = 0; dma_q->p_left_data = NULL; if (dev->mode_tv == 0) cx231xx_capture_start(dev, 0, Raw_Video); else cx231xx_capture_start(dev, 0, TS1_serial_mode); } EXPORT_SYMBOL_GPL(cx231xx_uninit_bulk); /* * Allocate URBs and start IRQ */ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets, int num_bufs, int max_pkt_size, int (*isoc_copy) (struct cx231xx *dev, struct urb *urb)) { struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq; int i; int sb_size, pipe; struct urb *urb; int j, k; int rc; /* De-allocates all pending stuff */ cx231xx_uninit_isoc(dev); dma_q->p_left_data = kzalloc(EP5_BUF_SIZE, GFP_KERNEL); if (dma_q->p_left_data == NULL) return -ENOMEM; dev->video_mode.isoc_ctl.isoc_copy = isoc_copy; dev->video_mode.isoc_ctl.num_bufs = num_bufs; dma_q->pos = 0; dma_q->is_partial_line = 0; dma_q->last_sav = 0; dma_q->current_field = -1; dma_q->field1_done = 0; dma_q->lines_per_field = dev->height / 2; dma_q->bytes_left_in_line = dev->width << 1; dma_q->lines_completed = 0; dma_q->mpeg_buffer_done = 0; dma_q->left_data_count = 0; dma_q->mpeg_buffer_completed = 0; dma_q->add_ps_package_head = CX231XX_NEED_ADD_PS_PACKAGE_HEAD; dma_q->ps_head[0] = 0x00; dma_q->ps_head[1] = 0x00; dma_q->ps_head[2] = 0x01; dma_q->ps_head[3] = 0xBA; for (i = 0; i < 8; i++) dma_q->partial_buf[i] = 0; dev->video_mode.isoc_ctl.urb = kcalloc(num_bufs, sizeof(void *), GFP_KERNEL); if (!dev->video_mode.isoc_ctl.urb) { dev_err(dev->dev, "cannot alloc memory for usb buffers\n"); kfree(dma_q->p_left_data); return -ENOMEM; } dev->video_mode.isoc_ctl.transfer_buffer = kcalloc(num_bufs, sizeof(void *), 
GFP_KERNEL); if (!dev->video_mode.isoc_ctl.transfer_buffer) { dev_err(dev->dev, "cannot allocate memory for usbtransfer\n"); kfree(dev->video_mode.isoc_ctl.urb); kfree(dma_q->p_left_data); return -ENOMEM; } dev->video_mode.isoc_ctl.max_pkt_size = max_pkt_size; dev->video_mode.isoc_ctl.buf = NULL; sb_size = max_packets * dev->video_mode.isoc_ctl.max_pkt_size; if (dev->mode_tv == 1) dev->video_mode.end_point_addr = 0x81; else dev->video_mode.end_point_addr = 0x84; /* allocate urbs and transfer buffers */ for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) { urb = usb_alloc_urb(max_packets, GFP_KERNEL); if (!urb) { cx231xx_uninit_isoc(dev); return -ENOMEM; } dev->video_mode.isoc_ctl.urb[i] = urb; dev->video_mode.isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL, &urb->transfer_dma); if (!dev->video_mode.isoc_ctl.transfer_buffer[i]) { dev_err(dev->dev, "unable to allocate %i bytes for transfer buffer %i\n", sb_size, i); cx231xx_uninit_isoc(dev); return -ENOMEM; } memset(dev->video_mode.isoc_ctl.transfer_buffer[i], 0, sb_size); pipe = usb_rcvisocpipe(dev->udev, dev->video_mode.end_point_addr); usb_fill_int_urb(urb, dev->udev, pipe, dev->video_mode.isoc_ctl.transfer_buffer[i], sb_size, cx231xx_isoc_irq_callback, dma_q, 1); urb->number_of_packets = max_packets; urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; k = 0; for (j = 0; j < max_packets; j++) { urb->iso_frame_desc[j].offset = k; urb->iso_frame_desc[j].length = dev->video_mode.isoc_ctl.max_pkt_size; k += dev->video_mode.isoc_ctl.max_pkt_size; } } init_waitqueue_head(&dma_q->wq); /* submit urbs and enables IRQ */ for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) { rc = usb_submit_urb(dev->video_mode.isoc_ctl.urb[i], GFP_ATOMIC); if (rc) { dev_err(dev->dev, "submit of urb %i failed (error=%i)\n", i, rc); cx231xx_uninit_isoc(dev); return rc; } } if (dev->mode_tv == 0) cx231xx_capture_start(dev, 1, Raw_Video); else cx231xx_capture_start(dev, 1, TS1_serial_mode); return 0; } EXPORT_SYMBOL_GPL(cx231xx_init_isoc); /* * Allocate URBs and start IRQ */ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets, int num_bufs, int max_pkt_size, int (*bulk_copy) (struct cx231xx *dev, struct urb *urb)) { struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq; int i; int sb_size, pipe; struct urb *urb; int rc; dev->video_input = dev->video_input > 2 ? 
2 : dev->video_input; cx231xx_coredbg("Setting Video mux to %d\n", dev->video_input); video_mux(dev, dev->video_input); /* De-allocates all pending stuff */ cx231xx_uninit_bulk(dev); dev->video_mode.bulk_ctl.bulk_copy = bulk_copy; dev->video_mode.bulk_ctl.num_bufs = num_bufs; dma_q->pos = 0; dma_q->is_partial_line = 0; dma_q->last_sav = 0; dma_q->current_field = -1; dma_q->field1_done = 0; dma_q->lines_per_field = dev->height / 2; dma_q->bytes_left_in_line = dev->width << 1; dma_q->lines_completed = 0; dma_q->mpeg_buffer_done = 0; dma_q->left_data_count = 0; dma_q->mpeg_buffer_completed = 0; dma_q->ps_head[0] = 0x00; dma_q->ps_head[1] = 0x00; dma_q->ps_head[2] = 0x01; dma_q->ps_head[3] = 0xBA; for (i = 0; i < 8; i++) dma_q->partial_buf[i] = 0; dev->video_mode.bulk_ctl.urb = kcalloc(num_bufs, sizeof(void *), GFP_KERNEL); if (!dev->video_mode.bulk_ctl.urb) { dev_err(dev->dev, "cannot alloc memory for usb buffers\n"); return -ENOMEM; } dev->video_mode.bulk_ctl.transfer_buffer = kcalloc(num_bufs, sizeof(void *), GFP_KERNEL); if (!dev->video_mode.bulk_ctl.transfer_buffer) { dev_err(dev->dev, "cannot allocate memory for usbtransfer\n"); kfree(dev->video_mode.bulk_ctl.urb); return -ENOMEM; } dev->video_mode.bulk_ctl.max_pkt_size = max_pkt_size; dev->video_mode.bulk_ctl.buf = NULL; sb_size = max_packets * dev->video_mode.bulk_ctl.max_pkt_size; if (dev->mode_tv == 1) dev->video_mode.end_point_addr = 0x81; else dev->video_mode.end_point_addr = 0x84; /* allocate urbs and transfer buffers */ for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) { urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { cx231xx_uninit_bulk(dev); return -ENOMEM; } dev->video_mode.bulk_ctl.urb[i] = urb; urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; dev->video_mode.bulk_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL, &urb->transfer_dma); if (!dev->video_mode.bulk_ctl.transfer_buffer[i]) { dev_err(dev->dev, "unable to allocate %i bytes for transfer buffer %i\n", sb_size, i); cx231xx_uninit_bulk(dev); return -ENOMEM; } memset(dev->video_mode.bulk_ctl.transfer_buffer[i], 0, sb_size); pipe = usb_rcvbulkpipe(dev->udev, dev->video_mode.end_point_addr); usb_fill_bulk_urb(urb, dev->udev, pipe, dev->video_mode.bulk_ctl.transfer_buffer[i], sb_size, cx231xx_bulk_irq_callback, dma_q); } /* clear halt */ rc = usb_clear_halt(dev->udev, dev->video_mode.bulk_ctl.urb[0]->pipe); if (rc < 0) { dev_err(dev->dev, "failed to clear USB bulk endpoint stall/halt condition (error=%i)\n", rc); cx231xx_uninit_bulk(dev); return rc; } init_waitqueue_head(&dma_q->wq); /* submit urbs and enables IRQ */ for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) { rc = usb_submit_urb(dev->video_mode.bulk_ctl.urb[i], GFP_ATOMIC); if (rc) { dev_err(dev->dev, "submit of urb %i failed (error=%i)\n", i, rc); cx231xx_uninit_bulk(dev); return rc; } } if (dev->mode_tv == 0) cx231xx_capture_start(dev, 1, Raw_Video); else cx231xx_capture_start(dev, 1, TS1_serial_mode); return 0; } EXPORT_SYMBOL_GPL(cx231xx_init_bulk); void cx231xx_stop_TS1(struct cx231xx *dev) { u8 val[4] = { 0, 0, 0, 0 }; val[0] = 0x00; val[1] = 0x03; val[2] = 0x00; val[3] = 0x00; cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, TS_MODE_REG, val, 4); val[0] = 0x00; val[1] = 0x70; val[2] = 0x04; val[3] = 0x00; cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, TS1_CFG_REG, val, 4); } /* EXPORT_SYMBOL_GPL(cx231xx_stop_TS1); */ void cx231xx_start_TS1(struct cx231xx *dev) { u8 val[4] = { 0, 0, 0, 0 }; val[0] = 0x03; val[1] = 0x03; val[2] = 0x00; val[3] = 0x00; cx231xx_write_ctrl_reg(dev, 
VRT_SET_REGISTER, TS_MODE_REG, val, 4); val[0] = 0x04; val[1] = 0xA3; val[2] = 0x3B; val[3] = 0x00; cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, TS1_CFG_REG, val, 4); } /* EXPORT_SYMBOL_GPL(cx231xx_start_TS1); */ /***************************************************************** * Device Init/UnInit functions * ******************************************************************/ int cx231xx_dev_init(struct cx231xx *dev) { int errCode = 0; /* Initialize I2C bus */ /* External Master 1 Bus */ dev->i2c_bus[0].nr = 0; dev->i2c_bus[0].dev = dev; dev->i2c_bus[0].i2c_period = I2C_SPEED_100K; /* 100 kHz */ dev->i2c_bus[0].i2c_nostop = 0; dev->i2c_bus[0].i2c_reserve = 0; dev->i2c_bus[0].i2c_rc = -ENODEV; /* External Master 2 Bus */ dev->i2c_bus[1].nr = 1; dev->i2c_bus[1].dev = dev; dev->i2c_bus[1].i2c_period = I2C_SPEED_100K; /* 100 kHz */ dev->i2c_bus[1].i2c_nostop = 0; dev->i2c_bus[1].i2c_reserve = 0; dev->i2c_bus[1].i2c_rc = -ENODEV; /* Internal Master 3 Bus */ dev->i2c_bus[2].nr = 2; dev->i2c_bus[2].dev = dev; dev->i2c_bus[2].i2c_period = I2C_SPEED_100K; /* 100 kHz */ dev->i2c_bus[2].i2c_nostop = 0; dev->i2c_bus[2].i2c_reserve = 0; dev->i2c_bus[2].i2c_rc = -ENODEV; /* register I2C buses */ errCode = cx231xx_i2c_register(&dev->i2c_bus[0]); if (errCode < 0) return errCode; errCode = cx231xx_i2c_register(&dev->i2c_bus[1]); if (errCode < 0) return errCode; errCode = cx231xx_i2c_register(&dev->i2c_bus[2]); if (errCode < 0) return errCode; errCode = cx231xx_i2c_mux_create(dev); if (errCode < 0) { dev_err(dev->dev, "%s: Failed to create I2C mux\n", __func__); return errCode; } errCode = cx231xx_i2c_mux_register(dev, 0); if (errCode < 0) return errCode; errCode = cx231xx_i2c_mux_register(dev, 1); if (errCode < 0) return errCode; /* scan the real bus segments in the order of physical port numbers */ cx231xx_do_i2c_scan(dev, I2C_0); cx231xx_do_i2c_scan(dev, I2C_1_MUX_1); cx231xx_do_i2c_scan(dev, I2C_2); cx231xx_do_i2c_scan(dev, I2C_1_MUX_3); /* init hardware */ /* Note: without calling the set power mode function, the AFE cannot be set up correctly */ if (dev->board.external_av) { errCode = cx231xx_set_power_mode(dev, POLARIS_AVMODE_ENXTERNAL_AV); if (errCode < 0) { dev_err(dev->dev, "%s: Failed to set Power - errCode [%d]!\n", __func__, errCode); return errCode; } } else { errCode = cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV); if (errCode < 0) { dev_err(dev->dev, "%s: Failed to set Power - errCode [%d]!\n", __func__, errCode); return errCode; } } /* reset the Tuner, if it is a Xceive tuner */ if ((dev->board.tuner_type == TUNER_XC5000) || (dev->board.tuner_type == TUNER_XC2028)) cx231xx_gpio_set(dev, dev->board.tuner_gpio); /* initialize Colibri block */ errCode = cx231xx_afe_init_super_block(dev, 0x23c); if (errCode < 0) { dev_err(dev->dev, "%s: cx231xx_afe init super block - errCode [%d]!\n", __func__, errCode); return errCode; } errCode = cx231xx_afe_init_channels(dev); if (errCode < 0) { dev_err(dev->dev, "%s: cx231xx_afe init channels - errCode [%d]!\n", __func__, errCode); return errCode; } /* Set DIF in bypass mode */ errCode = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); if (errCode < 0) { dev_err(dev->dev, "%s: cx231xx_dif set to bypass mode - errCode [%d]!\n", __func__, errCode); return errCode; } /* I2S block related functions */ errCode = cx231xx_i2s_blk_initialize(dev); if (errCode < 0) { dev_err(dev->dev, "%s: cx231xx_i2s block initialize - errCode [%d]!\n", __func__, errCode); return errCode; } /* init control pins */ errCode = cx231xx_init_ctrl_pin_status(dev); if (errCode <
0) { dev_err(dev->dev, "%s: cx231xx_init ctrl pins - errCode [%d]!\n", __func__, errCode); return errCode; } /* set AGC mode to Analog */ switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1); break; case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: case CX231XX_BOARD_HAUPPAUGE_EXETER: case CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx: case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); break; default: break; } if (errCode < 0) { dev_err(dev->dev, "%s: cx231xx_AGC mode to Analog - errCode [%d]!\n", __func__, errCode); return errCode; } /* set all alternate settings to zero initially */ cx231xx_set_alt_setting(dev, INDEX_VIDEO, 0); cx231xx_set_alt_setting(dev, INDEX_VANC, 0); cx231xx_set_alt_setting(dev, INDEX_HANC, 0); if (dev->board.has_dvb) cx231xx_set_alt_setting(dev, INDEX_TS1, 0); errCode = 0; return errCode; } EXPORT_SYMBOL_GPL(cx231xx_dev_init); void cx231xx_dev_uninit(struct cx231xx *dev) { /* Uninitialize I2C bus */ cx231xx_i2c_mux_unregister(dev); cx231xx_i2c_unregister(&dev->i2c_bus[2]); cx231xx_i2c_unregister(&dev->i2c_bus[1]); cx231xx_i2c_unregister(&dev->i2c_bus[0]); } EXPORT_SYMBOL_GPL(cx231xx_dev_uninit); /***************************************************************** * G P I O related functions * ******************************************************************/ int cx231xx_send_gpio_cmd(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val, u8 len, u8 request, u8 direction) { int status = 0; struct VENDOR_REQUEST_IN ven_req; /* Set wValue */ ven_req.wValue = (u16) (gpio_bit >> 16 & 0xffff); /* set request */ if (!request) { if (direction) ven_req.bRequest = VRT_GET_GPIO; /* 0x9 gpio */ else ven_req.bRequest = VRT_SET_GPIO; /* 0x8 gpio */ } else { if (direction) ven_req.bRequest = VRT_GET_GPIE; /* 0xb gpie */ else ven_req.bRequest = VRT_SET_GPIE; /* 0xa gpie */ } /* set index value */ ven_req.wIndex = (u16) (gpio_bit & 0xffff); /* set wLength value */ ven_req.wLength = len; /* set bData value */ ven_req.bData = 0; /* set the buffer for read / write */ ven_req.pBuff = gpio_val; /* set the direction */ if (direction) { ven_req.direction = USB_DIR_IN; memset(ven_req.pBuff, 0x00, ven_req.wLength); } else ven_req.direction = USB_DIR_OUT; /* call common vendor command request */ status = cx231xx_send_vendor_cmd(dev, &ven_req); if (status < 0) { dev_err(dev->dev, "%s: failed with status %d\n", __func__, status); } return status; } EXPORT_SYMBOL_GPL(cx231xx_send_gpio_cmd); /***************************************************************** * C O N T R O L - Register R E A D / W R I T E functions * *****************************************************************/ int cx231xx_mode_register(struct cx231xx *dev, u16 address, u32 mode) { u8 value[4] = { 0x0, 0x0, 0x0, 0x0 }; u32 tmp = 0; int status = 0; status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, address, value, 4); if (status < 0) return status; tmp = le32_to_cpu(*((__le32 *) value)); tmp |= mode; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, address, value, 4); return status; } /***************************************************************** * I 2 C Internal C O N T R O L functions *
*****************************************************************/ int cx231xx_read_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 *data, u8 data_len, int master) { int status = 0; struct cx231xx_i2c_xfer_data req_data; u8 value[64] = "0"; if (saddr_len == 0) saddr = 0; else if (saddr_len == 1) saddr &= 0xff; /* prepare xfer_data struct */ req_data.dev_addr = dev_addr >> 1; req_data.direction = I2C_M_RD; req_data.saddr_len = saddr_len; req_data.saddr_dat = saddr; req_data.buf_size = data_len; req_data.p_buffer = (u8 *) value; /* usb send command */ if (master == 0) status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0], &req_data); else if (master == 1) status = dev->cx231xx_send_usb_command(&dev->i2c_bus[1], &req_data); else if (master == 2) status = dev->cx231xx_send_usb_command(&dev->i2c_bus[2], &req_data); if (status >= 0) { /* Copy the data read back to main buffer */ if (data_len == 1) *data = value[0]; else if (data_len == 4) *data = value[0] | value[1] << 8 | value[2] << 16 | value[3] << 24; else if (data_len > 4) *data = value[saddr]; } return status; } int cx231xx_write_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 data, u8 data_len, int master) { int status = 0; u8 value[4] = { 0, 0, 0, 0 }; struct cx231xx_i2c_xfer_data req_data; value[0] = (u8) data; value[1] = (u8) (data >> 8); value[2] = (u8) (data >> 16); value[3] = (u8) (data >> 24); if (saddr_len == 0) saddr = 0; else if (saddr_len == 1) saddr &= 0xff; /* prepare xfer_data struct */ req_data.dev_addr = dev_addr >> 1; req_data.direction = 0; req_data.saddr_len = saddr_len; req_data.saddr_dat = saddr; req_data.buf_size = data_len; req_data.p_buffer = value; /* usb send command */ if (master == 0) status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0], &req_data); else if (master == 1) status = dev->cx231xx_send_usb_command(&dev->i2c_bus[1], &req_data); else if (master == 2) status = dev->cx231xx_send_usb_command(&dev->i2c_bus[2], &req_data); return status; } int cx231xx_read_i2c_data(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 *data, u8 data_len) { int status = 0; struct cx231xx_i2c_xfer_data req_data; u8 value[4] = { 0, 0, 0, 0 }; if (saddr_len == 0) saddr = 0; else if (saddr_len == 1) saddr &= 0xff; /* prepare xfer_data struct */ req_data.dev_addr = dev_addr >> 1; req_data.direction = I2C_M_RD; req_data.saddr_len = saddr_len; req_data.saddr_dat = saddr; req_data.buf_size = data_len; req_data.p_buffer = (u8 *) value; /* usb send command */ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0], &req_data); if (status >= 0) { /* Copy the data read back to main buffer */ if (data_len == 1) *data = value[0]; else *data = value[0] | value[1] << 8 | value[2] << 16 | value[3] << 24; } return status; } int cx231xx_write_i2c_data(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 data, u8 data_len) { int status = 0; u8 value[4] = { 0, 0, 0, 0 }; struct cx231xx_i2c_xfer_data req_data; value[0] = (u8) data; value[1] = (u8) (data >> 8); value[2] = (u8) (data >> 16); value[3] = (u8) (data >> 24); if (saddr_len == 0) saddr = 0; else if (saddr_len == 1) saddr &= 0xff; /* prepare xfer_data struct */ req_data.dev_addr = dev_addr >> 1; req_data.direction = 0; req_data.saddr_len = saddr_len; req_data.saddr_dat = saddr; req_data.buf_size = data_len; req_data.p_buffer = value; /* usb send command */ status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0], &req_data); return status; } int cx231xx_reg_mask_write(struct cx231xx *dev, u8 dev_addr, u8 size, u16 
register_address, u8 bit_start, u8 bit_end, u32 value) { int status = 0; u32 tmp; u32 mask = 0; int i; if (bit_start > (size - 1) || bit_end > (size - 1)) return -1; if (size == 8) { status = cx231xx_read_i2c_data(dev, dev_addr, register_address, 2, &tmp, 1); } else { status = cx231xx_read_i2c_data(dev, dev_addr, register_address, 2, &tmp, 4); } if (status < 0) return status; mask = 1 << bit_end; for (i = bit_end; i > bit_start && i > 0; i--) mask = mask + (1 << (i - 1)); value <<= bit_start; if (size == 8) { tmp &= ~mask; tmp |= value; tmp &= 0xff; status = cx231xx_write_i2c_data(dev, dev_addr, register_address, 2, tmp, 1); } else { tmp &= ~mask; tmp |= value; status = cx231xx_write_i2c_data(dev, dev_addr, register_address, 2, tmp, 4); } return status; } int cx231xx_read_modify_write_i2c_dword(struct cx231xx *dev, u8 dev_addr, u16 saddr, u32 mask, u32 value) { u32 temp; int status = 0; status = cx231xx_read_i2c_data(dev, dev_addr, saddr, 2, &temp, 4); if (status < 0) return status; temp &= ~mask; temp |= value; status = cx231xx_write_i2c_data(dev, dev_addr, saddr, 2, temp, 4); return status; } u32 cx231xx_set_field(u32 field_mask, u32 data) { u32 temp; for (temp = field_mask; (temp & 1) == 0; temp >>= 1) data <<= 1; return data; }
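To make the two bit-field helpers above concrete, here is a minimal user-space sketch (set_field() is a verbatim copy of cx231xx_set_field() so the example can run outside the kernel; the masks and values are illustrative, not from the driver): cx231xx_set_field() shifts a value into the position of a mask's lowest set bit, and cx231xx_reg_mask_write() builds a contiguous mask covering bits bit_start..bit_end with the same loop shown below.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Copy of the cx231xx_set_field() logic: shift 'data' left until it
 * lines up with the least-significant set bit of 'field_mask'. */
static uint32_t set_field(uint32_t field_mask, uint32_t data)
{
	uint32_t temp;

	for (temp = field_mask; (temp & 1) == 0; temp >>= 1)
		data <<= 1;
	return data;
}

int main(void)
{
	uint32_t mask;
	int i;

	/* Place the value 0x3 into the 4-bit field occupying bits 4..7. */
	assert(set_field(0x000000F0, 0x3) == 0x30);

	/* The loop in cx231xx_reg_mask_write() builds a contiguous mask;
	 * for bit_start = 2, bit_end = 5 it produces 0x3C (bits 2..5). */
	mask = 1u << 5;
	for (i = 5; i > 2 && i > 0; i--)
		mask += 1u << (i - 1);
	assert(mask == 0x3C);

	printf("field/mask helpers behave as expected\n");
	return 0;
}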
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_WAIT_H #define _LINUX_WAIT_H /* * Linux wait queue related types and methods */ #include <linux/list.h> #include <linux/stddef.h> #include <linux/spinlock.h> #include <asm/current.h> typedef struct wait_queue_entry wait_queue_entry_t; typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key); int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key); /* wait_queue_entry::flags */ #define WQ_FLAG_EXCLUSIVE 0x01 #define WQ_FLAG_WOKEN 0x02 #define WQ_FLAG_CUSTOM 0x04 #define WQ_FLAG_DONE 0x08 #define WQ_FLAG_PRIORITY 0x10 /* * A single wait-queue entry structure: */ struct wait_queue_entry { unsigned int flags; void *private; wait_queue_func_t func; struct list_head entry; }; struct wait_queue_head { spinlock_t lock; struct list_head head; }; typedef struct wait_queue_head wait_queue_head_t; struct task_struct; /* * Macros for declaration and initialisation of the datatypes */ #define __WAITQUEUE_INITIALIZER(name, tsk) { \ .private = tsk, \ .func = default_wake_function, \ .entry = { NULL, NULL } } #define DECLARE_WAITQUEUE(name, tsk) \ struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk) #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ .head = LIST_HEAD_INIT(name.head) } #define DECLARE_WAIT_QUEUE_HEAD(name) \ struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name) extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *); #define init_waitqueue_head(wq_head) \ do { \ static struct lock_class_key __key; \ \ __init_waitqueue_head((wq_head), #wq_head, &__key); \ } while (0) #ifdef CONFIG_LOCKDEP # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ ({ init_waitqueue_head(&name); name; }) # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \ struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) #else # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name) #endif
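/*
 * Usage sketch (the names my_static_wq, my_dev and my_dev_setup are
 * illustrative, not part of this header): a wait queue head is either
 * declared statically with DECLARE_WAIT_QUEUE_HEAD() or embedded in a
 * structure and set up at runtime with init_waitqueue_head(), which
 * gives lockdep a distinct lock class per call site:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_static_wq);
 *
 *	struct my_dev {
 *		struct wait_queue_head wq;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *d)
 *	{
 *		init_waitqueue_head(&d->wq);
 *	}
 */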
static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p) { wq_entry->flags = 0; wq_entry->private = p; wq_entry->func = default_wake_function; } static inline void init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func) { wq_entry->flags = 0; wq_entry->private = NULL; wq_entry->func = func; } /** * waitqueue_active -- locklessly test for waiters on the queue * @wq_head: the waitqueue to test for waiters * * returns true if the wait list is not empty * * NOTE: this function is lockless and requires care, incorrect usage _will_ * lead to sporadic and non-obvious failure. * * Use either while holding wait_queue_head::lock or when used for wakeups * with an extra smp_mb() like:: * * CPU0 - waker CPU1 - waiter * * for (;;) { * @cond = true; prepare_to_wait(&wq_head, &wait, state); * smp_mb(); // smp_mb() from set_current_state() * if (waitqueue_active(wq_head)) if (@cond) * wake_up(wq_head); break; * schedule(); * } * finish_wait(&wq_head, &wait); * * Because without the explicit smp_mb() it's possible for the * waitqueue_active() load to get hoisted over the @cond store such that we'll * observe an empty wait list while the waiter might not observe @cond. * * Also note that this 'optimization' trades a spin_lock() for an smp_mb(), * which (when the lock is uncontended) are of roughly equal cost. */ static inline int waitqueue_active(struct wait_queue_head *wq_head) { return !list_empty(&wq_head->head); } /** * wq_has_single_sleeper - check if there is only one sleeper * @wq_head: wait queue head * * Returns true if wq_head has only one sleeper on the list. * * Please refer to the comment for waitqueue_active. */ static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head) { return list_is_singular(&wq_head->head); } /** * wq_has_sleeper - check if there are any waiting processes * @wq_head: wait queue head * * Returns true if wq_head has waiting processes * * Please refer to the comment for waitqueue_active. */ static inline bool wq_has_sleeper(struct wait_queue_head *wq_head) { /* * We need to be sure we are in sync with the * add_wait_queue modifications to the wait queue. * * This memory barrier should be paired with one on the * waiting side.
*/ smp_mb(); return waitqueue_active(wq_head); } extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { struct list_head *head = &wq_head->head; struct wait_queue_entry *wq; list_for_each_entry(wq, &wq_head->head, entry) { if (!(wq->flags & WQ_FLAG_PRIORITY)) break; head = &wq->entry; } list_add(&wq_entry->entry, head); } /* * Used for wake-one threads: */ static inline void __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { wq_entry->flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue(wq_head, wq_entry); } static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { list_add_tail(&wq_entry->entry, &wq_head->head); } static inline void __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { wq_entry->flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_entry_tail(wq_head, wq_entry); } static inline void __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { list_del(&wq_entry->entry); } int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr); void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode); void __wake_up_pollfree(struct wait_queue_head *wq_head); #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL) #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL) #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1) #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0) #define wake_up_sync(x) __wake_up_sync(x, TASK_NORMAL) #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL) #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE) /* * Wakeup macros to be used to report events to the targets. 
*/ #define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m)) #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m)) #define wake_up_poll(x, m) \ __wake_up(x, TASK_NORMAL, 1, poll_to_key(m)) #define wake_up_poll_on_current_cpu(x, m) \ __wake_up_on_current_cpu(x, TASK_NORMAL, poll_to_key(m)) #define wake_up_locked_poll(x, m) \ __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m)) #define wake_up_interruptible_poll(x, m) \ __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m)) #define wake_up_interruptible_sync_poll(x, m) \ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m)) #define wake_up_interruptible_sync_poll_locked(x, m) \ __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m)) /** * wake_up_pollfree - signal that a polled waitqueue is going away * @wq_head: the wait queue head * * In the very rare cases where a ->poll() implementation uses a waitqueue whose * lifetime is tied to a task rather than to the 'struct file' being polled, * this function must be called before the waitqueue is freed so that * non-blocking polls (e.g. epoll) are notified that the queue is going away. * * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU. */ static inline void wake_up_pollfree(struct wait_queue_head *wq_head) { /* * For performance reasons, we don't always take the queue lock here. * Therefore, we might race with someone removing the last entry from * the queue, and proceed while they still hold the queue lock. * However, rcu_read_lock() is required to be held in such cases, so we * can safely proceed with an RCU-delayed free. */ if (waitqueue_active(wq_head)) __wake_up_pollfree(wq_head); } #define ___wait_cond_timeout(condition) \ ({ \ bool __cond = (condition); \ if (__cond && !__ret) \ __ret = 1; \ __cond || !__ret; \ }) #define ___wait_is_interruptible(state) \ (!__builtin_constant_p(state) || \ (state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags); /* * The below macro ___wait_event() has an explicit shadow of the __ret * variable when used from the wait_event_*() macros. * * This is so that both can use the ___wait_cond_timeout() construct * to wrap the condition. * * The type inconsistency of the wait_event_*() __ret variable is also * on purpose; we use long where we can return timeout values and int * otherwise. */ #define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \ ({ \ __label__ __out; \ struct wait_queue_entry __wq_entry; \ long __ret = ret; /* explicit shadow */ \ \ init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \ for (;;) { \ long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\ \ if (condition) \ break; \ \ if (___wait_is_interruptible(state) && __int) { \ __ret = __int; \ goto __out; \ } \ \ cmd; \ \ if (condition) \ break; \ } \ finish_wait(&wq_head, &__wq_entry); \ __out: __ret; \ }) #define __wait_event(wq_head, condition) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ schedule()) /** * wait_event - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. 
*/ #define wait_event(wq_head, condition) \ do { \ might_sleep(); \ if (condition) \ break; \ __wait_event(wq_head, condition); \ } while (0) #define __io_wait_event(wq_head, condition) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ io_schedule()) /* * io_wait_event() -- like wait_event() but with io_schedule() */ #define io_wait_event(wq_head, condition) \ do { \ might_sleep(); \ if (condition) \ break; \ __io_wait_event(wq_head, condition); \ } while (0) #define __wait_event_freezable(wq_head, condition) \ ___wait_event(wq_head, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), \ 0, 0, schedule()) /** * wait_event_freezable - sleep (or freeze) until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute * to system load) until the @condition evaluates to true. The * @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. */ #define wait_event_freezable(wq_head, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_freezable(wq_head, condition); \ __ret; \ }) #define __wait_event_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_UNINTERRUPTIBLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_timeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ #define wait_event_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_timeout(wq_head, condition, timeout); \ __ret; \ }) #define __wait_event_freezable_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 0, timeout, \ __ret = schedule_timeout(__ret)) /* * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid * increasing load and is freezable. 
*/ #define wait_event_freezable_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \ __ret; \ }) #define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \ cmd1; schedule(); cmd2) /* * Just like wait_event_cmd(), except it sets exclusive flag */ #define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \ do { \ if (condition) \ break; \ __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \ } while (0) #define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ cmd1; schedule(); cmd2) /** * wait_event_cmd - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @cmd1: the command will be executed before sleep * @cmd2: the command will be executed after sleep * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. */ #define wait_event_cmd(wq_head, condition, cmd1, cmd2) \ do { \ if (condition) \ break; \ __wait_event_cmd(wq_head, condition, cmd1, cmd2); \ } while (0) #define __wait_event_interruptible(wq_head, condition) \ ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ schedule()) /** * wait_event_interruptible - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible(wq_head, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_interruptible(wq_head, condition); \ __ret; \ }) #define __wait_event_interruptible_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_INTERRUPTIBLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was * interrupted by a signal. 
*/ #define wait_event_interruptible_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_interruptible_timeout(wq_head, \ condition, timeout); \ __ret; \ }) #define __wait_event_hrtimeout(wq_head, condition, timeout, state) \ ({ \ int __ret = 0; \ struct hrtimer_sleeper __t; \ \ hrtimer_setup_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ HRTIMER_MODE_REL); \ if ((timeout) != KTIME_MAX) { \ hrtimer_set_expires_range_ns(&__t.timer, timeout, \ current->timer_slack_ns); \ hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \ } \ \ __ret = ___wait_event(wq_head, condition, state, 0, 0, \ if (!__t.task) { \ __ret = -ETIME; \ break; \ } \ schedule()); \ \ hrtimer_cancel(&__t.timer); \ destroy_hrtimer_on_stack(&__t.timer); \ __ret; \ }) /** * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, as a ktime_t * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function returns 0 if @condition became true, or -ETIME if the timeout * elapsed. */ #define wait_event_hrtimeout(wq_head, condition, timeout) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_hrtimeout(wq_head, condition, timeout, \ TASK_UNINTERRUPTIBLE); \ __ret; \ }) /** * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, as a ktime_t * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function returns 0 if @condition became true, -ERESTARTSYS if it was * interrupted by a signal, or -ETIME if the timeout elapsed. 
*/ #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ ({ \ long __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_hrtimeout(wq, condition, timeout, \ TASK_INTERRUPTIBLE); \ __ret; \ }) #define __wait_event_interruptible_exclusive(wq, condition) \ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ schedule()) #define wait_event_interruptible_exclusive(wq, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_interruptible_exclusive(wq, condition); \ __ret; \ }) #define __wait_event_killable_exclusive(wq, condition) \ ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \ schedule()) #define wait_event_killable_exclusive(wq, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_killable_exclusive(wq, condition); \ __ret; \ }) #define __wait_event_freezable_exclusive(wq, condition) \ ___wait_event(wq, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 1, 0,\ schedule()) #define wait_event_freezable_exclusive(wq, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_freezable_exclusive(wq, condition); \ __ret; \ }) /** * wait_event_idle - wait for a condition without contributing to system load * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * */ #define wait_event_idle(wq_head, condition) \ do { \ might_sleep(); \ if (!(condition)) \ ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \ } while (0) /** * wait_event_idle_exclusive - wait for a condition without contributing to system load * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. * The @condition is checked each time the waitqueue @wq_head is woken up. * * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag * set, so if other processes wait on the same list, no further * processes are considered when this one is woken. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * */ #define wait_event_idle_exclusive(wq_head, condition) \ do { \ might_sleep(); \ if (!(condition)) \ ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \ } while (0) #define __wait_event_idle_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_IDLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition.
* * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ #define wait_event_idle_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_idle_timeout(wq_head, condition, timeout); \ __ret; \ }) #define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_IDLE, 1, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag * set, so if other processes wait on the same list, no further * processes are considered when this one is woken. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\ __ret; \ }) extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *); extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *); #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \ ({ \ int __ret; \ DEFINE_WAIT(__wait); \ if (exclusive) \ __wait.flags |= WQ_FLAG_EXCLUSIVE; \ do { \ __ret = fn(&(wq), &__wait); \ if (__ret) \ break; \ } while (!(condition)); \ __remove_wait_queue(&(wq), &__wait); \ __set_current_state(TASK_RUNNING); \ __ret; \ }) /** * wait_event_interruptible_locked - sleep until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock()/spin_unlock() * functions which must match the way they are locked/unlocked outside * of this macro. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_locked(wq, condition) \ ((condition) \ ?
0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr)) /** * wait_event_interruptible_locked_irq - sleep until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() * functions which must match the way they are locked/unlocked outside * of this macro. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_locked_irq(wq, condition) \ ((condition) \ ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq)) /** * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock()/spin_unlock() * functions which must match the way they are locked/unlocked outside * of this macro. * * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag * set, so if other processes wait on the same list, no further * processes are considered when this one is woken. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_exclusive_locked(wq, condition) \ ((condition) \ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr)) /** * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() * functions which must match the way they are locked/unlocked outside * of this macro. * * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag * set, so if other processes wait on the same list, no further * processes are considered when this one is woken. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition.
* * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ ((condition) \ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq)) #define __wait_event_killable(wq, condition) \ ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule()) /** * wait_event_killable - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_KILLABLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_killable(wq_head, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_killable(wq_head, condition); \ __ret; \ }) #define __wait_event_state(wq, condition, state) \ ___wait_event(wq, condition, state, 0, 0, schedule()) /** * wait_event_state - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @state: state to sleep in * * The process is put to sleep (@state) until the @condition evaluates to true * or a signal is received (when allowed by @state). The @condition is checked * each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a signal * (when allowed by @state) and 0 if @condition evaluated to true. */ #define wait_event_state(wq_head, condition, state) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_state(wq_head, condition, state); \ __ret; \ }) #define __wait_event_killable_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_KILLABLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_KILLABLE) until the * @condition evaluates to true or a kill signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was * interrupted by a kill signal. * * Only kill signals interrupt this process. 
*/ #define wait_event_killable_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_killable_timeout(wq_head, \ condition, timeout); \ __ret; \ }) #define __wait_event_lock_irq(wq_head, condition, lock, cmd) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ spin_unlock_irq(&lock); \ cmd; \ schedule(); \ spin_lock_irq(&lock)) /** * wait_event_lock_irq_cmd - sleep until a condition gets true. The * condition is checked under the lock. This * is expected to be called with the lock * taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before cmd * and schedule() and reacquired afterwards. * @cmd: a command which is invoked outside the critical section before * sleep * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before invoking the cmd and going to sleep and is reacquired * afterwards. */ #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \ do { \ if (condition) \ break; \ __wait_event_lock_irq(wq_head, condition, lock, cmd); \ } while (0) /** * wait_event_lock_irq - sleep until a condition gets true. The * condition is checked under the lock. This * is expected to be called with the lock * taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before schedule() * and reacquired afterwards. * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before going to sleep and is reacquired afterwards. */ #define wait_event_lock_irq(wq_head, condition, lock) \ do { \ if (condition) \ break; \ __wait_event_lock_irq(wq_head, condition, lock, ); \ } while (0) #define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \ ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ spin_unlock_irq(&lock); \ cmd; \ schedule(); \ spin_lock_irq(&lock)) /** * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true. * The condition is checked under the lock. This is expected to * be called with the lock taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before cmd and * schedule() and reacquired afterwards. * @cmd: a command which is invoked outside the critical section before * sleep * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. The @condition is * checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before invoking the cmd and going to sleep and is reacquired * afterwards. 
* * The macro will return -ERESTARTSYS if it was interrupted by a signal * and 0 if @condition evaluated to true. */ #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \ ({ \ int __ret = 0; \ if (!(condition)) \ __ret = __wait_event_interruptible_lock_irq(wq_head, \ condition, lock, cmd); \ __ret; \ }) /** * wait_event_interruptible_lock_irq - sleep until a condition gets true. * The condition is checked under the lock. This is expected * to be called with the lock taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before schedule() * and reacquired afterwards. * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or signal is received. The @condition is * checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before going to sleep and is reacquired afterwards. * * The macro will return -ERESTARTSYS if it was interrupted by a signal * and 0 if @condition evaluated to true. */ #define wait_event_interruptible_lock_irq(wq_head, condition, lock) \ ({ \ int __ret = 0; \ if (!(condition)) \ __ret = __wait_event_interruptible_lock_irq(wq_head, \ condition, lock,); \ __ret; \ }) #define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ state, 0, timeout, \ spin_unlock_irq(&lock); \ __ret = schedule_timeout(__ret); \ spin_lock_irq(&lock)); /** * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets * true or a timeout elapses. The condition is checked under * the lock. This is expected to be called with the lock taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before schedule() * and reacquired afterwards. * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or signal is received. The @condition is * checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before going to sleep and is reacquired afterwards. * * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it * was interrupted by a signal, and the remaining jiffies otherwise * if the condition evaluated to true before the timeout elapsed. 
*/ #define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \ timeout) \ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_lock_irq_timeout( \ wq_head, condition, lock, timeout, \ TASK_INTERRUPTIBLE); \ __ret; \ }) #define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_lock_irq_timeout( \ wq_head, condition, lock, timeout, \ TASK_UNINTERRUPTIBLE); \ __ret; \ }) /* * Waitqueues which are removed from the waitqueue_head at wakeup time */ void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout); int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); #define DEFINE_WAIT_FUNC(name, function) \ struct wait_queue_entry name = { \ .private = current, \ .func = function, \ .entry = LIST_HEAD_INIT((name).entry), \ } #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) #define init_wait_func(wait, function) \ do { \ (wait)->private = current; \ (wait)->func = function; \ INIT_LIST_HEAD(&(wait)->entry); \ (wait)->flags = 0; \ } while (0) #define init_wait(wait) init_wait_func(wait, autoremove_wake_function) typedef int (*task_call_f)(struct task_struct *p, void *arg); extern int task_call_func(struct task_struct *p, task_call_f func, void *arg); #endif /* _LINUX_WAIT_H */
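As a minimal sketch of the wait/wake protocol this header implements (the names flag, my_wq, wait_for_flag() and set_flag() are illustrative, not part of the API, and the snippet assumes kernel-module context): the waiter passes its condition to wait_event_interruptible(), which re-checks it around every sleep, and the waker publishes the condition before calling wake_up(); the barriers inside prepare_to_wait_event() and the wakeup path order the store against the waiter's check.

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool flag;

/* Waiter: sleeps in TASK_INTERRUPTIBLE until flag becomes true. */
static int wait_for_flag(void)
{
	/* Returns 0 once flag is true, or -ERESTARTSYS on a signal. */
	return wait_event_interruptible(my_wq, flag);
}

/* Waker: set the condition first, then wake the queue. */
static void set_flag(void)
{
	WRITE_ONCE(flag, true);
	wake_up(&my_wq);
}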
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Force feedback support for Betop based devices
 *
 * The devices are distributed under various names and the same USB device ID
 * can be used in both adapters and actual game controllers.
 *
 * 0x11c2:0x2208 "BTP2185 BFM mode Joystick"
 *  - tested with BTP2185 BFM Mode.
 *
 * 0x11C0:0x5506 "BTP2185 PC mode Joystick"
 *  - tested with BTP2185 PC Mode.
 *
 * 0x8380:0x1850 "BTP2185 V2 PC mode USB Gamepad"
 *  - tested with BTP2185 PC Mode with another version.
 *
 * 0x20bc:0x5500 "BTP2185 V2 BFM mode Joystick"
 *  - tested with BTP2171s.
 *
 * Copyright (c) 2014 Huang Bo <huangbobupt@163.com>
 */

#include <linux/input.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hid.h>

#include "hid-ids.h"

struct betopff_device {
	struct hid_report *report;
};

static int hid_betopff_play(struct input_dev *dev, void *data,
			    struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct betopff_device *betopff = data;
	__u16 left, right;

	left = effect->u.rumble.strong_magnitude;
	right = effect->u.rumble.weak_magnitude;

	betopff->report->field[2]->value[0] = left / 256;
	betopff->report->field[3]->value[0] = right / 256;

	hid_hw_request(hid, betopff->report, HID_REQ_SET_REPORT);

	return 0;
}

static int betopff_init(struct hid_device *hid)
{
	struct betopff_device *betopff;
	struct hid_report *report;
	struct hid_input *hidinput;
	struct list_head *report_list =
			&hid->report_enum[HID_OUTPUT_REPORT].report_list;
	struct input_dev *dev;
	int error;
	int i, j;

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;
	}

	hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
	dev = hidinput->input;

	if (list_empty(report_list)) {
		hid_err(hid, "no output reports found\n");
		return -ENODEV;
	}

	report = list_first_entry(report_list, struct hid_report, list);
	/*
	 * There are actually four fields of one byte each, as below:
	 * -----------------------------------------
	 * Byte0  Byte1  Byte2       Byte3
	 * 0x00   0x00   left_motor  right_motor
	 * -----------------------------------------
	 * Initialize them with default values.
	 */
	if (report->maxfield < 4) {
		hid_err(hid, "not enough fields in the report: %d\n",
			report->maxfield);
		return -ENODEV;
	}
	for (i = 0; i < report->maxfield; i++) {
		if (report->field[i]->report_count < 1) {
			hid_err(hid, "no values in the field\n");
			return -ENODEV;
		}
		for (j = 0; j < report->field[i]->report_count; j++)
			report->field[i]->value[j] = 0x00;
	}

	betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
	if (!betopff)
		return -ENOMEM;

	set_bit(FF_RUMBLE, dev->ffbit);

	error = input_ff_create_memless(dev, betopff, hid_betopff_play);
	if (error) {
		kfree(betopff);
		return error;
	}

	betopff->report = report;
	hid_hw_request(hid, betopff->report, HID_REQ_SET_REPORT);

	hid_info(hid, "Force feedback for betop devices by huangbo <huangbobupt@163.com>\n");

	return 0;
}

static int betop_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	if (id->driver_data)
		hdev->quirks |= HID_QUIRK_MULTI_INPUT;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		goto err;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		goto err;
	}

	betopff_init(hdev);

	return 0;
err:
	return ret;
}

static const struct hid_device_id betop_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
	{ }
};
MODULE_DEVICE_TABLE(hid, betop_devices);

static struct hid_driver betop_driver = {
	.name = "betop",
	.id_table = betop_devices,
	.probe = betop_probe,
};
module_hid_driver(betop_driver);

MODULE_DESCRIPTION("Force feedback support for Betop based devices");
MODULE_LICENSE("GPL");
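hid_betopff_play() receives the 16-bit rumble magnitudes from the input FF core and scales each down to the device's one-byte motor fields (left / 256, right / 256). A hedged userspace sketch of exercising it through the standard evdev force-feedback interface; the event-node path is illustrative and error handling is minimal:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

/* Sketch: upload one FF_RUMBLE effect and play it once. */
static int rumble_once(const char *path)
{
	struct ff_effect e;
	struct input_event play;
	int fd = open(path, O_RDWR);	/* e.g. /dev/input/eventN */

	if (fd < 0)
		return -1;

	memset(&e, 0, sizeof(e));
	e.type = FF_RUMBLE;
	e.id = -1;				/* let the kernel pick a slot */
	e.u.rumble.strong_magnitude = 0x8000;	/* driver writes 0x80 to byte 2 */
	e.u.rumble.weak_magnitude = 0x4000;	/* driver writes 0x40 to byte 3 */
	e.replay.length = 1000;			/* milliseconds */
	if (ioctl(fd, EVIOCSFF, &e) < 0)
		return -1;

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = e.id;
	play.value = 1;				/* start playback */
	if (write(fd, &play, sizeof(play)) < 0)
		return -1;
	close(fd);
	return 0;
}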
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2012 Siemens AG
 *
 * Written by:
 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>

#include <net/netlink.h>
#include <net/nl802154.h>
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>
#include <net/route.h>
#include <net/cfg802154.h>

#include "ieee802154_i.h"
#include "cfg.h"

static void ieee802154_tasklet_handler(struct tasklet_struct *t)
{
	struct ieee802154_local *local = from_tasklet(local, t, tasklet);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&local->skb_queue))) {
		switch (skb->pkt_type) {
		case IEEE802154_RX_MSG:
			/* Clear skb->pkt_type so as not to confuse the
			 * kernel netstack.
			 */
			skb->pkt_type = 0;
			ieee802154_rx(local, skb);
			break;
		default:
			WARN(1, "mac802154: Packet is of unknown type %d\n",
			     skb->pkt_type);
			kfree_skb(skb);
			break;
		}
	}
}

struct ieee802154_hw *
ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
{
	struct wpan_phy *phy;
	struct ieee802154_local *local;
	size_t priv_size;

	if (WARN_ON(!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
		    !ops->start || !ops->stop || !ops->set_channel))
		return NULL;

	/* Ensure 32-byte alignment of our private data and hw private data.
	 * We use the wpan_phy priv data for both our ieee802154_local and for
	 * the driver's private data.
	 *
	 * In memory it will look like this:
	 *
	 * +-------------------------+
	 * | struct wpan_phy         |
	 * +-------------------------+
	 * | struct ieee802154_local |
	 * +-------------------------+
	 * | driver's private data   |
	 * +-------------------------+
	 *
	 * The ieee802154 layer is not aware of the driver and MAC
	 * structures, so align them here.
	 */
	priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len;

	phy = wpan_phy_new(&mac802154_config_ops, priv_size);
	if (!phy) {
		pr_err("failure to allocate master IEEE802.15.4 device\n");
		return NULL;
	}

	phy->privid = mac802154_wpan_phy_privid;

	local = wpan_phy_priv(phy);
	local->phy = phy;
	local->hw.phy = local->phy;
	local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
	local->ops = ops;

	INIT_LIST_HEAD(&local->interfaces);
	INIT_LIST_HEAD(&local->rx_beacon_list);
	INIT_LIST_HEAD(&local->rx_mac_cmd_list);
	mutex_init(&local->iflist_mtx);

	tasklet_setup(&local->tasklet, ieee802154_tasklet_handler);

	skb_queue_head_init(&local->skb_queue);

	INIT_WORK(&local->sync_tx_work, ieee802154_xmit_sync_worker);
	INIT_DELAYED_WORK(&local->scan_work, mac802154_scan_worker);
	INIT_WORK(&local->rx_beacon_work, mac802154_rx_beacon_worker);
	INIT_DELAYED_WORK(&local->beacon_work, mac802154_beacon_worker);
	INIT_WORK(&local->rx_mac_cmd_work, mac802154_rx_mac_cmd_worker);

	init_completion(&local->assoc_done);

	/* init supported flags with 802.15.4 default ranges */
	phy->supported.max_minbe = 8;
	phy->supported.min_maxbe = 3;
	phy->supported.max_maxbe = 8;
	phy->supported.min_frame_retries = 0;
	phy->supported.max_frame_retries = 7;
	phy->supported.max_csma_backoffs = 5;
	phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;

	/* always supported */
	phy->supported.iftypes = BIT(NL802154_IFTYPE_NODE) |
				 BIT(NL802154_IFTYPE_COORD);

	return &local->hw;
}
EXPORT_SYMBOL(ieee802154_alloc_hw);

void ieee802154_configure_durations(struct wpan_phy *phy,
				    unsigned int page, unsigned int channel)
{
	u32 duration = 0;

	switch (page) {
	case 0:
		if (BIT(channel) & 0x1)
			/* 868 MHz BPSK 802.15.4-2003: 20 ksym/s */
			duration = 50 * NSEC_PER_USEC;
		else if (BIT(channel) & 0x7FE)
			/* 915 MHz BPSK 802.15.4-2003: 40 ksym/s */
			duration = 25 * NSEC_PER_USEC;
		else if (BIT(channel) & 0x7FFF800)
			/* 2400 MHz O-QPSK 802.15.4-2006: 62.5 ksym/s */
			duration = 16 * NSEC_PER_USEC;
		break;
	case 2:
		if (BIT(channel) & 0x1)
			/* 868 MHz O-QPSK 802.15.4-2006: 25 ksym/s */
			duration = 40 * NSEC_PER_USEC;
		else if (BIT(channel) & 0x7FE)
			/* 915 MHz O-QPSK 802.15.4-2006: 62.5 ksym/s */
			duration = 16 * NSEC_PER_USEC;
		break;
	case 3:
		if (BIT(channel) & 0x3FFF)
			/* 2.4 GHz CSS 802.15.4a-2007: 1/6 Msym/s */
			duration = 6 * NSEC_PER_USEC;
		break;
	default:
		break;
	}

	if (!duration) {
		pr_debug("Unknown PHY symbol duration\n");
		return;
	}

	phy->symbol_duration = duration;
	phy->lifs_period = (IEEE802154_LIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
	phy->sifs_period = (IEEE802154_SIFS_PERIOD * phy->symbol_duration) / NSEC_PER_USEC;
}
EXPORT_SYMBOL(ieee802154_configure_durations);

void ieee802154_free_hw(struct ieee802154_hw *hw)
{
	struct ieee802154_local *local = hw_to_local(hw);

	BUG_ON(!list_empty(&local->interfaces));

	mutex_destroy(&local->iflist_mtx);

	wpan_phy_free(local->phy);
}
EXPORT_SYMBOL(ieee802154_free_hw);

static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy)
{
	/* TODO warn on empty symbol_duration
	 * Should be done when all drivers set this value.
	 */
	wpan_phy->lifs_period =
		(IEEE802154_LIFS_PERIOD * wpan_phy->symbol_duration) / NSEC_PER_USEC;
	wpan_phy->sifs_period =
		(IEEE802154_SIFS_PERIOD * wpan_phy->symbol_duration) / NSEC_PER_USEC;
}

int ieee802154_register_hw(struct ieee802154_hw *hw)
{
	struct ieee802154_local *local = hw_to_local(hw);
	char mac_wq_name[IFNAMSIZ + 10] = {};
	struct net_device *dev;
	int rc = -ENOSYS;

	local->workqueue =
		create_singlethread_workqueue(wpan_phy_name(local->phy));
	if (!local->workqueue) {
		rc = -ENOMEM;
		goto out;
	}

	snprintf(mac_wq_name, IFNAMSIZ + 10, "%s-mac-cmds",
		 wpan_phy_name(local->phy));
	local->mac_wq = create_singlethread_workqueue(mac_wq_name);
	if (!local->mac_wq) {
		rc = -ENOMEM;
		goto out_wq;
	}

	hrtimer_setup(&local->ifs_timer, ieee802154_xmit_ifs_timer,
		      CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	wpan_phy_set_dev(local->phy, local->hw.parent);

	ieee802154_setup_wpan_phy_pib(local->phy);

	ieee802154_configure_durations(local->phy, local->phy->current_page,
				       local->phy->current_channel);

	if (!(hw->flags & IEEE802154_HW_CSMA_PARAMS)) {
		local->phy->supported.min_csma_backoffs = 4;
		local->phy->supported.max_csma_backoffs = 4;
		local->phy->supported.min_maxbe = 5;
		local->phy->supported.max_maxbe = 5;
		local->phy->supported.min_minbe = 3;
		local->phy->supported.max_minbe = 3;
	}

	if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) {
		local->phy->supported.min_frame_retries = 3;
		local->phy->supported.max_frame_retries = 3;
	}

	if (hw->flags & IEEE802154_HW_PROMISCUOUS)
		local->phy->supported.iftypes |= BIT(NL802154_IFTYPE_MONITOR);

	rc = wpan_phy_register(local->phy);
	if (rc < 0)
		goto out_mac_wq;

	rtnl_lock();

	dev = ieee802154_if_add(local, "wpan%d", NET_NAME_ENUM,
				NL802154_IFTYPE_NODE,
				cpu_to_le64(0x0000000000000000ULL));
	if (IS_ERR(dev)) {
		rtnl_unlock();
		rc = PTR_ERR(dev);
		goto out_phy;
	}

	rtnl_unlock();

	return 0;

out_phy:
	wpan_phy_unregister(local->phy);
out_mac_wq:
	destroy_workqueue(local->mac_wq);
out_wq:
	destroy_workqueue(local->workqueue);
out:
	return rc;
}
EXPORT_SYMBOL(ieee802154_register_hw);

void ieee802154_unregister_hw(struct ieee802154_hw *hw)
{
	struct ieee802154_local *local = hw_to_local(hw);

	tasklet_kill(&local->tasklet);
	flush_workqueue(local->workqueue);

	rtnl_lock();

	ieee802154_remove_interfaces(local);

	rtnl_unlock();

	destroy_workqueue(local->mac_wq);
	destroy_workqueue(local->workqueue);

	wpan_phy_unregister(local->phy);
}
EXPORT_SYMBOL(ieee802154_unregister_hw);

static int __init ieee802154_init(void)
{
	return ieee802154_iface_init();
}

static void __exit ieee802154_exit(void)
{
	ieee802154_iface_exit();

	rcu_barrier();
}

subsys_initcall(ieee802154_init);
module_exit(ieee802154_exit);

MODULE_DESCRIPTION("IEEE 802.15.4 subsystem");
MODULE_LICENSE("GPL v2");
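A worked example of the duration bookkeeping above, assuming the 802.15.4 inter-frame-spacing constants IEEE802154_LIFS_PERIOD == 40 symbols and IEEE802154_SIFS_PERIOD == 12 symbols (the values the standard specifies; confirm against include/linux/ieee802154.h). For page 0, channel 11 (2.4 GHz O-QPSK, 62.5 ksym/s) the switch above picks a 16 us symbol:

/* Sketch only, recomputing what ieee802154_configure_durations()
 * stores for page 0, channel 11.
 */
u32 symbol_duration = 16 * NSEC_PER_USEC;		/* 16000 ns/symbol */
u32 lifs_us = (40 * symbol_duration) / NSEC_PER_USEC;	/* = 640 us */
u32 sifs_us = (12 * symbol_duration) / NSEC_PER_USEC;	/* = 192 us */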
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PACKET_INTERNAL_H__
#define __PACKET_INTERNAL_H__

#include <linux/refcount.h>

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	max_frame_len;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	rwlock_t	blk_fill_in_prog_lock;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};

struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;

	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	unsigned int __percpu	*pending_refcnt;

	union {
		unsigned long			*rx_owner_map;
		struct tpacket_kbdq_core	prb_bdqc;
	};
};

extern struct mutex fanout_mutex;
#define PACKET_FANOUT_MAX	(1 << 16)

struct packet_fanout {
	possible_net_t		net;
	unsigned int		num_members;
	u32			max_num_members;
	u16			id;
	u8			type;
	u8			flags;
	union {
		atomic_t		rr_cur;
		struct bpf_prog __rcu	*bpf_prog;
	};
	struct list_head	list;
	spinlock_t		lock;
	refcount_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
	struct sock __rcu	*arr[] __counted_by(max_num_members);
};

struct packet_rollover {
	int			sock;
	atomic_long_t		num;
	atomic_long_t		num_huge;
	atomic_long_t		num_failed;
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
} ____cacheline_aligned_in_smp;

struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	union tpacket_stats_u	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned long		flags;
	int			ifindex;	/* bound device */
	u8			vnet_hdr_sz;
	__be16			num;
	struct packet_rollover	*rollover;
	struct packet_mclist	*mclist;
	atomic_long_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_tstamp;
	struct completion	skb_completion;
	struct net_device __rcu	*cached_dev;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
	atomic_t		tp_drops ____cacheline_aligned_in_smp;
};
#define pkt_sk(ptr) container_of_const(ptr, struct packet_sock, sk)

enum packet_sock_flags {
	PACKET_SOCK_ORIGDEV,
	PACKET_SOCK_AUXDATA,
	PACKET_SOCK_TX_HAS_OFF,
	PACKET_SOCK_TP_LOSS,
	PACKET_SOCK_RUNNING,
	PACKET_SOCK_PRESSURE,
	PACKET_SOCK_QDISC_BYPASS,
};

static inline void packet_sock_flag_set(struct packet_sock *po,
					enum packet_sock_flags flag,
					bool val)
{
	if (val)
		set_bit(flag, &po->flags);
	else
		clear_bit(flag, &po->flags);
}

static inline bool packet_sock_flag(const struct packet_sock *po,
				    enum packet_sock_flags flag)
{
	return test_bit(flag, &po->flags);
}

#endif
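The two inline helpers above keep all of the boolean socket options in one atomic bitmap (po->flags) instead of separate bitfields. A minimal usage sketch; the function name is illustrative, not part of the header:

/* Sketch: set and test PACKET_SOCK_AUXDATA with atomic bitops. */
static void example_set_auxdata(struct packet_sock *po, bool on)
{
	packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, on);

	if (packet_sock_flag(po, PACKET_SOCK_AUXDATA)) {
		/* recvmsg() on this socket will carry a PACKET_AUXDATA cmsg */
	}
}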
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Topro TP6800/6810 webcam driver.
* * Copyright (C) 2011 Jean-François Moine (http://moinejf.free.fr) * Copyright (C) 2009 Anders Blomdell (anders.blomdell@control.lth.se) * Copyright (C) 2008 Thomas Champagne (lafeuil@gmail.com) */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "gspca.h" MODULE_DESCRIPTION("Topro TP6800/6810 gspca webcam driver"); MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, Anders Blomdell <anders.blomdell@control.lth.se>"); MODULE_LICENSE("GPL"); static int force_sensor = -1; /* JPEG header */ static const u8 jpeg_head[] = { 0xff, 0xd8, /* jpeg */ /* quantization table quality 50% */ 0xff, 0xdb, 0x00, 0x84, /* DQT */ 0, #define JPEG_QT0_OFFSET 7 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e, 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28, 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25, 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33, 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44, 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57, 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71, 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63, 1, #define JPEG_QT1_OFFSET 72 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a, 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, /* Define Huffman table (thanks to Thomas Kaiser) */ 0xff, 0xc4, 0x01, 0x5e, 0x00, 0x00, 0x02, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x06, 0x01, 0x00, 0x00, 0x57, 0x01, 0x02, 0x03, 0x00, 0x11, 0x04, 0x12, 0x21, 0x31, 0x13, 0x41, 0x51, 0x61, 0x05, 0x22, 0x32, 0x14, 0x71, 0x81, 0x91, 0x15, 0x23, 0x42, 0x52, 0x62, 0xa1, 0xb1, 0x06, 0x33, 0x72, 0xc1, 0xd1, 0x24, 0x43, 0x53, 0x82, 0x16, 0x34, 0x92, 0xa2, 0xe1, 0xf1, 0xf0, 0x07, 0x08, 0x17, 0x18, 0x25, 0x26, 0x27, 0x28, 0x35, 0x36, 0x37, 0x38, 0x44, 0x45, 0x46, 0x47, 0x48, 0x54, 0x55, 0x56, 0x57, 0x58, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x11, 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x06, 0x01, 0x00, 0x00, 0x57, 0x00, 0x01, 0x11, 0x02, 0x21, 0x03, 0x12, 0x31, 0x41, 0x13, 0x22, 0x51, 0x61, 0x04, 0x32, 0x71, 0x05, 0x14, 0x23, 0x42, 0x33, 0x52, 0x81, 0x91, 0xa1, 0xb1, 0xf0, 0x06, 0x15, 0xc1, 0xd1, 0xe1, 0x24, 0x43, 0x62, 0xf1, 0x16, 0x25, 0x34, 0x53, 0x72, 0x82, 0x92, 0x07, 0x08, 0x17, 0x18, 0x26, 0x27, 0x28, 0x35, 0x36, 0x37, 0x38, 0x44, 0x45, 0x46, 0x47, 0x48, 0x54, 0x55, 0x56, 0x57, 0x58, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xe2, 0xe3, 
0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xff, 0xc0, 0x00, 0x11, /* SOF0 (start of frame 0 */ 0x08, /* data precision */ #define JPEG_HEIGHT_OFFSET 493 0x01, 0xe0, /* height */ 0x02, 0x80, /* width */ 0x03, /* component number */ 0x01, 0x21, /* samples Y = jpeg 422 */ 0x00, /* quant Y */ 0x02, 0x11, 0x01, /* samples CbCr - quant CbCr */ 0x03, 0x11, 0x01, 0xff, 0xda, 0x00, 0x0c, /* SOS (start of scan) */ 0x03, 0x01, 0x00, 0x02, 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00 #define JPEG_HDR_SZ 521 }; struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ struct v4l2_ctrl *jpegqual; struct v4l2_ctrl *sharpness; struct v4l2_ctrl *gamma; struct v4l2_ctrl *blue; struct v4l2_ctrl *red; u8 framerate; u8 quality; /* webcam current JPEG quality (0..16) */ s8 ag_cnt; /* autogain / start counter for tp6810 */ #define AG_CNT_START 13 /* check gain every N frames */ u8 bridge; u8 sensor; u8 jpeg_hdr[JPEG_HDR_SZ]; }; enum bridges { BRIDGE_TP6800, BRIDGE_TP6810, }; enum sensors { SENSOR_CX0342, SENSOR_SOI763A, /* ~= ov7630 / ov7648 */ NSENSORS }; static const struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 4 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG}, {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG} }; /* * JPEG quality * index: webcam compression * value: JPEG quality in % */ static const u8 jpeg_q[17] = { 88, 77, 67, 57, 55, 55, 45, 45, 36, 36, 30, 30, 26, 26, 22, 22, 94 }; #define BULK_OUT_SIZE 0x20 #if BULK_OUT_SIZE > USB_BUF_SZ #error "USB buffer too small" #endif #define DEFAULT_FRAME_RATE 30 static const u8 rates[] = {30, 20, 15, 10, 7, 5}; static const struct framerates framerates[] = { { .rates = rates, .nrates = ARRAY_SIZE(rates) }, { .rates = rates, .nrates = ARRAY_SIZE(rates) } }; static const u8 rates_6810[] = {30, 15, 10, 7, 5}; static const struct framerates framerates_6810[] = { { .rates = rates_6810, .nrates = ARRAY_SIZE(rates_6810) }, { .rates = rates_6810, .nrates = ARRAY_SIZE(rates_6810) } }; /* * webcam quality in % * the last value is the ultra fine quality */ /* TP6800 register offsets */ #define TP6800_R10_SIF_TYPE 0x10 #define TP6800_R11_SIF_CONTROL 0x11 #define TP6800_R12_SIF_ADDR_S 0x12 #define TP6800_R13_SIF_TX_DATA 0x13 #define TP6800_R14_SIF_RX_DATA 0x14 #define TP6800_R15_GPIO_PU 0x15 #define TP6800_R16_GPIO_PD 0x16 #define TP6800_R17_GPIO_IO 0x17 #define TP6800_R18_GPIO_DATA 0x18 #define TP6800_R19_SIF_ADDR_S2 0x19 #define TP6800_R1A_SIF_TX_DATA2 0x1a #define TP6800_R1B_SIF_RX_DATA2 0x1b #define TP6800_R21_ENDP_1_CTL 0x21 #define TP6800_R2F_TIMING_CFG 0x2f #define TP6800_R30_SENSOR_CFG 0x30 #define TP6800_R31_PIXEL_START 0x31 #define TP6800_R32_PIXEL_END_L 0x32 #define TP6800_R33_PIXEL_END_H 0x33 #define TP6800_R34_LINE_START 0x34 #define TP6800_R35_LINE_END_L 0x35 #define TP6800_R36_LINE_END_H 0x36 #define TP6800_R37_FRONT_DARK_ST 0x37 #define TP6800_R38_FRONT_DARK_END 0x38 #define TP6800_R39_REAR_DARK_ST_L 0x39 #define TP6800_R3A_REAR_DARK_ST_H 0x3a #define TP6800_R3B_REAR_DARK_END_L 0x3b #define TP6800_R3C_REAR_DARK_END_H 0x3c #define TP6800_R3D_HORIZ_DARK_LINE_L 0x3d #define TP6800_R3E_HORIZ_DARK_LINE_H 0x3e #define TP6800_R3F_FRAME_RATE 0x3f #define TP6800_R50 0x50 #define TP6800_R51 0x51 #define TP6800_R52 0x52 #define TP6800_R53 0x53 #define TP6800_R54_DARK_CFG 0x54 #define TP6800_R55_GAMMA_R 0x55 #define TP6800_R56_GAMMA_G 0x56 #define 
TP6800_R57_GAMMA_B 0x57 #define TP6800_R5C_EDGE_THRLD 0x5c #define TP6800_R5D_DEMOSAIC_CFG 0x5d #define TP6800_R78_FORMAT 0x78 #define TP6800_R79_QUALITY 0x79 #define TP6800_R7A_BLK_THRLD 0x7a /* CX0342 register offsets */ #define CX0342_SENSOR_ID 0x00 #define CX0342_VERSION_NO 0x01 #define CX0342_ORG_X_L 0x02 #define CX0342_ORG_X_H 0x03 #define CX0342_ORG_Y_L 0x04 #define CX0342_ORG_Y_H 0x05 #define CX0342_STOP_X_L 0x06 #define CX0342_STOP_X_H 0x07 #define CX0342_STOP_Y_L 0x08 #define CX0342_STOP_Y_H 0x09 #define CX0342_FRAME_WIDTH_L 0x0a #define CX0342_FRAME_WIDTH_H 0x0b #define CX0342_FRAME_HEIGH_L 0x0c #define CX0342_FRAME_HEIGH_H 0x0d #define CX0342_EXPO_LINE_L 0x10 #define CX0342_EXPO_LINE_H 0x11 #define CX0342_EXPO_CLK_L 0x12 #define CX0342_EXPO_CLK_H 0x13 #define CX0342_RAW_GRGAIN_L 0x14 #define CX0342_RAW_GRGAIN_H 0x15 #define CX0342_RAW_GBGAIN_L 0x16 #define CX0342_RAW_GBGAIN_H 0x17 #define CX0342_RAW_RGAIN_L 0x18 #define CX0342_RAW_RGAIN_H 0x19 #define CX0342_RAW_BGAIN_L 0x1a #define CX0342_RAW_BGAIN_H 0x1b #define CX0342_GLOBAL_GAIN 0x1c #define CX0342_SYS_CTRL_0 0x20 #define CX0342_SYS_CTRL_1 0x21 #define CX0342_SYS_CTRL_2 0x22 #define CX0342_BYPASS_MODE 0x23 #define CX0342_SYS_CTRL_3 0x24 #define CX0342_TIMING_EN 0x25 #define CX0342_OUTPUT_CTRL 0x26 #define CX0342_AUTO_ADC_CALIB 0x27 #define CX0342_SYS_CTRL_4 0x28 #define CX0342_ADCGN 0x30 #define CX0342_SLPCR 0x31 #define CX0342_SLPFN_LO 0x32 #define CX0342_ADC_CTL 0x33 #define CX0342_LVRST_BLBIAS 0x34 #define CX0342_VTHSEL 0x35 #define CX0342_RAMP_RIV 0x36 #define CX0342_LDOSEL 0x37 #define CX0342_CLOCK_GEN 0x40 #define CX0342_SOFT_RESET 0x41 #define CX0342_PLL 0x42 #define CX0342_DR_ENH_PULSE_OFFSET_L 0x43 #define CX0342_DR_ENH_PULSE_OFFSET_H 0x44 #define CX0342_DR_ENH_PULSE_POS_L 0x45 #define CX0342_DR_ENH_PULSE_POS_H 0x46 #define CX0342_DR_ENH_PULSE_WIDTH 0x47 #define CX0342_AS_CURRENT_CNT_L 0x48 #define CX0342_AS_CURRENT_CNT_H 0x49 #define CX0342_AS_PREVIOUS_CNT_L 0x4a #define CX0342_AS_PREVIOUS_CNT_H 0x4b #define CX0342_SPV_VALUE_L 0x4c #define CX0342_SPV_VALUE_H 0x4d #define CX0342_GPXLTHD_L 0x50 #define CX0342_GPXLTHD_H 0x51 #define CX0342_RBPXLTHD_L 0x52 #define CX0342_RBPXLTHD_H 0x53 #define CX0342_PLANETHD_L 0x54 #define CX0342_PLANETHD_H 0x55 #define CX0342_ROWDARK_TH 0x56 #define CX0342_ROWDARK_TOL 0x57 #define CX0342_RB_GAP_L 0x58 #define CX0342_RB_GAP_H 0x59 #define CX0342_G_GAP_L 0x5a #define CX0342_G_GAP_H 0x5b #define CX0342_AUTO_ROW_DARK 0x60 #define CX0342_MANUAL_DARK_VALUE 0x61 #define CX0342_GB_DARK_OFFSET 0x62 #define CX0342_GR_DARK_OFFSET 0x63 #define CX0342_RED_DARK_OFFSET 0x64 #define CX0342_BLUE_DARK_OFFSET 0x65 #define CX0342_DATA_SCALING_MULTI 0x66 #define CX0342_AUTOD_Q_FRAME 0x67 #define CX0342_AUTOD_ALLOW_VARI 0x68 #define CX0342_AUTO_DARK_VALUE_L 0x69 #define CX0342_AUTO_DARK_VALUE_H 0x6a #define CX0342_IO_CTRL_0 0x70 #define CX0342_IO_CTRL_1 0x71 #define CX0342_IO_CTRL_2 0x72 #define CX0342_IDLE_CTRL 0x73 #define CX0342_TEST_MODE 0x74 #define CX0342_FRAME_FIX_DATA_TEST 0x75 #define CX0342_FRAME_CNT_TEST 0x76 #define CX0342_RST_OVERFLOW_L 0x80 #define CX0342_RST_OVERFLOW_H 0x81 #define CX0342_RST_UNDERFLOW_L 0x82 #define CX0342_RST_UNDERFLOW_H 0x83 #define CX0342_DATA_OVERFLOW_L 0x84 #define CX0342_DATA_OVERFLOW_H 0x85 #define CX0342_DATA_UNDERFLOW_L 0x86 #define CX0342_DATA_UNDERFLOW_H 0x87 #define CX0342_CHANNEL_0_0_L_irst 0x90 #define CX0342_CHANNEL_0_0_H_irst 0x91 #define CX0342_CHANNEL_0_1_L_irst 0x92 #define CX0342_CHANNEL_0_1_H_irst 0x93 #define CX0342_CHANNEL_0_2_L_irst 0x94 #define 
CX0342_CHANNEL_0_2_H_irst 0x95 #define CX0342_CHANNEL_0_3_L_irst 0x96 #define CX0342_CHANNEL_0_3_H_irst 0x97 #define CX0342_CHANNEL_0_4_L_irst 0x98 #define CX0342_CHANNEL_0_4_H_irst 0x99 #define CX0342_CHANNEL_0_5_L_irst 0x9a #define CX0342_CHANNEL_0_5_H_irst 0x9b #define CX0342_CHANNEL_0_6_L_irst 0x9c #define CX0342_CHANNEL_0_6_H_irst 0x9d #define CX0342_CHANNEL_0_7_L_irst 0x9e #define CX0342_CHANNEL_0_7_H_irst 0x9f #define CX0342_CHANNEL_1_0_L_itx 0xa0 #define CX0342_CHANNEL_1_0_H_itx 0xa1 #define CX0342_CHANNEL_1_1_L_itx 0xa2 #define CX0342_CHANNEL_1_1_H_itx 0xa3 #define CX0342_CHANNEL_1_2_L_itx 0xa4 #define CX0342_CHANNEL_1_2_H_itx 0xa5 #define CX0342_CHANNEL_1_3_L_itx 0xa6 #define CX0342_CHANNEL_1_3_H_itx 0xa7 #define CX0342_CHANNEL_1_4_L_itx 0xa8 #define CX0342_CHANNEL_1_4_H_itx 0xa9 #define CX0342_CHANNEL_1_5_L_itx 0xaa #define CX0342_CHANNEL_1_5_H_itx 0xab #define CX0342_CHANNEL_1_6_L_itx 0xac #define CX0342_CHANNEL_1_6_H_itx 0xad #define CX0342_CHANNEL_1_7_L_itx 0xae #define CX0342_CHANNEL_1_7_H_itx 0xaf #define CX0342_CHANNEL_2_0_L_iwl 0xb0 #define CX0342_CHANNEL_2_0_H_iwl 0xb1 #define CX0342_CHANNEL_2_1_L_iwl 0xb2 #define CX0342_CHANNEL_2_1_H_iwl 0xb3 #define CX0342_CHANNEL_2_2_L_iwl 0xb4 #define CX0342_CHANNEL_2_2_H_iwl 0xb5 #define CX0342_CHANNEL_2_3_L_iwl 0xb6 #define CX0342_CHANNEL_2_3_H_iwl 0xb7 #define CX0342_CHANNEL_2_4_L_iwl 0xb8 #define CX0342_CHANNEL_2_4_H_iwl 0xb9 #define CX0342_CHANNEL_2_5_L_iwl 0xba #define CX0342_CHANNEL_2_5_H_iwl 0xbb #define CX0342_CHANNEL_2_6_L_iwl 0xbc #define CX0342_CHANNEL_2_6_H_iwl 0xbd #define CX0342_CHANNEL_2_7_L_iwl 0xbe #define CX0342_CHANNEL_2_7_H_iwl 0xbf #define CX0342_CHANNEL_3_0_L_ensp 0xc0 #define CX0342_CHANNEL_3_0_H_ensp 0xc1 #define CX0342_CHANNEL_3_1_L_ensp 0xc2 #define CX0342_CHANNEL_3_1_H_ensp 0xc3 #define CX0342_CHANNEL_3_2_L_ensp 0xc4 #define CX0342_CHANNEL_3_2_H_ensp 0xc5 #define CX0342_CHANNEL_3_3_L_ensp 0xc6 #define CX0342_CHANNEL_3_3_H_ensp 0xc7 #define CX0342_CHANNEL_3_4_L_ensp 0xc8 #define CX0342_CHANNEL_3_4_H_ensp 0xc9 #define CX0342_CHANNEL_3_5_L_ensp 0xca #define CX0342_CHANNEL_3_5_H_ensp 0xcb #define CX0342_CHANNEL_3_6_L_ensp 0xcc #define CX0342_CHANNEL_3_6_H_ensp 0xcd #define CX0342_CHANNEL_3_7_L_ensp 0xce #define CX0342_CHANNEL_3_7_H_ensp 0xcf #define CX0342_CHANNEL_4_0_L_sela 0xd0 #define CX0342_CHANNEL_4_0_H_sela 0xd1 #define CX0342_CHANNEL_4_1_L_sela 0xd2 #define CX0342_CHANNEL_4_1_H_sela 0xd3 #define CX0342_CHANNEL_5_0_L_intla 0xe0 #define CX0342_CHANNEL_5_0_H_intla 0xe1 #define CX0342_CHANNEL_5_1_L_intla 0xe2 #define CX0342_CHANNEL_5_1_H_intla 0xe3 #define CX0342_CHANNEL_5_2_L_intla 0xe4 #define CX0342_CHANNEL_5_2_H_intla 0xe5 #define CX0342_CHANNEL_5_3_L_intla 0xe6 #define CX0342_CHANNEL_5_3_H_intla 0xe7 #define CX0342_CHANNEL_6_0_L_xa_sel_pos 0xf0 #define CX0342_CHANNEL_6_0_H_xa_sel_pos 0xf1 #define CX0342_CHANNEL_7_1_L_cds_pos 0xf2 #define CX0342_CHANNEL_7_1_H_cds_pos 0xf3 #define CX0342_SENSOR_HEIGHT_L 0xfb #define CX0342_SENSOR_HEIGHT_H 0xfc #define CX0342_SENSOR_WIDTH_L 0xfd #define CX0342_SENSOR_WIDTH_H 0xfe #define CX0342_VSYNC_HSYNC_READ 0xff struct cmd { u8 reg; u8 val; }; static const u8 DQT[17][130] = { /* Define quantization table (thanks to Thomas Kaiser) */ { /* Quality 0 */ 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x01, 0x04, 0x04, 0x04, 0x06, 0x05, 0x06, 0x0b, 0x06, 0x06, 0x0b, 0x18, 0x10, 0x0e, 0x10, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, }, { /* Quality 1 */ 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x01, 0x08, 0x09, 0x09, 0x0c, 0x0a, 0x0c, 0x17, 0x0d, 0x0d, 0x17, 0x31, 0x21, 0x1c, 0x21, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, }, { /* Quality 2 */ 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x01, 0x0c, 0x0d, 0x0d, 0x12, 0x0f, 0x12, 0x23, 0x13, 0x13, 0x23, 0x4a, 0x31, 0x2a, 0x31, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, 0x4a, }, { /* Quality 3 */ 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x01, 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a, 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, }, { /* Quality 4 */ 0x00, 0x04, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x0a, 0x0a, 0x0a, 0x05, 0x05, 0x05, 0x05, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x01, 0x11, 0x16, 0x16, 0x1e, 0x1a, 0x1e, 0x3a, 0x20, 0x20, 0x3a, 0x7b, 0x52, 0x46, 0x52, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 
0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, }, { /* Quality 5 */ 0x00, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x0c, 0x0c, 0x0c, 0x06, 0x06, 0x06, 0x06, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x01, 0x11, 0x1b, 0x1b, 0x24, 0x1f, 0x24, 0x46, 0x27, 0x27, 0x46, 0x94, 0x63, 0x54, 0x63, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, }, { /* Quality 6 */ 0x00, 0x05, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x0e, 0x0e, 0x0e, 0x07, 0x07, 0x07, 0x07, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x01, 0x15, 0x1f, 0x1f, 0x2a, 0x24, 0x2a, 0x52, 0x2d, 0x2d, 0x52, 0xad, 0x73, 0x62, 0x73, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, }, { /* Quality 7 */ 0x00, 0x05, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x10, 0x10, 0x10, 0x08, 0x08, 0x08, 0x08, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x01, 0x15, 0x24, 0x24, 0x30, 0x2a, 0x30, 0x5e, 0x34, 0x34, 0x5e, 0xc6, 0x84, 0x70, 0x84, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, }, { /* Quality 8 */ 0x00, 0x06, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x14, 0x14, 0x14, 0x0a, 0x0a, 0x0a, 0x0a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f, 0x01, 0x19, 0x2d, 0x2d, 0x3c, 0x34, 0x3c, 0x75, 0x41, 0x41, 0x75, 0xf7, 0xa5, 0x8c, 0xa5, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, }, { /* Quality 9 */ 0x00, 0x06, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x18, 0x18, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x01, 0x19, 0x36, 0x36, 0x48, 0x3f, 0x48, 0x8d, 0x4e, 0x4e, 0x8d, 0xff, 0xc6, 0xa8, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, { /* Quality 10 */ 0x00, 0x07, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x1c, 0x1c, 0x1c, 0x0e, 0x0e, 0x0e, 0x0e, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x01, 0x1d, 0x3f, 0x3f, 0x54, 0x49, 0x54, 0xa4, 0x5b, 0x5b, 0xa4, 0xff, 0xe7, 0xc4, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, { /* Quality 11 */ 0x00, 0x07, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x20, 0x20, 0x20, 0x10, 0x10, 0x10, 0x10, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x01, 0x1d, 0x48, 0x48, 0x60, 0x54, 0x60, 0xbc, 0x68, 0x68, 0xbc, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, { /* Quality 12 */ 0x00, 0x08, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x28, 0x28, 0x28, 0x14, 0x14, 0x14, 0x14, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x01, 0x22, 0x5a, 0x5a, 0x78, 0x69, 0x78, 0xeb, 0x82, 0x82, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, { /* Quality 13 */ 0x00, 0x08, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x30, 0x30, 0x30, 0x18, 0x18, 0x18, 0x18, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x01, 0x22, 0x6c, 0x6c, 0x90, 0x7e, 0x90, 0xff, 0x9c, 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, { /* Quality 14 */ 0x00, 0x0a, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x1c, 0x38, 0x38, 0x38, 0x1c, 0x1c, 0x1c, 0x1c, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x01, 0x2a, 0x7e, 0x7e, 0xa8, 0x93, 0xa8, 0xff, 0xb6, 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, { /* Quality 15 */ 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x40, 0x40, 0x40, 0x20, 0x20, 0x20, 0x20, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x01, 0x2a, 0x90, 0x90, 0xc0, 0xa8, 0xc0, 0xff, 0xd0, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, { /* Quality 16-31 */ 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x01, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, } }; static const struct cmd tp6810_cx_init_common[] = { {0x1c, 0x00}, {TP6800_R10_SIF_TYPE, 0x00}, {0x4e, 0x00}, {0x4f, 0x00}, {TP6800_R50, 0xff}, {TP6800_R51, 0x03}, {0x00, 0x07}, {TP6800_R79_QUALITY, 0x03}, {TP6800_R2F_TIMING_CFG, 0x37}, {TP6800_R30_SENSOR_CFG, 0x10}, {TP6800_R21_ENDP_1_CTL, 0x00}, {TP6800_R52, 0x40}, {TP6800_R53, 0x40}, {TP6800_R54_DARK_CFG, 0x40}, {TP6800_R30_SENSOR_CFG, 0x18}, {0x4b, 0x00}, {TP6800_R3F_FRAME_RATE, 0x83}, {TP6800_R79_QUALITY, 0x05}, {TP6800_R21_ENDP_1_CTL, 0x00}, {0x7c, 0x04}, {0x25, 0x14}, {0x26, 0x0f}, {0x7b, 0x10}, }; static const struct cmd tp6810_ov_init_common[] = { {0x1c, 0x00}, {TP6800_R10_SIF_TYPE, 0x00}, {0x4e, 0x00}, {0x4f, 0x00}, {TP6800_R50, 0xff}, {TP6800_R51, 0x03}, {0x00, 0x07}, {TP6800_R52, 0x40}, {TP6800_R53, 0x40}, {TP6800_R54_DARK_CFG, 0x40}, {TP6800_R79_QUALITY, 0x03}, {TP6800_R2F_TIMING_CFG, 0x17}, {TP6800_R30_SENSOR_CFG, 0x18}, {TP6800_R21_ENDP_1_CTL, 
						0x00},
	{TP6800_R3F_FRAME_RATE, 0x86},
	{0x25, 0x18},
	{0x26, 0x0f},
	{0x7b, 0x90},
};

static const struct cmd tp6810_bridge_start[] = {
	{0x59, 0x88},
	{0x5a, 0x0f},
	{0x5b, 0x4e},
	{TP6800_R5C_EDGE_THRLD, 0x63},
	{TP6800_R5D_DEMOSAIC_CFG, 0x00},
	{0x03, 0x7f},
	{0x04, 0x80},
	{0x06, 0x00},
	{0x00, 0x00},
};

static const struct cmd tp6810_late_start[] = {
	{0x7d, 0x01},
	{0xb0, 0x04}, {0xb1, 0x04}, {0xb2, 0x04}, {0xb3, 0x04},
	{0xb4, 0x04}, {0xb5, 0x04}, {0xb6, 0x08}, {0xb7, 0x08},
	{0xb8, 0x04}, {0xb9, 0x04}, {0xba, 0x04}, {0xbb, 0x04},
	{0xbc, 0x04}, {0xbd, 0x08}, {0xbe, 0x08}, {0xbf, 0x08},
	{0xc0, 0x04}, {0xc1, 0x04}, {0xc2, 0x08}, {0xc3, 0x08},
	{0xc4, 0x08}, {0xc5, 0x08}, {0xc6, 0x08}, {0xc7, 0x13},
	{0xc8, 0x04}, {0xc9, 0x08}, {0xca, 0x08}, {0xcb, 0x08},
	{0xcc, 0x08}, {0xcd, 0x08}, {0xce, 0x13}, {0xcf, 0x13},
	{0xd0, 0x08}, {0xd1, 0x08}, {0xd2, 0x08}, {0xd3, 0x08},
	{0xd4, 0x08}, {0xd5, 0x13}, {0xd6, 0x13}, {0xd7, 0x13},
	{0xd8, 0x08}, {0xd9, 0x08}, {0xda, 0x08}, {0xdb, 0x08},
	{0xdc, 0x13}, {0xdd, 0x13}, {0xde, 0x13}, {0xdf, 0x13},
	{0xe0, 0x08}, {0xe1, 0x08}, {0xe2, 0x08}, {0xe3, 0x13},
	{0xe4, 0x13}, {0xe5, 0x13}, {0xe6, 0x13}, {0xe7, 0x13},
	{0xe8, 0x08}, {0xe9, 0x08}, {0xea, 0x13}, {0xeb, 0x13},
	{0xec, 0x13}, {0xed, 0x13}, {0xee, 0x13}, {0xef, 0x13},
	{0x7d, 0x02},
/* later after isoc start */
	{0x7d, 0x08},
	{0x7d, 0x00},
};

static const struct cmd cx0342_timing_seq[] = {
	{CX0342_CHANNEL_0_1_L_irst, 0x20},
	{CX0342_CHANNEL_0_2_L_irst, 0x24},
	{CX0342_CHANNEL_0_2_H_irst, 0x00},
	{CX0342_CHANNEL_0_3_L_irst, 0x2f},
	{CX0342_CHANNEL_0_3_H_irst, 0x00},
	{CX0342_CHANNEL_1_0_L_itx, 0x02},
	{CX0342_CHANNEL_1_0_H_itx, 0x00},
	{CX0342_CHANNEL_1_1_L_itx, 0x20},
	{CX0342_CHANNEL_1_1_H_itx, 0x00},
	{CX0342_CHANNEL_1_2_L_itx, 0xe4},
	{CX0342_CHANNEL_1_2_H_itx, 0x00},
	{CX0342_CHANNEL_1_3_L_itx, 0xee},
	{CX0342_CHANNEL_1_3_H_itx, 0x00},
	{CX0342_CHANNEL_2_0_L_iwl, 0x30},
	{CX0342_CHANNEL_2_0_H_iwl, 0x00},
	{CX0342_CHANNEL_3_0_L_ensp, 0x34},
	{CX0342_CHANNEL_3_1_L_ensp, 0xe2},
	{CX0342_CHANNEL_3_1_H_ensp, 0x00},
	{CX0342_CHANNEL_3_2_L_ensp, 0xf6},
	{CX0342_CHANNEL_3_2_H_ensp, 0x00},
	{CX0342_CHANNEL_3_3_L_ensp, 0xf4},
	{CX0342_CHANNEL_3_3_H_ensp, 0x02},
	{CX0342_CHANNEL_4_0_L_sela, 0x26},
	{CX0342_CHANNEL_4_0_H_sela, 0x00},
	{CX0342_CHANNEL_4_1_L_sela, 0xe2},
	{CX0342_CHANNEL_4_1_H_sela, 0x00},
	{CX0342_CHANNEL_5_0_L_intla, 0x26},
	{CX0342_CHANNEL_5_1_L_intla, 0x29},
	{CX0342_CHANNEL_5_2_L_intla, 0xf0},
	{CX0342_CHANNEL_5_2_H_intla, 0x00},
	{CX0342_CHANNEL_5_3_L_intla, 0xf3},
	{CX0342_CHANNEL_5_3_H_intla, 0x00},
	{CX0342_CHANNEL_6_0_L_xa_sel_pos, 0x24},
	{CX0342_CHANNEL_7_1_L_cds_pos, 0x02},
	{CX0342_TIMING_EN, 0x01},
};

/* define the JPEG header */
static void jpeg_define(u8 *jpeg_hdr, int height, int width)
{
	memcpy(jpeg_hdr, jpeg_head, sizeof jpeg_head);
	jpeg_hdr[JPEG_HEIGHT_OFFSET + 0] = height >> 8;
	jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height;
	jpeg_hdr[JPEG_HEIGHT_OFFSET + 2] = width >> 8;
	jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width;
}

/* set the JPEG quality for sensor soi763a */
static void jpeg_set_qual(u8 *jpeg_hdr, int quality)
{
	int i, sc;

	if (quality <= 0)
		sc = 5000;
	else if (quality < 50)
		sc = 5000 / quality;
	else
		sc = 200 - quality * 2;
	for (i = 0; i < 64; i++) {
		jpeg_hdr[JPEG_QT0_OFFSET + i] =
			(jpeg_head[JPEG_QT0_OFFSET + i] * sc + 50) / 100;
		jpeg_hdr[JPEG_QT1_OFFSET + i] =
			(jpeg_head[JPEG_QT1_OFFSET + i] * sc + 50) / 100;
	}
}
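/*
 * Note on jpeg_set_qual(): this is the usual IJG/libjpeg scaling rule.
 * quality < 50 maps to sc = 5000 / quality, quality >= 50 maps to
 * sc = 200 - 2 * quality, and every reference table entry is scaled by
 * sc percent with rounding.  For example quality 75 gives sc = 50, so
 * each quantization coefficient is roughly halved.
 */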
static void reg_w(struct gspca_dev *gspca_dev, u8 index, u8 value)
{
	struct usb_device *dev = gspca_dev->dev;
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	ret = usb_control_msg(dev,
			usb_sndctrlpipe(dev, 0),
			0x0e,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			value, index, NULL, 0, 500);
	if (ret < 0) {
		pr_err("reg_w err %d\n", ret);
		gspca_dev->usb_err = ret;
	}
}

/* the returned value is in gspca_dev->usb_buf */
static void reg_r(struct gspca_dev *gspca_dev, u8 index)
{
	struct usb_device *dev = gspca_dev->dev;
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	ret = usb_control_msg(dev,
			usb_rcvctrlpipe(dev, 0),
			0x0d,
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0, index, gspca_dev->usb_buf, 1, 500);
	if (ret < 0) {
		pr_err("reg_r err %d\n", ret);
		gspca_dev->usb_err = ret;
	}
}

static void reg_w_buf(struct gspca_dev *gspca_dev,
			const struct cmd *p, int l)
{
	do {
		reg_w(gspca_dev, p->reg, p->val);
		p++;
	} while (--l > 0);
}

static int i2c_w(struct gspca_dev *gspca_dev, u8 index, u8 value)
{
	struct sd *sd = (struct sd *) gspca_dev;

	reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x00);
	reg_w(gspca_dev, TP6800_R19_SIF_ADDR_S2, index);
	reg_w(gspca_dev, TP6800_R13_SIF_TX_DATA, value);
	reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x01);
	if (sd->bridge == BRIDGE_TP6800)
		return 0;
	msleep(5);
	reg_r(gspca_dev, TP6800_R11_SIF_CONTROL);
	if (gspca_dev->usb_buf[0] == 0)
		return 0;
	reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x00);
	return -1;			/* error */
}

static void i2c_w_buf(struct gspca_dev *gspca_dev,
			const struct cmd *p, int l)
{
	do {
		i2c_w(gspca_dev, p->reg, p->val);
		p++;
	} while (--l > 0);
}

static int i2c_r(struct gspca_dev *gspca_dev, u8 index, int len)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int v;

	reg_w(gspca_dev, TP6800_R19_SIF_ADDR_S2, index);
	reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x02);
	msleep(5);
	reg_r(gspca_dev, TP6800_R14_SIF_RX_DATA);
	v = gspca_dev->usb_buf[0];
	if (sd->bridge == BRIDGE_TP6800)
		return v;
	if (len > 1) {
		reg_r(gspca_dev, TP6800_R1B_SIF_RX_DATA2);
		v |= (gspca_dev->usb_buf[0] << 8);
	}
	reg_r(gspca_dev, TP6800_R11_SIF_CONTROL);
	if (gspca_dev->usb_buf[0] == 0)
		return v;
	reg_w(gspca_dev, TP6800_R11_SIF_CONTROL, 0x00);
	return -1;
}

static void bulk_w(struct gspca_dev *gspca_dev,
			u8 tag,
			const u8 *data,
			int length)
{
	struct usb_device *dev = gspca_dev->dev;
	int count, actual_count, ret;

	if (gspca_dev->usb_err < 0)
		return;
	for (;;) {
		count = length > BULK_OUT_SIZE - 1 ?
				BULK_OUT_SIZE - 1 : length;
		gspca_dev->usb_buf[0] = tag;
		memcpy(&gspca_dev->usb_buf[1], data, count);
		ret = usb_bulk_msg(dev,
				usb_sndbulkpipe(dev, 3),
				gspca_dev->usb_buf, count + 1,
				&actual_count, 500);
		if (ret < 0) {
			pr_err("bulk write error %d tag=%02x\n",
					ret, tag);
			gspca_dev->usb_err = ret;
			return;
		}
		length -= count;
		if (length <= 0)
			break;
		data += count;
	}
}
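/*
 * Sensor detection (below) works by brute force: GPIO bit 0x20 is
 * toggled (apparently a sensor reset line) and a read or a write is
 * attempted at each known i2c address in turn.  The first address
 * that answers identifies the sensor; the negative return values
 * only record which probe step failed.
 */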
static int probe_6810(struct gspca_dev *gspca_dev)
{
	u8 gpio;
	int ret;

	reg_r(gspca_dev, TP6800_R18_GPIO_DATA);
	gpio = gspca_dev->usb_buf[0];
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x04);	/* i2c 16 bits */
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x21);	/* ov??? */
	reg_w(gspca_dev, TP6800_R1A_SIF_TX_DATA2, 0x00);
	if (i2c_w(gspca_dev, 0x00, 0x00) >= 0)
		return SENSOR_SOI763A;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x00);	/* i2c 8 bits */
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x7f);	/* (unknown i2c) */
	if (i2c_w(gspca_dev, 0x00, 0x00) >= 0)
		return -2;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x00);	/* i2c 8 bits */
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x11);	/* tas??? / hv??? */
	ret = i2c_r(gspca_dev, 0x00, 1);
	if (ret > 0)
		return -3;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x6e);	/* po??? */
	ret = i2c_r(gspca_dev, 0x00, 1);
	if (ret > 0)
		return -4;
	ret = i2c_r(gspca_dev, 0x01, 1);
	if (ret > 0)
		return -5;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x04);	/* i2c 16 bits */
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x5d);	/* mi/mt??? */
	ret = i2c_r(gspca_dev, 0x00, 2);
	if (ret > 0)
		return -6;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x5c);	/* mi/mt??? */
	ret = i2c_r(gspca_dev, 0x36, 2);
	if (ret > 0)
		return -7;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x61);	/* (unknown i2c) */
	reg_w(gspca_dev, TP6800_R1A_SIF_TX_DATA2, 0x10);
	if (i2c_w(gspca_dev, 0xff, 0x00) >= 0)
		return -8;

	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio);
	reg_w(gspca_dev, TP6800_R18_GPIO_DATA, gpio | 0x20);
	reg_w(gspca_dev, TP6800_R10_SIF_TYPE, 0x00);	/* i2c 8 bits */
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x20);	/* cx0342 */
	ret = i2c_r(gspca_dev, 0x00, 1);
	if (ret > 0)
		return SENSOR_CX0342;
	return -9;
}
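/*
 * The two *_6810_init() functions below follow the same pattern:
 * program the TP6810 bridge registers first, then point the serial
 * interface at the sensor's i2c address and download the sensor
 * register table.
 */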
static void cx0342_6810_init(struct gspca_dev *gspca_dev)
{
	static const struct cmd reg_init_1[] = {
		{TP6800_R2F_TIMING_CFG, 0x2f},
		{0x25, 0x02},
		{TP6800_R21_ENDP_1_CTL, 0x00},
		{TP6800_R3F_FRAME_RATE, 0x80},
		{TP6800_R2F_TIMING_CFG, 0x2f},
		{TP6800_R18_GPIO_DATA, 0xe1},
		{TP6800_R18_GPIO_DATA, 0xc1},
		{TP6800_R18_GPIO_DATA, 0xe1},
		{TP6800_R11_SIF_CONTROL, 0x00},
	};
	static const struct cmd reg_init_2[] = {
		{TP6800_R78_FORMAT, 0x48},
		{TP6800_R11_SIF_CONTROL, 0x00},
	};
	static const struct cmd sensor_init[] = {
		{CX0342_OUTPUT_CTRL, 0x07},
		{CX0342_BYPASS_MODE, 0x58},
		{CX0342_GPXLTHD_L, 0x28},
		{CX0342_RBPXLTHD_L, 0x28},
		{CX0342_PLANETHD_L, 0x50},
		{CX0342_PLANETHD_H, 0x03},
		{CX0342_RB_GAP_L, 0xff},
		{CX0342_RB_GAP_H, 0x07},
		{CX0342_G_GAP_L, 0xff},
		{CX0342_G_GAP_H, 0x07},
		{CX0342_RST_OVERFLOW_L, 0x5c},
		{CX0342_RST_OVERFLOW_H, 0x01},
		{CX0342_DATA_OVERFLOW_L, 0xfc},
		{CX0342_DATA_OVERFLOW_H, 0x03},
		{CX0342_DATA_UNDERFLOW_L, 0x00},
		{CX0342_DATA_UNDERFLOW_H, 0x00},
		{CX0342_SYS_CTRL_0, 0x40},
		{CX0342_GLOBAL_GAIN, 0x01},
		{CX0342_CLOCK_GEN, 0x00},
		{CX0342_SYS_CTRL_0, 0x02},
		{CX0342_IDLE_CTRL, 0x05},
		{CX0342_ADCGN, 0x00},
		{CX0342_ADC_CTL, 0x00},
		{CX0342_LVRST_BLBIAS, 0x01},
		{CX0342_VTHSEL, 0x0b},
		{CX0342_RAMP_RIV, 0x0b},
		{CX0342_LDOSEL, 0x07},
		{CX0342_SPV_VALUE_L, 0x40},
		{CX0342_SPV_VALUE_H, 0x02},
		{CX0342_AUTO_ADC_CALIB, 0x81},
		{CX0342_TIMING_EN, 0x01},
	};

	reg_w_buf(gspca_dev, reg_init_1, ARRAY_SIZE(reg_init_1));
	reg_w_buf(gspca_dev, tp6810_cx_init_common,
			ARRAY_SIZE(tp6810_cx_init_common));
	reg_w_buf(gspca_dev, reg_init_2, ARRAY_SIZE(reg_init_2));
	reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x20);	/* cx0342 I2C addr */
	i2c_w_buf(gspca_dev, sensor_init, ARRAY_SIZE(sensor_init));
	i2c_w_buf(gspca_dev, cx0342_timing_seq, ARRAY_SIZE(cx0342_timing_seq));
}

static void soi763a_6810_init(struct gspca_dev *gspca_dev)
{
	static const struct cmd reg_init_1[] = {
		{TP6800_R2F_TIMING_CFG, 0x2f},
		{TP6800_R18_GPIO_DATA, 0xe1},
		{0x25, 0x02},
		{TP6800_R21_ENDP_1_CTL, 0x00},
		{TP6800_R3F_FRAME_RATE, 0x80},
		{TP6800_R2F_TIMING_CFG, 0x2f},
		{TP6800_R18_GPIO_DATA, 0xc1},
	};
	static const struct cmd reg_init_2[] = {
		{TP6800_R78_FORMAT, 0x54},
	};
	static const struct cmd sensor_init[] = {
		{0x00, 0x00}, {0x01, 0x80}, {0x02, 0x80}, {0x03, 0x90},
		{0x04, 0x20}, {0x05, 0x20}, {0x06, 0x80}, {0x07, 0x00},
		{0x08, 0xff}, {0x09, 0xff},
		{0x0a, 0x76},			/* 7630 = soi763a */
		{0x0b, 0x30}, {0x0c, 0x20}, {0x0d, 0x20}, {0x0e, 0xff},
		{0x0f, 0xff}, {0x10, 0x41}, {0x15, 0x14}, {0x11, 0x40},
		{0x12, 0x48}, {0x13, 0x80}, {0x14, 0x80}, {0x16, 0x03},
		{0x28, 0xb0}, {0x71, 0x20}, {0x75, 0x8e}, {0x17, 0x1b},
		{0x18, 0xbd}, {0x19, 0x05}, {0x1a, 0xf6}, {0x1b, 0x04},
		{0x1c, 0x7f},			/* omnivision */
		{0x1d, 0xa2}, {0x1e, 0x00}, {0x1f, 0x00}, {0x20, 0x45},
		{0x21, 0x80}, {0x22, 0x80}, {0x23, 0xee}, {0x24, 0x50},
		{0x25, 0x7a}, {0x26, 0xa0}, {0x27, 0x9a}, {0x29, 0x30},
		{0x2a, 0x80}, {0x2b, 0x00}, {0x2c, 0xac}, {0x2d, 0x05},
		{0x2e, 0x80}, {0x2f, 0x3c}, {0x30, 0x22}, {0x31, 0x00},
		{0x32, 0x86}, {0x33, 0x08}, {0x34, 0xff}, {0x35, 0xff},
		{0x36, 0xff}, {0x37, 0xff}, {0x38, 0xff}, {0x39, 0xff},
		{0x3a, 0xfe}, {0x3b, 0xfe}, {0x3c, 0xfe}, {0x3d, 0xfe},
		{0x3e, 0xfe}, {0x3f, 0x71}, {0x40, 0xff}, {0x41, 0xff},
		{0x42, 0xff}, {0x43, 0xff}, {0x44, 0xff}, {0x45, 0xff},
		{0x46, 0xff}, {0x47, 0xff}, {0x48, 0xff}, {0x49, 0xff},
		{0x4a, 0xfe}, {0x4b, 0xff}, {0x4c, 0x00}, {0x4d, 0x00},
		{0x4e, 0xff}, {0x4f, 0xff}, {0x50, 0xff}, {0x51, 0xff},
		{0x52, 0xff}, {0x53, 0xff}, {0x54, 0xff}, {0x55, 0xff},
		{0x56, 0xff}, {0x57, 0xff}, {0x58, 0xff}, {0x59, 0xff},
		{0x5a, 0xff}, {0x5b, 0xfe}, {0x5c, 0xff}, {0x5d, 0x8f},
		{0x5e, 0xff}, {0x5f, 0x8f}, {0x60, 0xa2}, {0x61, 0x4a},
		{0x62, 0xf3}, {0x63, 0x75}, {0x64, 0xf0}, {0x65, 0x00},
		{0x66, 0x55}, {0x67, 0x92}, {0x68, 0xa0}, {0x69, 0x4a},
		{0x6a, 0x22}, {0x6b, 0x00}, {0x6c, 0x33}, {0x6d, 0x44},
		{0x6e, 0x22}, {0x6f, 0x84}, {0x70, 0x0b}, {0x72, 0x10},
		{0x73, 0x50}, {0x74, 0x21}, {0x76, 0x00}, {0x77, 0xa5},
		{0x78, 0x80}, {0x79, 0x80}, {0x7a, 0x80}, {0x7b, 0xe2},
		{0x7c, 0x00}, {0x7d, 0xf7}, {0x7e, 0x00}, {0x7f, 0x00},
	};

	reg_w_buf(gspca_dev, reg_init_1, ARRAY_SIZE(reg_init_1));
	reg_w_buf(gspca_dev, tp6810_ov_init_common,
			ARRAY_SIZE(tp6810_ov_init_common));
	reg_w_buf(gspca_dev, reg_init_2, ARRAY_SIZE(reg_init_2));
	i2c_w(gspca_dev, 0x12, 0x80);		/* sensor reset */
	msleep(10);
	i2c_w_buf(gspca_dev, sensor_init, ARRAY_SIZE(sensor_init));
}
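/*
 * Note: the sensor gain/colour registers come as _L/_H pairs, but the
 * high byte is only written when the bridge is a TP6800; on the TP6810
 * only the low bytes are updated (see the BRIDGE_TP6800 tests below).
 */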
/* set the gain and exposure */
static void setexposure(struct gspca_dev *gspca_dev, s32 expo,
			s32 gain, s32 blue, s32 red)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->sensor == SENSOR_CX0342) {
		expo = (expo << 2) - 1;
		i2c_w(gspca_dev, CX0342_EXPO_LINE_L, expo);
		i2c_w(gspca_dev, CX0342_EXPO_LINE_H, expo >> 8);
		if (sd->bridge == BRIDGE_TP6800)
			i2c_w(gspca_dev, CX0342_RAW_GBGAIN_H, gain >> 8);
		i2c_w(gspca_dev, CX0342_RAW_GBGAIN_L, gain);
		if (sd->bridge == BRIDGE_TP6800)
			i2c_w(gspca_dev, CX0342_RAW_GRGAIN_H, gain >> 8);
		i2c_w(gspca_dev, CX0342_RAW_GRGAIN_L, gain);
		if (sd->sensor == SENSOR_CX0342) {
			if (sd->bridge == BRIDGE_TP6800)
				i2c_w(gspca_dev, CX0342_RAW_BGAIN_H,
						blue >> 8);
			i2c_w(gspca_dev, CX0342_RAW_BGAIN_L, blue);
			if (sd->bridge == BRIDGE_TP6800)
				i2c_w(gspca_dev, CX0342_RAW_RGAIN_H,
						red >> 8);
			i2c_w(gspca_dev, CX0342_RAW_RGAIN_L, red);
		}
		i2c_w(gspca_dev, CX0342_SYS_CTRL_0,
				sd->bridge == BRIDGE_TP6800 ? 0x80 : 0x81);
		return;
	}

	/* soi763a */
	i2c_w(gspca_dev, 0x10,		/* AEC_H (exposure time) */
			expo);
/*	i2c_w(gspca_dev, 0x76, 0x02);	 * AEC_L ([1:0] */
	i2c_w(gspca_dev, 0x00,		/* gain */
			gain);
}

/* set the JPEG quantization tables */
static void set_dqt(struct gspca_dev *gspca_dev, u8 q)
{
	struct sd *sd = (struct sd *) gspca_dev;

	/* update the jpeg quantization tables */
	gspca_dbg(gspca_dev, D_STREAM, "q %d -> %d\n", sd->quality, q);
	sd->quality = q;
	if (q > 16)
		q = 16;
	if (sd->sensor == SENSOR_SOI763A)
		jpeg_set_qual(sd->jpeg_hdr, jpeg_q[q]);
	else
		memcpy(&sd->jpeg_hdr[JPEG_QT0_OFFSET - 1],
			DQT[q], sizeof DQT[0]);
}

/* set the JPEG compression quality factor */
static void setquality(struct gspca_dev *gspca_dev, s32 q)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (q != 16)
		q = 15 - q;

	reg_w(gspca_dev, TP6800_R7A_BLK_THRLD, 0x00);
	reg_w(gspca_dev, TP6800_R79_QUALITY, 0x04);
	reg_w(gspca_dev, TP6800_R79_QUALITY, q);

	/* auto quality */
	if (q == 15 && sd->bridge == BRIDGE_TP6810) {
		msleep(4);
		reg_w(gspca_dev, TP6800_R7A_BLK_THRLD, 0x19);
	}
}

static const u8 color_null[18] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

static const u8 color_gain[NSENSORS][18] = {
[SENSOR_CX0342] =
	{0x4c, 0x00, 0xa9, 0x00, 0x31, 0x00,	/* Y R/G/B (LE values) */
	 0xb6, 0x03, 0x6c, 0x03, 0xe0, 0x00,	/* U R/G/B */
	 0xdf, 0x00, 0x46, 0x03, 0xdc, 0x03},	/* V R/G/B */
[SENSOR_SOI763A] =
	{0x4c, 0x00, 0x95, 0x00, 0x1d, 0x00,	/* Y R/G/B (LE values) */
	 0xb6, 0x03, 0x6c, 0x03, 0xd7, 0x00,	/* U R/G/B */
	 0xd5, 0x00, 0x46, 0x03, 0xdc, 0x03},	/* V R/G/B */
};

static void setgamma(struct gspca_dev *gspca_dev, s32 gamma)
{
	struct sd *sd = (struct sd *) gspca_dev;
#define NGAMMA 6
	static const u8 gamma_tb[NGAMMA][3][1024] = {
	    { /* gamma 0 - from tp6800 + soi763a */
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x05, 0x07, 0x07, 0x08, 0x09, 0x09, 0x0a, 0x0c, 0x0c, 0x0d, 0x0e, 0x0e, 0x10, 0x11, 0x11, 0x12, 0x14, 0x14, 0x15, 0x16, 0x16, 0x17, 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1b, 0x1c, 0x1e, 0x1e, 0x1f, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x23, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x28, 0x29, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x34, 0x34, 0x34, 0x35, 0x35, 0x37, 0x37, 0x38, 0x38, 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3b, 0x3c, 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x40, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x47, 0x48, 0x48, 0x49, 0x49, 0x4a, 0x4a, 0x4a, 0x4b, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x50, 0x52, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x54, 0x55, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x58, 0x59, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f,
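		/* (gamma_tb continues: NGAMMA = 6 settings of 3 x 1024
		 * entries each, presumably one 10-bit in / 8-bit out
		 * lookup table per colour component) */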
0x60, 0x60, 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x62, 0x63, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x68, 0x69, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77, 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 
0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x05, 0x07, 0x07, 0x08, 0x09, 0x09, 0x0a, 0x0c, 0x0c, 0x0d, 0x0e, 0x0e, 0x10, 0x11, 0x11, 0x12, 0x14, 0x14, 0x15, 0x16, 0x16, 0x17, 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1b, 0x1c, 0x1e, 0x1e, 0x1f, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x23, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x28, 0x29, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x34, 0x34, 0x34, 0x35, 0x35, 0x37, 0x37, 0x38, 0x38, 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3b, 0x3c, 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x40, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x47, 0x48, 0x48, 0x49, 0x49, 0x4a, 0x4a, 0x4a, 0x4b, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x50, 0x52, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x54, 0x55, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x58, 0x59, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f, 0x60, 0x60, 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x62, 0x63, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x68, 0x69, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77, 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 
0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 
0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x05, 0x07, 0x07, 0x08, 0x09, 0x09, 0x0a, 0x0c, 0x0c, 0x0d, 0x0e, 0x0e, 0x10, 0x11, 0x11, 0x12, 0x14, 0x14, 0x15, 0x16, 0x16, 0x17, 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1b, 0x1c, 0x1e, 0x1e, 0x1f, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x23, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x28, 0x29, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x34, 0x34, 0x34, 0x35, 0x35, 0x37, 0x37, 0x38, 0x38, 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3b, 0x3c, 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x40, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x47, 0x48, 0x48, 0x49, 0x49, 0x4a, 0x4a, 0x4a, 0x4b, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x50, 0x52, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x54, 0x55, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x58, 0x59, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f, 0x60, 0x60, 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x62, 0x63, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x68, 0x69, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x76, 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 
0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb} }, { /* gamma 1 - from tp6810 + soi763a */ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x05, 0x07, 0x08, 0x09, 0x0a, 0x0c, 0x0d, 0x0e, 0x10, 0x11, 0x12, 0x14, 0x15, 0x16, 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x22, 0x22, 0x23, 0x25, 0x26, 0x27, 0x27, 0x28, 0x29, 0x2b, 0x2b, 0x2c, 0x2d, 0x2f, 0x2f, 0x30, 0x31, 0x33, 0x33, 0x34, 0x35, 0x35, 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3a, 0x3b, 0x3c, 0x3c, 0x3d, 0x3f, 0x3f, 0x40, 0x42, 0x42, 0x43, 0x43, 0x44, 0x45, 0x45, 0x47, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4a, 
0x4b, 0x4b, 0x4c, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x56, 0x56, 0x58, 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x77, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x05, 0x07, 0x07, 0x08, 0x09, 0x0a, 0x0c, 0x0d, 0x0e, 0x10, 0x10, 0x11, 0x12, 0x14, 0x15, 0x15, 0x16, 0x17, 0x18, 0x1a, 0x1a, 0x1b, 0x1c, 0x1e, 0x1e, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x25, 0x25, 0x26, 0x27, 0x27, 0x28, 0x29, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x33, 0x34, 0x34, 0x35, 0x35, 0x37, 0x38, 0x38, 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x48, 0x48, 0x49, 0x49, 0x4a, 0x4a, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x52, 0x52, 0x53, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x56, 0x56, 0x58, 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f, 0x60, 0x60, 0x60, 0x61, 0x61, 0x62, 0x62, 0x62, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 
0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x05, 0x05, 0x07, 0x08, 0x09, 0x0a, 0x0a, 0x0c, 0x0d, 0x0e, 0x0e, 0x10, 0x11, 0x12, 0x12, 0x14, 0x15, 0x16, 0x16, 0x17, 0x18, 0x18, 0x1a, 0x1b, 0x1b, 0x1c, 0x1e, 0x1e, 0x1f, 0x1f, 0x20, 0x22, 0x22, 0x23, 0x23, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x29, 0x29, 0x2b, 0x2b, 0x2c, 0x2c, 0x2d, 0x2d, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x34, 0x34, 0x35, 0x35, 0x37, 0x37, 0x38, 0x38, 0x39, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3b, 0x3c, 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x42, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x47, 0x48, 0x48, 0x49, 0x49, 0x49, 0x4a, 0x4a, 0x4b, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x50, 0x52, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x54, 0x55, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x58, 0x59, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f, 0x60, 0x60, 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x62, 0x63, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x68, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x75, 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 
0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }, { /* gamma 2 */ {0x00, 0x01, 0x02, 0x05, 0x07, 0x08, 0x0a, 0x0c, 0x0d, 0x0e, 0x10, 0x12, 0x14, 0x15, 0x16, 0x17, 0x18, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x22, 0x23, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2b, 0x2c, 0x2d, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x34, 0x34, 0x35, 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3f, 0x3f, 0x40, 0x42, 0x42, 0x43, 0x44, 0x44, 0x45, 0x47, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 
0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}, {0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x05, 0x07, 0x08, 0x09, 0x0a, 0x0d, 0x0e, 0x10, 0x11, 0x12, 0x14, 0x15, 0x16, 0x16, 0x17, 0x18, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x25, 0x26, 0x26, 0x27, 0x28, 0x29, 0x29, 0x2b, 0x2c, 0x2d, 0x2d, 0x2f, 0x30, 0x30, 0x31, 0x33, 0x33, 0x34, 0x35, 0x35, 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3d, 0x3f, 0x3f, 0x40, 0x42, 0x42, 0x43, 0x43, 0x44, 0x44, 0x45, 0x45, 0x47, 0x47, 0x48, 0x48, 0x49, 0x4a, 0x4a, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x56, 0x56, 0x58, 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x68, 0x69, 0x69, 0x69, 
0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 
0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}, {0x00, 0x00, 0x00, 0x01, 0x02, 0x05, 0x07, 0x08, 0x09, 0x0a, 0x0c, 0x0e, 0x10, 0x11, 0x12, 0x14, 0x15, 0x16, 0x17, 0x18, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x20, 0x22, 0x23, 0x25, 0x26, 0x27, 0x28, 0x28, 0x29, 0x2b, 0x2c, 0x2d, 0x2d, 0x2f, 0x30, 0x31, 0x31, 0x33, 0x34, 0x35, 0x35, 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3a, 0x3b, 0x3c, 0x3c, 0x3d, 0x3f, 0x3f, 0x40, 0x40, 0x42, 0x43, 0x43, 0x44, 0x44, 0x45, 0x47, 0x47, 0x48, 0x48, 0x49, 0x4a, 0x4a, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x50, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x56, 0x58, 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x65, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x70, 0x71, 0x71, 0x71, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 
0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb3, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 
0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb} }, { /* gamma 3 - from tp6810 + cx0342 */ {0x08, 0x09, 0x0c, 0x0d, 0x10, 0x11, 0x14, 0x15, 0x17, 0x18, 0x1a, 0x1c, 0x1e, 0x1f, 0x20, 0x23, 0x25, 0x26, 0x27, 0x28, 0x2b, 0x2c, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x34, 0x35, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3f, 0x40, 0x42, 0x43, 0x44, 0x45, 0x47, 0x48, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4d, 0x4f, 0x50, 0x52, 0x53, 0x53, 0x54, 0x55, 0x56, 0x56, 0x58, 0x59, 0x5a, 0x5a, 0x5b, 0x5c, 0x5c, 0x5e, 0x5f, 0x5f, 0x60, 0x61, 0x61, 0x62, 0x63, 0x63, 0x65, 0x66, 0x66, 0x67, 0x68, 0x68, 0x69, 0x69, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x77, 0x77, 0x78, 0x78, 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x82, 0x82, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8f, 0x8f, 0x90, 0x90, 0x91, 0x91, 0x91, 0x92, 0x92, 0x93, 0x93, 0x93, 0x94, 0x94, 0x96, 0x96, 0x97, 0x97, 0x97, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 
0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x03, 0x05, 0x07, 0x09, 0x0a, 0x0c, 0x0d, 0x10, 0x11, 0x12, 0x14, 0x15, 0x17, 0x18, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x22, 0x23, 0x25, 0x26, 0x27, 
0x28, 0x29, 0x2b, 0x2c, 0x2c, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x33, 0x34, 0x35, 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3f, 0x3f, 0x40, 0x42, 0x42, 0x43, 0x44, 0x45, 0x45, 0x47, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4b, 0x4b, 0x4c, 0x4d, 0x4d, 0x4f, 0x4f, 0x50, 0x52, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x58, 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x6a, 0x6a, 0x6c, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x71, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x77, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 
0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x07, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17, 0x18, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x23, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2b, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x34, 0x35, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3f, 0x40, 0x42, 0x43, 0x44, 0x44, 0x45, 0x47, 0x48, 0x49, 0x4a, 0x4a, 0x4b, 0x4c, 0x4d, 0x4d, 0x4f, 0x50, 0x52, 0x52, 0x53, 0x54, 0x55, 0x55, 0x56, 0x58, 0x58, 0x59, 0x5a, 0x5b, 0x5b, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x61, 0x61, 0x62, 0x63, 0x63, 0x65, 0x65, 0x66, 0x67, 0x67, 0x68, 0x68, 0x69, 0x6a, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x88, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x92, 0x92, 0x92, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 
0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }, { /* gamma 4 - from tp6800 + soi763a */ {0x11, 0x14, 0x15, 0x17, 0x1a, 0x1b, 0x1e, 0x1f, 0x22, 0x23, 0x25, 0x27, 0x28, 0x2b, 0x2c, 0x2d, 0x2f, 0x31, 0x33, 0x34, 0x35, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x40, 0x42, 0x43, 0x44, 0x45, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4f, 0x50, 0x52, 0x53, 0x53, 0x54, 0x55, 0x56, 0x58, 0x59, 0x5a, 0x5b, 0x5b, 0x5c, 0x5e, 0x5f, 0x60, 0x61, 0x61, 0x62, 0x63, 0x65, 0x65, 0x66, 0x67, 0x68, 0x68, 0x69, 0x6a, 0x6c, 0x6c, 0x6d, 0x6e, 0x6f, 0x6f, 0x70, 0x71, 0x71, 0x73, 0x74, 0x74, 0x75, 0x77, 0x77, 0x78, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7c, 0x7c, 0x7d, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x82, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8b, 0x8b, 0x8d, 0x8d, 0x8e, 0x8e, 0x8f, 0x90, 0x90, 0x91, 0x91, 0x92, 0x92, 0x93, 0x93, 0x94, 0x94, 0x96, 0x96, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x9a, 0x9a, 0x9b, 0x9b, 0x9c, 0x9c, 0x9d, 0x9d, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 
0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}
0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}, {0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x11, 0x14, 0x15, 0x16, 0x17, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x23, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2b, 0x2c, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x34, 0x34, 0x35, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3c, 0x3d, 0x3f, 0x40, 0x42, 0x42, 0x43, 0x44, 0x45, 0x45, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4b, 0x4b, 0x4c, 0x4d, 0x4f, 0x4f, 0x50, 0x52, 0x52, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x58, 0x58, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x62, 0x63, 0x63, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x6a, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x73, 0x73, 0x74, 0x74, 0x74, 0x75, 0x75, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7d, 0x7d, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x84, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x86, 0x88, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7,
0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb}, {0x0d, 0x10, 0x11, 0x14, 0x15, 0x17, 0x18, 0x1b, 0x1c, 0x1e, 0x20, 0x22, 0x23, 0x26, 0x27, 0x28, 0x29, 0x2b, 0x2d, 0x2f, 0x30, 0x31, 0x33, 0x34, 0x35, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3f, 0x40, 0x42, 0x43, 0x44, 0x45, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4b, 0x4c, 0x4d, 0x4f, 0x50, 0x52, 0x52, 0x53, 0x54, 0x55, 0x56, 0x56, 0x58, 0x59, 0x5a, 0x5a, 0x5b, 0x5c, 0x5e, 0x5e, 0x5f, 0x60, 0x60, 
0x61, 0x62, 0x62, 0x63, 0x65, 0x65, 0x66, 0x67, 0x67, 0x68, 0x69, 0x69, 0x6a, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x77, 0x78, 0x78, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x82, 0x82, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8b, 0x8b, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8f, 0x8f, 0x90, 0x90, 0x91, 0x91, 0x91, 0x92, 0x92, 0x93, 0x93, 0x94, 0x94, 0x94, 0x96, 0x96, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 
0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb} }, { /* gamma 5 */ {0x16, 0x18, 0x19, 0x1b, 0x1d, 0x1e, 0x20, 0x21, 0x23, 0x24, 0x25, 0x27, 0x28, 0x2a, 0x2b, 0x2c, 0x2d, 0x2f, 0x30, 0x31, 0x32, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x56, 0x57, 0x58, 0x59, 0x59, 0x5a, 0x5b, 0x5c, 0x5c, 0x5d, 0x5e, 0x5f, 0x5f, 0x60, 0x61, 0x62, 0x62, 0x63, 0x64, 0x64, 0x65, 0x66, 0x66, 0x67, 0x68, 0x68, 0x69, 0x6a, 0x6a, 0x6b, 0x6b, 0x6c, 0x6d, 0x6d, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x72, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x76, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7d, 0x7d, 0x7e, 0x7e, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x82, 0x82, 0x83, 0x83, 0x84, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x87, 0x87, 0x88, 0x88, 0x89, 0x89, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8c, 0x8c, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x90, 0x90, 0x91, 0x91, 0x91, 0x92, 0x92, 0x93, 0x93, 0x94, 0x94, 0x94, 0x95, 0x95, 0x96, 0x96, 0x96, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9f, 0x9f, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa7, 0xa7, 0xa7, 0xa8, 0xa8, 
0xa8, 0xa9, 0xa9, 0xa9, 0xaa, 0xaa, 0xaa, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb5, 0xb5, 0xb5, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xbb, 0xbb, 0xbb, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc1, 0xc1, 0xc1, 0xc1, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd2, 0xd2, 0xd2, 0xd2, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd5, 0xd5, 0xd5, 0xd5, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xea, 0xea, 0xea, 0xea, 0xea, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x0f, 0x11, 0x12, 0x14, 0x15, 0x16, 0x18, 0x19, 0x1a, 0x1b, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3c, 0x3d, 0x3e, 0x3f, 0x3f, 0x40, 0x41, 0x42, 0x42, 0x43, 0x44, 0x44, 0x45, 0x46, 0x47, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4b, 0x4b, 0x4c, 0x4c, 0x4d, 0x4e, 0x4e, 0x4f, 0x50, 0x50, 0x51, 0x51, 0x52, 0x53, 0x53, 0x54, 0x54, 0x55, 0x55, 0x56, 0x56, 0x57, 0x58, 0x58, 0x59, 0x59, 0x5a, 0x5a, 0x5b, 0x5b, 0x5c, 0x5c, 0x5d, 0x5d, 0x5e, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x61, 0x62, 0x62, 0x63, 0x63, 0x64, 0x64, 0x65, 0x65, 0x66, 0x66, 0x66, 0x67, 0x67, 0x68, 0x68, 0x69, 0x69, 0x6a, 0x6a, 0x6a, 0x6b, 0x6b, 0x6c, 0x6c, 0x6d, 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x71, 0x72, 0x72, 0x73, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x75, 0x76, 0x76, 0x76, 0x77, 0x77, 0x78, 0x78, 0x78, 0x79, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x83, 0x83, 0x83, 0x84, 0x84, 0x84, 0x84, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8b, 0x8c, 0x8c, 0x8c, 0x8d, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x90, 0x91, 0x91, 0x91, 0x91, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94, 0x95, 0x95, 0x95, 0x96, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9f, 0x9f, 0x9f, 0x9f, 0xa0, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa6, 0xa7, 0xa7, 0xa7, 0xa7, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 
0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb4, 0xb5, 0xb5, 0xb5, 0xb5, 0xb5, 0xb6, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc1, 0xc1, 0xc1, 0xc1, 0xc1, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xea, 0xea, 0xea, 0xea, 0xea, 0xea, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, {0x13, 0x15, 0x16, 0x18, 0x19, 0x1b, 0x1c, 0x1e, 0x1f, 0x20, 0x22, 0x23, 0x24, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4d, 0x4e, 0x4f, 0x50, 0x50, 0x51, 0x52, 0x53, 0x53, 0x54, 0x55, 0x55, 0x56, 0x57, 0x57, 0x58, 0x59, 0x59, 0x5a, 0x5b, 0x5b, 0x5c, 0x5d, 0x5d, 0x5e, 0x5f, 0x5f, 0x60, 0x60, 0x61, 0x62, 0x62, 0x63, 0x63, 0x64, 0x65, 0x65, 0x66, 0x66, 0x67, 0x67, 0x68, 0x69, 0x69, 0x6a, 0x6a, 0x6b, 0x6b, 0x6c, 0x6c, 0x6d, 0x6d, 0x6e, 0x6e, 0x6f, 0x6f, 0x70, 0x70, 0x71, 0x71, 0x72, 0x72, 0x73, 0x73, 0x74, 0x74, 0x75, 0x75, 0x76, 0x76, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x7a, 0x7a, 0x7a, 0x7b, 0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x80, 0x80, 0x81, 0x81, 0x81, 0x82, 0x82, 0x83, 0x83, 0x84, 0x84, 0x84, 0x85, 0x85, 0x86, 0x86, 0x86, 0x87, 0x87, 0x88, 0x88, 0x88, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8b, 0x8b, 0x8b, 0x8c, 0x8c, 0x8c, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, 0x8f, 0x8f, 0x8f, 0x90, 0x90, 0x90, 0x91, 0x91, 0x92, 0x92, 0x92, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x95, 0x95, 0x95, 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98, 0x99, 0x99, 0x99, 0x9a, 0x9a, 0x9a, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9c, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9e, 0x9f, 0x9f, 0x9f, 0xa0, 0xa0, 0xa0, 0xa1, 0xa1, 0xa1, 0xa2, 0xa2, 0xa2, 0xa2, 0xa3, 0xa3, 0xa3, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa7, 0xa7, 0xa7, 0xa7, 0xa8, 0xa8, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xaa, 0xaa, 0xaa, 0xab, 0xab, 0xab, 0xab, 0xac, 0xac, 0xac, 0xac, 0xad, 0xad, 0xad, 0xae, 0xae, 0xae, 0xae, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4, 0xb4, 0xb5, 0xb5, 0xb5, 0xb5, 0xb6, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xbb, 0xbb, 0xbb, 0xbb, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xc0, 0xc0, 0xc0, 0xc0, 0xc1, 0xc1, 0xc1, 0xc1, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc3, 0xc3, 0xc3, 0xc3, 0xc4, 0xc4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc6, 0xc6, 0xc6, 0xc6, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xca, 0xca, 0xca, 0xca, 0xcb, 0xcb, 0xcb, 0xcb, 0xcb, 0xcc, 0xcc, 0xcc, 0xcc, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xd0, 0xd0, 0xd0, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xda, 0xda, 0xda, 0xda, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe7, 0xe7, 0xe7, 
0xe7, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xe9, 0xea, 0xea, 0xea, 0xea, 0xea, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xec, 0xec, 0xec, 0xec, 0xec, 0xed, 0xed, 0xed, 0xed, 0xed, 0xed, 0xee, 0xee, 0xee, 0xee, 0xee, 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7, 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, }, }; reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00); if (sd->bridge == BRIDGE_TP6810) reg_w(gspca_dev, 0x02, 0x28); /* msleep(50); */ bulk_w(gspca_dev, 0x00, gamma_tb[gamma][0], 1024); bulk_w(gspca_dev, 0x01, gamma_tb[gamma][1], 1024); bulk_w(gspca_dev, 0x02, gamma_tb[gamma][2], 1024); if (sd->bridge == BRIDGE_TP6810) { int i; reg_w(gspca_dev, 0x02, 0x2b); reg_w(gspca_dev, 0x02, 0x28); for (i = 0; i < 6; i++) reg_w(gspca_dev, TP6800_R55_GAMMA_R, gamma_tb[gamma][0][i]); reg_w(gspca_dev, 0x02, 0x2b); reg_w(gspca_dev, 0x02, 0x28); for (i = 0; i < 6; i++) reg_w(gspca_dev, TP6800_R56_GAMMA_G, gamma_tb[gamma][1][i]); reg_w(gspca_dev, 0x02, 0x2b); reg_w(gspca_dev, 
0x02, 0x28); for (i = 0; i < 6; i++) reg_w(gspca_dev, TP6800_R57_GAMMA_B, gamma_tb[gamma][2][i]); reg_w(gspca_dev, 0x02, 0x28); } reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x03); /* msleep(50); */ } static void setsharpness(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; if (sd->bridge == BRIDGE_TP6800) { val |= 0x08; /* grid compensation enable */ if (gspca_dev->pixfmt.width == 640) reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */ else val |= 0x04; /* scaling down enable */ reg_w(gspca_dev, TP6800_R5D_DEMOSAIC_CFG, val); } else { val = (val << 5) | 0x08; reg_w(gspca_dev, 0x59, val); } } static void setautogain(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->ag_cnt = val ? AG_CNT_START : -1; } /* set the resolution for sensor cx0342 */ static void set_resolution(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00); if (gspca_dev->pixfmt.width == 320) { reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x06); msleep(100); i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01); msleep(100); reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x03); reg_w(gspca_dev, TP6800_R78_FORMAT, 0x01); /* qvga */ reg_w(gspca_dev, TP6800_R5D_DEMOSAIC_CFG, 0x0d); i2c_w(gspca_dev, CX0342_EXPO_LINE_L, 0x37); i2c_w(gspca_dev, CX0342_EXPO_LINE_H, 0x01); } else { reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x05); msleep(100); i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01); msleep(100); reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x03); reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */ reg_w(gspca_dev, TP6800_R5D_DEMOSAIC_CFG, 0x09); i2c_w(gspca_dev, CX0342_EXPO_LINE_L, 0xcf); i2c_w(gspca_dev, CX0342_EXPO_LINE_H, 0x00); } i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x01); bulk_w(gspca_dev, 0x03, color_gain[SENSOR_CX0342], ARRAY_SIZE(color_gain[0])); setgamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma)); if (sd->sensor == SENSOR_SOI763A) setquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual)); } /* convert the frame rate to a tp68x0 value */ static int get_fr_idx(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i; if (sd->bridge == BRIDGE_TP6800) { for (i = 0; i < ARRAY_SIZE(rates) - 1; i++) { if (sd->framerate >= rates[i]) break; } i = 6 - i; /* 1 = 5fps .. 6 = 30fps */ /* 640x480 * 30 fps does not work */ if (i == 6 /* if 30 fps */ && gspca_dev->pixfmt.width == 640) i = 0x05; /* 15 fps */ } else { for (i = 0; i < ARRAY_SIZE(rates_6810) - 1; i++) { if (sd->framerate >= rates_6810[i]) break; } i = 7 - i; /* 3 = 5fps .. 7 = 30fps */ /* 640x480 * 30 fps does not work */ if (i == 7 /* if 30 fps */ && gspca_dev->pixfmt.width == 640) i = 6; /* 15 fps */ i |= 0x80; /* clock * 1 */ } return i; } static void setframerate(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; u8 fr_idx; fr_idx = get_fr_idx(gspca_dev); if (sd->bridge == BRIDGE_TP6810) { reg_r(gspca_dev, 0x7b); reg_w(gspca_dev, 0x7b, sd->sensor == SENSOR_CX0342 ? 
0x10 : 0x90); if (val >= 128) fr_idx = 0xf0; /* lower frame rate */ } reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, fr_idx); if (sd->sensor == SENSOR_CX0342) i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01); } static void setrgain(struct gspca_dev *gspca_dev, s32 rgain) { i2c_w(gspca_dev, CX0342_RAW_RGAIN_H, rgain >> 8); i2c_w(gspca_dev, CX0342_RAW_RGAIN_L, rgain); i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x80); } static int sd_setgain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; s32 val = gspca_dev->gain->val; if (sd->sensor == SENSOR_CX0342) { s32 old = gspca_dev->gain->cur.val ? gspca_dev->gain->cur.val : 1; sd->blue->val = sd->blue->val * val / old; if (sd->blue->val > 4095) sd->blue->val = 4095; sd->red->val = sd->red->val * val / old; if (sd->red->val > 4095) sd->red->val = 4095; } if (gspca_dev->streaming) { if (sd->sensor == SENSOR_CX0342) setexposure(gspca_dev, gspca_dev->exposure->val, gspca_dev->gain->val, sd->blue->val, sd->red->val); else setexposure(gspca_dev, gspca_dev->exposure->val, gspca_dev->gain->val, 0, 0); } return gspca_dev->usb_err; } static void setbgain(struct gspca_dev *gspca_dev, s32 bgain) { i2c_w(gspca_dev, CX0342_RAW_BGAIN_H, bgain >> 8); i2c_w(gspca_dev, CX0342_RAW_BGAIN_L, bgain); i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x80); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; sd->bridge = id->driver_info; gspca_dev->cam.cam_mode = vga_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode); gspca_dev->cam.mode_framerates = sd->bridge == BRIDGE_TP6800 ? framerates : framerates_6810; sd->framerate = DEFAULT_FRAME_RATE; return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const struct cmd tp6800_preinit[] = { {TP6800_R10_SIF_TYPE, 0x01}, /* sif */ {TP6800_R11_SIF_CONTROL, 0x01}, {TP6800_R15_GPIO_PU, 0x9f}, {TP6800_R16_GPIO_PD, 0x9f}, {TP6800_R17_GPIO_IO, 0x80}, {TP6800_R18_GPIO_DATA, 0x40}, /* LED off */ }; static const struct cmd tp6810_preinit[] = { {TP6800_R2F_TIMING_CFG, 0x2f}, {TP6800_R15_GPIO_PU, 0x6f}, {TP6800_R16_GPIO_PD, 0x40}, {TP6800_R17_GPIO_IO, 0x9f}, {TP6800_R18_GPIO_DATA, 0xc1}, /* LED off */ }; if (sd->bridge == BRIDGE_TP6800) reg_w_buf(gspca_dev, tp6800_preinit, ARRAY_SIZE(tp6800_preinit)); else reg_w_buf(gspca_dev, tp6810_preinit, ARRAY_SIZE(tp6810_preinit)); msleep(15); reg_r(gspca_dev, TP6800_R18_GPIO_DATA); gspca_dbg(gspca_dev, D_PROBE, "gpio: %02x\n", gspca_dev->usb_buf[0]); /* values: * 0x80: snapshot button * 0x40: LED * 0x20: (bridge / sensor) reset for tp6810 ? * 0x07: sensor type ? 
*/ /* guess the sensor type */ if (force_sensor >= 0) { sd->sensor = force_sensor; } else { if (sd->bridge == BRIDGE_TP6800) { /*fixme: not sure this is working*/ switch (gspca_dev->usb_buf[0] & 0x07) { case 0: sd->sensor = SENSOR_SOI763A; break; case 1: sd->sensor = SENSOR_CX0342; break; } } else { int sensor; sensor = probe_6810(gspca_dev); if (sensor < 0) { pr_warn("Unknown sensor %d - forced to soi763a\n", -sensor); sensor = SENSOR_SOI763A; } sd->sensor = sensor; } } if (sd->sensor == SENSOR_SOI763A) { pr_info("Sensor soi763a\n"); if (sd->bridge == BRIDGE_TP6810) { soi763a_6810_init(gspca_dev); } } else { pr_info("Sensor cx0342\n"); if (sd->bridge == BRIDGE_TP6810) { cx0342_6810_init(gspca_dev); } } set_dqt(gspca_dev, 0); return 0; } /* This function is called before choosing the alt setting */ static int sd_isoc_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const struct cmd cx_sensor_init[] = { {CX0342_AUTO_ADC_CALIB, 0x81}, {CX0342_EXPO_LINE_L, 0x37}, {CX0342_EXPO_LINE_H, 0x01}, {CX0342_RAW_GRGAIN_L, 0x00}, {CX0342_RAW_GBGAIN_L, 0x00}, {CX0342_RAW_RGAIN_L, 0x00}, {CX0342_RAW_BGAIN_L, 0x00}, {CX0342_SYS_CTRL_0, 0x81}, }; static const struct cmd cx_bridge_init[] = { {0x4d, 0x00}, {0x4c, 0xff}, {0x4e, 0xff}, {0x4f, 0x00}, }; static const struct cmd ov_sensor_init[] = { {0x10, 0x75}, /* exposure */ {0x76, 0x03}, {0x00, 0x00}, /* gain */ }; static const struct cmd ov_bridge_init[] = { {0x7b, 0x90}, {TP6800_R3F_FRAME_RATE, 0x87}, }; if (sd->bridge == BRIDGE_TP6800) return 0; if (sd->sensor == SENSOR_CX0342) { reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x20); reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x87); i2c_w_buf(gspca_dev, cx_sensor_init, ARRAY_SIZE(cx_sensor_init)); reg_w_buf(gspca_dev, cx_bridge_init, ARRAY_SIZE(cx_bridge_init)); bulk_w(gspca_dev, 0x03, color_null, sizeof color_null); reg_w(gspca_dev, 0x59, 0x40); } else { reg_w(gspca_dev, TP6800_R12_SIF_ADDR_S, 0x21); i2c_w_buf(gspca_dev, ov_sensor_init, ARRAY_SIZE(ov_sensor_init)); reg_r(gspca_dev, 0x7b); reg_w_buf(gspca_dev, ov_bridge_init, ARRAY_SIZE(ov_bridge_init)); } reg_w(gspca_dev, TP6800_R78_FORMAT, gspca_dev->curr_mode ? 0x00 : 0x01); return gspca_dev->usb_err; } static void set_led(struct gspca_dev *gspca_dev, int on) { u8 data; reg_r(gspca_dev, TP6800_R18_GPIO_DATA); data = gspca_dev->usb_buf[0]; if (on) data &= ~0x40; else data |= 0x40; reg_w(gspca_dev, TP6800_R18_GPIO_DATA, data); } static void cx0342_6800_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const struct cmd reg_init[] = { /* fixme: is this useful? 
*/ {TP6800_R17_GPIO_IO, 0x9f}, {TP6800_R16_GPIO_PD, 0x40}, {TP6800_R10_SIF_TYPE, 0x00}, /* i2c 8 bits */ {TP6800_R50, 0x00}, {TP6800_R51, 0x00}, {TP6800_R52, 0xff}, {TP6800_R53, 0x03}, {TP6800_R54_DARK_CFG, 0x07}, {TP6800_R5C_EDGE_THRLD, 0x40}, {TP6800_R7A_BLK_THRLD, 0x40}, {TP6800_R2F_TIMING_CFG, 0x17}, {TP6800_R30_SENSOR_CFG, 0x18}, /* G1B..RG0 */ {TP6800_R37_FRONT_DARK_ST, 0x00}, {TP6800_R38_FRONT_DARK_END, 0x00}, {TP6800_R39_REAR_DARK_ST_L, 0x00}, {TP6800_R3A_REAR_DARK_ST_H, 0x00}, {TP6800_R3B_REAR_DARK_END_L, 0x00}, {TP6800_R3C_REAR_DARK_END_H, 0x00}, {TP6800_R3D_HORIZ_DARK_LINE_L, 0x00}, {TP6800_R3E_HORIZ_DARK_LINE_H, 0x00}, {TP6800_R21_ENDP_1_CTL, 0x03}, {TP6800_R31_PIXEL_START, 0x0b}, {TP6800_R32_PIXEL_END_L, 0x8a}, {TP6800_R33_PIXEL_END_H, 0x02}, {TP6800_R34_LINE_START, 0x0e}, {TP6800_R35_LINE_END_L, 0xf4}, {TP6800_R36_LINE_END_H, 0x01}, {TP6800_R78_FORMAT, 0x00}, {TP6800_R12_SIF_ADDR_S, 0x20}, /* cx0342 i2c addr */ }; static const struct cmd sensor_init[] = { {CX0342_OUTPUT_CTRL, 0x07}, {CX0342_BYPASS_MODE, 0x58}, {CX0342_GPXLTHD_L, 0x16}, {CX0342_RBPXLTHD_L, 0x16}, {CX0342_PLANETHD_L, 0xc0}, {CX0342_PLANETHD_H, 0x03}, {CX0342_RB_GAP_L, 0xff}, {CX0342_RB_GAP_H, 0x07}, {CX0342_G_GAP_L, 0xff}, {CX0342_G_GAP_H, 0x07}, {CX0342_RST_OVERFLOW_L, 0x5c}, {CX0342_RST_OVERFLOW_H, 0x01}, {CX0342_DATA_OVERFLOW_L, 0xfc}, {CX0342_DATA_OVERFLOW_H, 0x03}, {CX0342_DATA_UNDERFLOW_L, 0x00}, {CX0342_DATA_UNDERFLOW_H, 0x00}, {CX0342_SYS_CTRL_0, 0x40}, {CX0342_GLOBAL_GAIN, 0x01}, {CX0342_CLOCK_GEN, 0x00}, {CX0342_SYS_CTRL_0, 0x02}, {CX0342_IDLE_CTRL, 0x05}, {CX0342_ADCGN, 0x00}, {CX0342_ADC_CTL, 0x00}, {CX0342_LVRST_BLBIAS, 0x01}, {CX0342_VTHSEL, 0x0b}, {CX0342_RAMP_RIV, 0x0b}, {CX0342_LDOSEL, 0x07}, {CX0342_SPV_VALUE_L, 0x40}, {CX0342_SPV_VALUE_H, 0x02}, }; reg_w_buf(gspca_dev, reg_init, ARRAY_SIZE(reg_init)); i2c_w_buf(gspca_dev, sensor_init, ARRAY_SIZE(sensor_init)); i2c_w_buf(gspca_dev, cx0342_timing_seq, ARRAY_SIZE(cx0342_timing_seq)); reg_w(gspca_dev, TP6800_R5C_EDGE_THRLD, 0x10); reg_w(gspca_dev, TP6800_R54_DARK_CFG, 0x00); i2c_w(gspca_dev, CX0342_EXPO_LINE_H, 0x00); i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x01); if (sd->sensor == SENSOR_CX0342) setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), v4l2_ctrl_g_ctrl(sd->blue), v4l2_ctrl_g_ctrl(sd->red)); else setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0); set_led(gspca_dev, 1); set_resolution(gspca_dev); } static void cx0342_6810_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const struct cmd sensor_init_2[] = { {CX0342_EXPO_LINE_L, 0x6f}, {CX0342_EXPO_LINE_H, 0x02}, {CX0342_RAW_GRGAIN_L, 0x00}, {CX0342_RAW_GBGAIN_L, 0x00}, {CX0342_RAW_RGAIN_L, 0x00}, {CX0342_RAW_BGAIN_L, 0x00}, {CX0342_SYS_CTRL_0, 0x81}, }; static const struct cmd bridge_init_2[] = { {0x4d, 0x00}, {0x4c, 0xff}, {0x4e, 0xff}, {0x4f, 0x00}, {TP6800_R7A_BLK_THRLD, 0x00}, {TP6800_R79_QUALITY, 0x04}, {TP6800_R79_QUALITY, 0x01}, }; static const struct cmd bridge_init_3[] = { {TP6800_R31_PIXEL_START, 0x08}, {TP6800_R32_PIXEL_END_L, 0x87}, {TP6800_R33_PIXEL_END_H, 0x02}, {TP6800_R34_LINE_START, 0x0e}, {TP6800_R35_LINE_END_L, 0xf4}, {TP6800_R36_LINE_END_H, 0x01}, }; static const struct cmd sensor_init_3[] = { {CX0342_AUTO_ADC_CALIB, 0x81}, {CX0342_EXPO_LINE_L, 0x6f}, {CX0342_EXPO_LINE_H, 0x02}, {CX0342_RAW_GRGAIN_L, 0x00}, {CX0342_RAW_GBGAIN_L, 0x00}, {CX0342_RAW_RGAIN_L, 0x00}, {CX0342_RAW_BGAIN_L, 0x00}, {CX0342_SYS_CTRL_0, 0x81}, }; static const struct 
cmd bridge_init_5[] = { {0x4d, 0x00}, {0x4c, 0xff}, {0x4e, 0xff}, {0x4f, 0x00}, }; static const struct cmd sensor_init_4[] = { {CX0342_EXPO_LINE_L, 0xd3}, {CX0342_EXPO_LINE_H, 0x01}, /*fixme: gains, but 00..80 only*/ {CX0342_RAW_GRGAIN_L, 0x40}, {CX0342_RAW_GBGAIN_L, 0x40}, {CX0342_RAW_RGAIN_L, 0x40}, {CX0342_RAW_BGAIN_L, 0x40}, {CX0342_SYS_CTRL_0, 0x81}, }; static const struct cmd sensor_init_5[] = { {CX0342_IDLE_CTRL, 0x05}, {CX0342_ADCGN, 0x00}, {CX0342_ADC_CTL, 0x00}, {CX0342_LVRST_BLBIAS, 0x01}, {CX0342_VTHSEL, 0x0b}, {CX0342_RAMP_RIV, 0x0b}, {CX0342_LDOSEL, 0x07}, {CX0342_SPV_VALUE_L, 0x40}, {CX0342_SPV_VALUE_H, 0x02}, {CX0342_AUTO_ADC_CALIB, 0x81}, }; reg_w(gspca_dev, 0x22, gspca_dev->alt); i2c_w_buf(gspca_dev, sensor_init_2, ARRAY_SIZE(sensor_init_2)); reg_w_buf(gspca_dev, bridge_init_2, ARRAY_SIZE(bridge_init_2)); reg_w_buf(gspca_dev, tp6810_cx_init_common, ARRAY_SIZE(tp6810_cx_init_common)); reg_w_buf(gspca_dev, bridge_init_3, ARRAY_SIZE(bridge_init_3)); if (gspca_dev->curr_mode) { reg_w(gspca_dev, 0x4a, 0x7f); reg_w(gspca_dev, 0x07, 0x05); reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */ } else { reg_w(gspca_dev, 0x4a, 0xff); reg_w(gspca_dev, 0x07, 0x85); reg_w(gspca_dev, TP6800_R78_FORMAT, 0x01); /* qvga */ } setgamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma)); reg_w_buf(gspca_dev, tp6810_bridge_start, ARRAY_SIZE(tp6810_bridge_start)); setsharpness(gspca_dev, v4l2_ctrl_g_ctrl(sd->sharpness)); bulk_w(gspca_dev, 0x03, color_gain[SENSOR_CX0342], ARRAY_SIZE(color_gain[0])); reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x87); i2c_w_buf(gspca_dev, sensor_init_3, ARRAY_SIZE(sensor_init_3)); reg_w_buf(gspca_dev, bridge_init_5, ARRAY_SIZE(bridge_init_5)); i2c_w_buf(gspca_dev, sensor_init_4, ARRAY_SIZE(sensor_init_4)); reg_w_buf(gspca_dev, bridge_init_5, ARRAY_SIZE(bridge_init_5)); i2c_w_buf(gspca_dev, sensor_init_5, ARRAY_SIZE(sensor_init_5)); set_led(gspca_dev, 1); /* setquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual)); */ } static void soi763a_6800_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const struct cmd reg_init[] = { {TP6800_R79_QUALITY, 0x04}, {TP6800_R79_QUALITY, 0x01}, {TP6800_R10_SIF_TYPE, 0x00}, /* i2c 8 bits */ {TP6800_R50, 0x00}, {TP6800_R51, 0x00}, {TP6800_R52, 0xff}, {TP6800_R53, 0x03}, {TP6800_R54_DARK_CFG, 0x07}, {TP6800_R5C_EDGE_THRLD, 0x40}, {TP6800_R79_QUALITY, 0x03}, {TP6800_R7A_BLK_THRLD, 0x40}, {TP6800_R2F_TIMING_CFG, 0x46}, {TP6800_R30_SENSOR_CFG, 0x10}, /* BG1..G0R */ {TP6800_R37_FRONT_DARK_ST, 0x00}, {TP6800_R38_FRONT_DARK_END, 0x00}, {TP6800_R39_REAR_DARK_ST_L, 0x00}, {TP6800_R3A_REAR_DARK_ST_H, 0x00}, {TP6800_R3B_REAR_DARK_END_L, 0x00}, {TP6800_R3C_REAR_DARK_END_H, 0x00}, {TP6800_R3D_HORIZ_DARK_LINE_L, 0x00}, {TP6800_R3E_HORIZ_DARK_LINE_H, 0x00}, {TP6800_R21_ENDP_1_CTL, 0x03}, {TP6800_R3F_FRAME_RATE, 0x04}, /* 15 fps */ {TP6800_R5D_DEMOSAIC_CFG, 0x0e}, /* scale down - medium edge */ {TP6800_R31_PIXEL_START, 0x1b}, {TP6800_R32_PIXEL_END_L, 0x9a}, {TP6800_R33_PIXEL_END_H, 0x02}, {TP6800_R34_LINE_START, 0x0f}, {TP6800_R35_LINE_END_L, 0xf4}, {TP6800_R36_LINE_END_H, 0x01}, {TP6800_R78_FORMAT, 0x01}, /* qvga */ {TP6800_R12_SIF_ADDR_S, 0x21}, /* soi763a i2c addr */ {TP6800_R1A_SIF_TX_DATA2, 0x00}, }; static const struct cmd sensor_init[] = { {0x12, 0x48}, /* mirror - RGB */ {0x13, 0xa0}, /* clock - no AGC nor AEC */ {0x03, 0xa4}, /* saturation */ {0x04, 0x30}, /* hue */ {0x05, 0x88}, /* contrast */ {0x06, 0x60}, /* brightness */ {0x10, 0x41}, /* AEC */ {0x11, 0x40}, /* clock rate */ {0x13, 0xa0}, {0x14, 0x00}, /* 640x480 */ 
{0x15, 0x14}, {0x1f, 0x41}, {0x20, 0x80}, {0x23, 0xee}, {0x24, 0x50}, {0x25, 0x7a}, {0x26, 0x00}, {0x27, 0xe2}, {0x28, 0xb0}, {0x2a, 0x00}, {0x2b, 0x00}, {0x2d, 0x81}, {0x2f, 0x9d}, {0x60, 0x80}, {0x61, 0x00}, {0x62, 0x88}, {0x63, 0x11}, {0x64, 0x89}, {0x65, 0x00}, {0x67, 0x94}, {0x68, 0x7a}, {0x69, 0x0f}, {0x6c, 0x80}, {0x6d, 0x80}, {0x6e, 0x80}, {0x6f, 0xff}, {0x71, 0x20}, {0x74, 0x20}, {0x75, 0x86}, {0x77, 0xb5}, {0x17, 0x18}, /* H href start */ {0x18, 0xbf}, /* H href end */ {0x19, 0x03}, /* V start */ {0x1a, 0xf8}, /* V end */ {0x01, 0x80}, /* blue gain */ {0x02, 0x80}, /* red gain */ }; reg_w_buf(gspca_dev, reg_init, ARRAY_SIZE(reg_init)); i2c_w(gspca_dev, 0x12, 0x80); /* sensor reset */ msleep(10); i2c_w_buf(gspca_dev, sensor_init, ARRAY_SIZE(sensor_init)); reg_w(gspca_dev, TP6800_R5C_EDGE_THRLD, 0x10); reg_w(gspca_dev, TP6800_R54_DARK_CFG, 0x00); setsharpness(gspca_dev, v4l2_ctrl_g_ctrl(sd->sharpness)); bulk_w(gspca_dev, 0x03, color_gain[SENSOR_SOI763A], ARRAY_SIZE(color_gain[0])); set_led(gspca_dev, 1); if (sd->sensor == SENSOR_CX0342) setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), v4l2_ctrl_g_ctrl(sd->blue), v4l2_ctrl_g_ctrl(sd->red)); else setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0); if (sd->sensor == SENSOR_SOI763A) setquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual)); setgamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma)); } static void soi763a_6810_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const struct cmd bridge_init_2[] = { {TP6800_R7A_BLK_THRLD, 0x00}, {TP6800_R79_QUALITY, 0x04}, {TP6800_R79_QUALITY, 0x01}, }; static const struct cmd bridge_init_3[] = { {TP6800_R31_PIXEL_START, 0x20}, {TP6800_R32_PIXEL_END_L, 0x9f}, {TP6800_R33_PIXEL_END_H, 0x02}, {TP6800_R34_LINE_START, 0x13}, {TP6800_R35_LINE_END_L, 0xf8}, {TP6800_R36_LINE_END_H, 0x01}, }; static const struct cmd bridge_init_6[] = { {0x08, 0xff}, {0x09, 0xff}, {0x0a, 0x5f}, {0x0b, 0x80}, }; reg_w(gspca_dev, 0x22, gspca_dev->alt); bulk_w(gspca_dev, 0x03, color_null, sizeof color_null); reg_w(gspca_dev, 0x59, 0x40); if (sd->sensor == SENSOR_CX0342) setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), v4l2_ctrl_g_ctrl(sd->blue), v4l2_ctrl_g_ctrl(sd->red)); else setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0); reg_w_buf(gspca_dev, bridge_init_2, ARRAY_SIZE(bridge_init_2)); reg_w_buf(gspca_dev, tp6810_ov_init_common, ARRAY_SIZE(tp6810_ov_init_common)); reg_w_buf(gspca_dev, bridge_init_3, ARRAY_SIZE(bridge_init_3)); if (gspca_dev->curr_mode) { reg_w(gspca_dev, 0x4a, 0x7f); reg_w(gspca_dev, 0x07, 0x05); reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */ } else { reg_w(gspca_dev, 0x4a, 0xff); reg_w(gspca_dev, 0x07, 0x85); reg_w(gspca_dev, TP6800_R78_FORMAT, 0x01); /* qvga */ } setgamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma)); reg_w_buf(gspca_dev, tp6810_bridge_start, ARRAY_SIZE(tp6810_bridge_start)); if (gspca_dev->curr_mode) { reg_w(gspca_dev, 0x4f, 0x00); reg_w(gspca_dev, 0x4e, 0x7c); } reg_w(gspca_dev, 0x00, 0x00); setsharpness(gspca_dev, v4l2_ctrl_g_ctrl(sd->sharpness)); bulk_w(gspca_dev, 0x03, color_gain[SENSOR_SOI763A], ARRAY_SIZE(color_gain[0])); set_led(gspca_dev, 1); reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0xf0); if (sd->sensor == SENSOR_CX0342) setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), v4l2_ctrl_g_ctrl(sd->blue), 
v4l2_ctrl_g_ctrl(sd->red)); else setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0); reg_w_buf(gspca_dev, bridge_init_6, ARRAY_SIZE(bridge_init_6)); } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, gspca_dev->pixfmt.width); set_dqt(gspca_dev, sd->quality); if (sd->bridge == BRIDGE_TP6800) { if (sd->sensor == SENSOR_CX0342) cx0342_6800_start(gspca_dev); else soi763a_6800_start(gspca_dev); } else { if (sd->sensor == SENSOR_CX0342) cx0342_6810_start(gspca_dev); else soi763a_6810_start(gspca_dev); reg_w_buf(gspca_dev, tp6810_late_start, ARRAY_SIZE(tp6810_late_start)); reg_w(gspca_dev, 0x80, 0x03); reg_w(gspca_dev, 0x82, gspca_dev->curr_mode ? 0x0a : 0x0e); if (sd->sensor == SENSOR_CX0342) setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), v4l2_ctrl_g_ctrl(sd->blue), v4l2_ctrl_g_ctrl(sd->red)); else setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure), v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0); if (sd->sensor == SENSOR_SOI763A) setquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual)); if (sd->bridge == BRIDGE_TP6810) setautogain(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->autogain)); } setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure)); return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->bridge == BRIDGE_TP6800) reg_w(gspca_dev, TP6800_R2F_TIMING_CFG, 0x03); set_led(gspca_dev, 0); reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; /* the start of frame contains: * ff d8 * ff fe * width / 16 * height / 8 * quality */ if (sd->bridge == BRIDGE_TP6810) { if (*data != 0x5a) { /*fixme: don't discard the whole frame..*/ if (*data == 0xaa || *data == 0x00) return; if (*data > 0xc0) { gspca_dbg(gspca_dev, D_FRAM, "bad frame\n"); gspca_dev->last_packet_type = DISCARD_PACKET; return; } } data++; len--; if (len < 2) { gspca_dev->last_packet_type = DISCARD_PACKET; return; } if (*data == 0xff && data[1] == 0xd8) { /*fixme: there may be information in the 4 high bits*/ if (len < 7) { gspca_dev->last_packet_type = DISCARD_PACKET; return; } if ((data[6] & 0x0f) != sd->quality) set_dqt(gspca_dev, data[6] & 0x0f); gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); gspca_frame_add(gspca_dev, INTER_PACKET, data + 7, len - 7); } else if (data[len - 2] == 0xff && data[len - 1] == 0xd9) { gspca_frame_add(gspca_dev, LAST_PACKET, data, len); } else { gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } return; } switch (*data) { case 0x55: gspca_frame_add(gspca_dev, LAST_PACKET, data, 0); if (len < 8 || data[1] != 0xff || data[2] != 0xd8 || data[3] != 0xff || data[4] != 0xfe) { /* Have only seen this with corrupt frames */ gspca_dev->last_packet_type = DISCARD_PACKET; return; } if (data[7] != sd->quality) set_dqt(gspca_dev, data[7]); gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); gspca_frame_add(gspca_dev, INTER_PACKET, data + 8, len - 8); break; case 0xaa: gspca_dev->last_packet_type = DISCARD_PACKET; break; case 0xcc: if (len >= 3 && (data[1] != 0xff || data[2] != 0xd8)) gspca_frame_add(gspca_dev, INTER_PACKET, data + 1, len - 1); else gspca_dev->last_packet_type = DISCARD_PACKET; break; } } static void sd_dq_callback(struct gspca_dev *gspca_dev) { struct sd 
*sd = (struct sd *) gspca_dev; int ret, alen; int luma, expo; if (sd->ag_cnt < 0) return; if (--sd->ag_cnt > 5) return; switch (sd->ag_cnt) { /* case 5: */ default: reg_w(gspca_dev, 0x7d, 0x00); break; case 4: reg_w(gspca_dev, 0x27, 0xb0); break; case 3: reg_w(gspca_dev, 0x0c, 0x01); break; case 2: ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x02), gspca_dev->usb_buf, 32, &alen, 500); if (ret < 0) { pr_err("bulk err %d\n", ret); break; } /* values not used (unknown) */ break; case 1: reg_w(gspca_dev, 0x27, 0xd0); break; case 0: ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x02), gspca_dev->usb_buf, 32, &alen, 500); if (ret < 0) { pr_err("bulk err %d\n", ret); break; } luma = ((gspca_dev->usb_buf[8] << 8) + gspca_dev->usb_buf[7] + (gspca_dev->usb_buf[11] << 8) + gspca_dev->usb_buf[10] + (gspca_dev->usb_buf[14] << 8) + gspca_dev->usb_buf[13] + (gspca_dev->usb_buf[17] << 8) + gspca_dev->usb_buf[16] + (gspca_dev->usb_buf[20] << 8) + gspca_dev->usb_buf[19] + (gspca_dev->usb_buf[23] << 8) + gspca_dev->usb_buf[22] + (gspca_dev->usb_buf[26] << 8) + gspca_dev->usb_buf[25] + (gspca_dev->usb_buf[29] << 8) + gspca_dev->usb_buf[28]) / 8; if (gspca_dev->pixfmt.width == 640) luma /= 4; reg_w(gspca_dev, 0x7d, 0x00); expo = v4l2_ctrl_g_ctrl(gspca_dev->exposure); ret = gspca_expo_autogain(gspca_dev, luma, 60, /* desired luma */ 6, /* dead zone */ 2, /* gain knee */ 70); /* expo knee */ sd->ag_cnt = AG_CNT_START; if (sd->bridge == BRIDGE_TP6810) { int new_expo = v4l2_ctrl_g_ctrl(gspca_dev->exposure); if ((expo >= 128 && new_expo < 128) || (expo < 128 && new_expo >= 128)) setframerate(gspca_dev, new_expo); } break; } } /* get stream parameters (framerate) */ static void sd_get_streamparm(struct gspca_dev *gspca_dev, struct v4l2_streamparm *parm) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_captureparm *cp = &parm->parm.capture; struct v4l2_fract *tpf = &cp->timeperframe; int fr, i; tpf->numerator = 1; i = get_fr_idx(gspca_dev); if (i & 0x80) { if (sd->bridge == BRIDGE_TP6800) fr = rates[6 - (i & 0x07)]; else fr = rates_6810[7 - (i & 0x07)]; } else { fr = rates[6 - i]; } tpf->denominator = fr; } /* set stream parameters (framerate) */ static void sd_set_streamparm(struct gspca_dev *gspca_dev, struct v4l2_streamparm *parm) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_captureparm *cp = &parm->parm.capture; struct v4l2_fract *tpf = &cp->timeperframe; int fr, i; if (tpf->numerator == 0 || tpf->denominator == 0) sd->framerate = DEFAULT_FRAME_RATE; else sd->framerate = tpf->denominator / tpf->numerator; if (gspca_dev->streaming) setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure)); /* Return the actual framerate */ i = get_fr_idx(gspca_dev); if (i & 0x80) fr = rates_6810[7 - (i & 0x07)]; else fr = rates[6 - i]; tpf->numerator = 1; tpf->denominator = fr; } static int sd_set_jcomp(struct gspca_dev *gspca_dev, const struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor != SENSOR_SOI763A) return -ENOTTY; v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality); return 0; } static int sd_get_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor != SENSOR_SOI763A) return -ENOTTY; memset(jcomp, 0, sizeof *jcomp); jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual); jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT; return 0; } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, 
struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_SHARPNESS: setsharpness(gspca_dev, ctrl->val); break; case V4L2_CID_GAMMA: setgamma(gspca_dev, ctrl->val); break; case V4L2_CID_BLUE_BALANCE: setbgain(gspca_dev, ctrl->val); break; case V4L2_CID_RED_BALANCE: setrgain(gspca_dev, ctrl->val); break; case V4L2_CID_EXPOSURE: sd_setgain(gspca_dev); break; case V4L2_CID_AUTOGAIN: if (ctrl->val) break; sd_setgain(gspca_dev); break; case V4L2_CID_JPEG_COMPRESSION_QUALITY: jpeg_set_qual(sd->jpeg_hdr, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 4); gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 1, 0xdc, 1, 0x4e); if (sd->sensor == SENSOR_CX0342) { sd->red = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_RED_BALANCE, 0, 4095, 1, 256); sd->blue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BLUE_BALANCE, 0, 4095, 1, 256); } if (sd->sensor == SENSOR_SOI763A) gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 15, 1, 3); else gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 4095, 1, 256); sd->sharpness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SHARPNESS, 0, 3, 1, 2); sd->gamma = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAMMA, 0, NGAMMA - 1, 1, (sd->sensor == SENSOR_SOI763A && sd->bridge == BRIDGE_TP6800) ? 0 : 1); if (sd->bridge == BRIDGE_TP6810) gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); if (sd->sensor == SENSOR_SOI763A) sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_JPEG_COMPRESSION_QUALITY, 0, 15, 1, (sd->bridge == BRIDGE_TP6810) ? 0 : 13); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } if (gspca_dev->autogain) v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false); else v4l2_ctrl_cluster(2, &gspca_dev->exposure); return 0; } static const struct sd_desc sd_desc = { .name = KBUILD_MODNAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .isoc_init = sd_isoc_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = sd_dq_callback, .get_streamparm = sd_get_streamparm, .set_streamparm = sd_set_streamparm, .get_jcomp = sd_get_jcomp, .set_jcomp = sd_set_jcomp, }; static const struct usb_device_id device_table[] = { {USB_DEVICE(0x06a2, 0x0003), .driver_info = BRIDGE_TP6800}, {USB_DEVICE(0x06a2, 0x6810), .driver_info = BRIDGE_TP6810}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, device_table); static int sd_probe(struct usb_interface *interface, const struct usb_device_id *id) { return gspca_dev_probe(interface, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = KBUILD_MODNAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); module_param(force_sensor, int, 0644); MODULE_PARM_DESC(force_sensor, "Force sensor. 0: cx0342, 1: soi763a");
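/*
 * Worked example (annotation only, not driver code) for get_fr_idx() above.
 * On the tp6800 bridge the frame-rate register encodes 1 = 5 fps .. 6 = 30 fps.
 * The rates[] table is scanned from the fastest rate down and the matching
 * index is inverted:
 *
 *   sd->framerate = 30, 320x240  ->  i = 0, value 6 - 0 = 6  (30 fps)
 *   sd->framerate = 30, 640x480  ->  value forced to 0x05    (15 fps,
 *                                     640x480 at 30 fps does not work)
 *
 * On the tp6810 the same scheme yields 3 = 5 fps .. 7 = 30 fps and
 * additionally sets bit 0x80 ("clock * 1"); sd_get_streamparm() tests that
 * bit when mapping the register value back to a frame rate.
 */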
// SPDX-License-Identifier: GPL-2.0
/*
 * power_supply_hwmon.c - power supply hwmon support.
*/ #include <linux/err.h> #include <linux/hwmon.h> #include <linux/power_supply.h> #include <linux/slab.h> #include "power_supply.h" struct power_supply_hwmon { struct power_supply *psy; unsigned long *props; }; static const char *const ps_temp_label[] = { "temp", "ambient temp", }; static int power_supply_hwmon_in_to_property(u32 attr) { switch (attr) { case hwmon_in_average: return POWER_SUPPLY_PROP_VOLTAGE_AVG; case hwmon_in_min: return POWER_SUPPLY_PROP_VOLTAGE_MIN; case hwmon_in_max: return POWER_SUPPLY_PROP_VOLTAGE_MAX; case hwmon_in_input: return POWER_SUPPLY_PROP_VOLTAGE_NOW; default: return -EINVAL; } } static int power_supply_hwmon_curr_to_property(u32 attr) { switch (attr) { case hwmon_curr_average: return POWER_SUPPLY_PROP_CURRENT_AVG; case hwmon_curr_max: return POWER_SUPPLY_PROP_CURRENT_MAX; case hwmon_curr_input: return POWER_SUPPLY_PROP_CURRENT_NOW; default: return -EINVAL; } } static int power_supply_hwmon_power_to_property(u32 attr) { switch (attr) { case hwmon_power_input: return POWER_SUPPLY_PROP_POWER_NOW; case hwmon_power_average: return POWER_SUPPLY_PROP_POWER_AVG; default: return -EINVAL; } } static int power_supply_hwmon_temp_to_property(u32 attr, int channel) { if (channel) { switch (attr) { case hwmon_temp_input: return POWER_SUPPLY_PROP_TEMP_AMBIENT; case hwmon_temp_min_alarm: return POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN; case hwmon_temp_max_alarm: return POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX; default: break; } } else { switch (attr) { case hwmon_temp_input: return POWER_SUPPLY_PROP_TEMP; case hwmon_temp_max: return POWER_SUPPLY_PROP_TEMP_MAX; case hwmon_temp_min: return POWER_SUPPLY_PROP_TEMP_MIN; case hwmon_temp_min_alarm: return POWER_SUPPLY_PROP_TEMP_ALERT_MIN; case hwmon_temp_max_alarm: return POWER_SUPPLY_PROP_TEMP_ALERT_MAX; default: break; } } return -EINVAL; } static int power_supply_hwmon_to_property(enum hwmon_sensor_types type, u32 attr, int channel) { switch (type) { case hwmon_in: return power_supply_hwmon_in_to_property(attr); case hwmon_curr: return power_supply_hwmon_curr_to_property(attr); case hwmon_power: return power_supply_hwmon_power_to_property(attr); case hwmon_temp: return power_supply_hwmon_temp_to_property(attr, channel); default: return -EINVAL; } } static bool power_supply_hwmon_is_a_label(enum hwmon_sensor_types type, u32 attr) { return type == hwmon_temp && attr == hwmon_temp_label; } struct hwmon_type_attr_list { const u32 *attrs; size_t n_attrs; }; static const u32 ps_temp_attrs[] = { hwmon_temp_input, hwmon_temp_min, hwmon_temp_max, hwmon_temp_min_alarm, hwmon_temp_max_alarm, }; static const struct hwmon_type_attr_list ps_type_attrs[hwmon_max] = { [hwmon_temp] = { ps_temp_attrs, ARRAY_SIZE(ps_temp_attrs) }, }; static bool power_supply_hwmon_has_input( const struct power_supply_hwmon *psyhw, enum hwmon_sensor_types type, int channel) { const struct hwmon_type_attr_list *attr_list = &ps_type_attrs[type]; size_t i; for (i = 0; i < attr_list->n_attrs; ++i) { int prop = power_supply_hwmon_to_property(type, attr_list->attrs[i], channel); if (prop >= 0 && test_bit(prop, psyhw->props)) return true; } return false; } static bool power_supply_hwmon_is_writable(enum hwmon_sensor_types type, u32 attr) { switch (type) { case hwmon_in: return attr == hwmon_in_min || attr == hwmon_in_max; case hwmon_curr: return attr == hwmon_curr_max; case hwmon_temp: return attr == hwmon_temp_max || attr == hwmon_temp_min || attr == hwmon_temp_min_alarm || attr == hwmon_temp_max_alarm; default: return false; } } static umode_t 
power_supply_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, int channel) { const struct power_supply_hwmon *psyhw = data; int prop; if (power_supply_hwmon_is_a_label(type, attr)) { if (power_supply_hwmon_has_input(psyhw, type, channel)) return 0444; else return 0; } prop = power_supply_hwmon_to_property(type, attr, channel); if (prop < 0 || !test_bit(prop, psyhw->props)) return 0; if (power_supply_property_is_writeable(psyhw->psy, prop) > 0 && power_supply_hwmon_is_writable(type, attr)) return 0644; return 0444; } static int power_supply_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, const char **str) { switch (type) { case hwmon_temp: *str = ps_temp_label[channel]; break; default: /* unreachable, but see: * gcc bug #51513 [1] and clang bug #978 [2] * * [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51513 * [2] https://github.com/ClangBuiltLinux/linux/issues/978 */ break; } return 0; } static int power_supply_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { struct power_supply_hwmon *psyhw = dev_get_drvdata(dev); struct power_supply *psy = psyhw->psy; union power_supply_propval pspval; int ret, prop; prop = power_supply_hwmon_to_property(type, attr, channel); if (prop < 0) return prop; ret = power_supply_get_property(psy, prop, &pspval); if (ret) return ret; switch (type) { /* * Both voltage and current is reported in units of * microvolts/microamps, so we need to adjust it to * milliamps(volts) */ case hwmon_curr: case hwmon_in: pspval.intval = DIV_ROUND_CLOSEST(pspval.intval, 1000); break; case hwmon_power: /* * Power properties are already in microwatts. */ break; /* * Temp needs to be converted from 1/10 C to milli-C */ case hwmon_temp: if (check_mul_overflow(pspval.intval, 100, &pspval.intval)) return -EOVERFLOW; break; default: return -EINVAL; } *val = pspval.intval; return 0; } static int power_supply_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val) { struct power_supply_hwmon *psyhw = dev_get_drvdata(dev); struct power_supply *psy = psyhw->psy; union power_supply_propval pspval; int prop; prop = power_supply_hwmon_to_property(type, attr, channel); if (prop < 0) return prop; pspval.intval = val; switch (type) { /* * Both voltage and current is reported in units of * microvolts/microamps, so we need to adjust it to * milliamps(volts) */ case hwmon_curr: case hwmon_in: if (check_mul_overflow(pspval.intval, 1000, &pspval.intval)) return -EOVERFLOW; break; /* * Temp needs to be converted from 1/10 C to milli-C */ case hwmon_temp: pspval.intval = DIV_ROUND_CLOSEST(pspval.intval, 100); break; default: return -EINVAL; } return power_supply_set_property(psy, prop, &pspval); } static const struct hwmon_ops power_supply_hwmon_ops = { .is_visible = power_supply_hwmon_is_visible, .read = power_supply_hwmon_read, .write = power_supply_hwmon_write, .read_string = power_supply_hwmon_read_string, }; static const struct hwmon_channel_info * const power_supply_hwmon_info[] = { HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM, HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM), HWMON_CHANNEL_INFO(curr, HWMON_C_AVERAGE | HWMON_C_MAX | HWMON_C_INPUT), HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_AVERAGE), HWMON_CHANNEL_INFO(in, HWMON_I_AVERAGE | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_INPUT), NULL }; static const struct hwmon_chip_info 
power_supply_hwmon_chip_info = { .ops = &power_supply_hwmon_ops, .info = power_supply_hwmon_info, }; static const enum power_supply_property power_supply_hwmon_props[] = { POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_CURRENT_MAX, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_POWER_AVG, POWER_SUPPLY_PROP_POWER_NOW, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_MAX, POWER_SUPPLY_PROP_TEMP_MIN, POWER_SUPPLY_PROP_TEMP_ALERT_MIN, POWER_SUPPLY_PROP_TEMP_ALERT_MAX, POWER_SUPPLY_PROP_TEMP_AMBIENT, POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN, POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX, POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_VOLTAGE_MIN, POWER_SUPPLY_PROP_VOLTAGE_MAX, POWER_SUPPLY_PROP_VOLTAGE_NOW, }; int power_supply_add_hwmon_sysfs(struct power_supply *psy) { struct power_supply_hwmon *psyhw; struct device *dev = &psy->dev; struct device *hwmon; int ret, i; const char *name; if (!devres_open_group(dev, power_supply_add_hwmon_sysfs, GFP_KERNEL)) return -ENOMEM; psyhw = devm_kzalloc(dev, sizeof(*psyhw), GFP_KERNEL); if (!psyhw) { ret = -ENOMEM; goto error; } psyhw->psy = psy; psyhw->props = devm_bitmap_zalloc(dev, POWER_SUPPLY_PROP_TIME_TO_FULL_AVG + 1, GFP_KERNEL); if (!psyhw->props) { ret = -ENOMEM; goto error; } for (i = 0; i < ARRAY_SIZE(power_supply_hwmon_props); i++) { const enum power_supply_property prop = power_supply_hwmon_props[i]; if (power_supply_has_property(psy, prop)) set_bit(prop, psyhw->props); } name = psy->desc->name; if (strchr(name, '-')) { char *new_name; new_name = devm_kstrdup(dev, name, GFP_KERNEL); if (!new_name) { ret = -ENOMEM; goto error; } strreplace(new_name, '-', '_'); name = new_name; } hwmon = devm_hwmon_device_register_with_info(dev, name, psyhw, &power_supply_hwmon_chip_info, NULL); ret = PTR_ERR_OR_ZERO(hwmon); if (ret) goto error; devres_close_group(dev, power_supply_add_hwmon_sysfs); return 0; error: devres_release_group(dev, NULL); return ret; } void power_supply_remove_hwmon_sysfs(struct power_supply *psy) { devres_release_group(&psy->dev, power_supply_add_hwmon_sysfs); }
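/*
 * Standalone sketch (illustration only, not kernel code) of the unit
 * conversions performed by power_supply_hwmon_read()/_write() above: the
 * power-supply core reports voltage/current in microvolts/microamps while
 * hwmon uses millivolts/milliamps, and temperature in tenths of a degree C
 * while hwmon uses millidegrees. div_round_closest() is a plain userspace
 * stand-in for the kernel's DIV_ROUND_CLOSEST() macro.
 */
#include <stdio.h>

static long div_round_closest(long x, long divisor)
{
	/* matches DIV_ROUND_CLOSEST() for non-negative operands */
	return (x + divisor / 2) / divisor;
}

int main(void)
{
	long uv = 3845600;	/* e.g. POWER_SUPPLY_PROP_VOLTAGE_NOW, in uV */
	long deci_c = 297;	/* e.g. POWER_SUPPLY_PROP_TEMP, in 0.1 C */

	/* read path: uV -> mV, 0.1 C -> milli-C */
	printf("in0_input: %ld mV\n", div_round_closest(uv, 1000)); /* 3846 */
	printf("temp1_input: %ld\n", deci_c * 100);                 /* 29700 */

	/* write path: mV -> uV, milli-C -> 0.1 C */
	printf("VOLTAGE_MAX: %ld uV\n", 4200L * 1000);              /* 4200000 */
	printf("TEMP_MAX: %ld\n", div_round_closest(45000L, 100));  /* 450 */
	return 0;
}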
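/*
 * Editor's note: illustrative sketch, not part of the driver above. The
 * hwmon core rejects device names containing '-', which is why
 * power_supply_add_hwmon_sysfs() duplicates the supply name and rewrites
 * hyphens to underscores before registering. The helper below is a
 * simplified user-space model of the kernel's strreplace() (the kernel
 * version returns a pointer to the terminating NUL instead), and the
 * supply name used here is hypothetical.
 */
#include <stdio.h>

static char *strreplace(char *str, char old, char new)
{
	char *s;

	for (s = str; *s; s++)
		if (*s == old)
			*s = new;
	return str;
}

int main(void)
{
	char name[] = "bq27xxx-battery";	/* hypothetical supply name */

	printf("%s\n", strreplace(name, '-', '_'));	/* bq27xxx_battery */
	return 0;
}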
// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for PEAK System PCAN-USB adapter * Derived from the PCAN project file driver/src/pcan_usb.c * * Copyright (C) 2003-2010 PEAK System-Technik GmbH * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com> * * Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de> */ #include <linux/unaligned.h> #include <linux/ethtool.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include "pcan_usb_core.h" /* PCAN-USB Endpoints */ #define PCAN_USB_EP_CMDOUT 1 #define PCAN_USB_EP_CMDIN (PCAN_USB_EP_CMDOUT | USB_DIR_IN) #define PCAN_USB_EP_MSGOUT 2 #define PCAN_USB_EP_MSGIN (PCAN_USB_EP_MSGOUT | USB_DIR_IN) /* PCAN-USB command struct */ #define PCAN_USB_CMD_FUNC 0 #define PCAN_USB_CMD_NUM 1 #define PCAN_USB_CMD_ARGS 2 #define PCAN_USB_CMD_ARGS_LEN 14 #define PCAN_USB_CMD_LEN (PCAN_USB_CMD_ARGS + \ PCAN_USB_CMD_ARGS_LEN) /* PCAN-USB commands */ #define PCAN_USB_CMD_BITRATE 1 #define PCAN_USB_CMD_SET_BUS 3 #define PCAN_USB_CMD_DEVID 4 #define PCAN_USB_CMD_SN 6 #define PCAN_USB_CMD_REGISTER 9 #define PCAN_USB_CMD_EXT_VCC 10 #define PCAN_USB_CMD_ERR_FR 11 #define PCAN_USB_CMD_LED 12 /* PCAN_USB_CMD_SET_BUS number arg */ #define PCAN_USB_BUS_XCVER 2 #define PCAN_USB_BUS_SILENT_MODE 3 /* PCAN_USB_CMD_xxx functions */ #define PCAN_USB_GET 1 #define PCAN_USB_SET 2 /* PCAN-USB command timeout (ms.) */ #define PCAN_USB_COMMAND_TIMEOUT 1000 /* PCAN-USB startup timeout (ms.)
*/ #define PCAN_USB_STARTUP_TIMEOUT 10 /* PCAN-USB rx/tx buffers size */ #define PCAN_USB_RX_BUFFER_SIZE 64 #define PCAN_USB_TX_BUFFER_SIZE 64 #define PCAN_USB_MSG_HEADER_LEN 2 #define PCAN_USB_MSG_TX_CAN 2 /* Tx msg is a CAN frame */ /* PCAN-USB adapter internal clock (Hz) */ #define PCAN_USB_CRYSTAL_HZ 16000000 /* PCAN-USB USB message record status/len field */ #define PCAN_USB_STATUSLEN_TIMESTAMP (1 << 7) #define PCAN_USB_STATUSLEN_INTERNAL (1 << 6) #define PCAN_USB_STATUSLEN_EXT_ID (1 << 5) #define PCAN_USB_STATUSLEN_RTR (1 << 4) #define PCAN_USB_STATUSLEN_DLC (0xf) /* PCAN-USB 4.1 CAN Id tx extended flags */ #define PCAN_USB_TX_SRR 0x01 /* SJA1000 SRR command */ #define PCAN_USB_TX_AT 0x02 /* SJA1000 AT command */ /* PCAN-USB error flags */ #define PCAN_USB_ERROR_TXFULL 0x01 #define PCAN_USB_ERROR_RXQOVR 0x02 #define PCAN_USB_ERROR_BUS_LIGHT 0x04 #define PCAN_USB_ERROR_BUS_HEAVY 0x08 #define PCAN_USB_ERROR_BUS_OFF 0x10 #define PCAN_USB_ERROR_RXQEMPTY 0x20 #define PCAN_USB_ERROR_QOVR 0x40 #define PCAN_USB_ERROR_TXQFULL 0x80 #define PCAN_USB_ERROR_BUS (PCAN_USB_ERROR_BUS_LIGHT | \ PCAN_USB_ERROR_BUS_HEAVY | \ PCAN_USB_ERROR_BUS_OFF) /* SJA1000 modes */ #define SJA1000_MODE_NORMAL 0x00 #define SJA1000_MODE_INIT 0x01 /* * tick duration = 42.666 us => * (tick_number * 44739243) >> 20 ~ (tick_number * 42666) / 1000 * accuracy = 10^-7 */ #define PCAN_USB_TS_DIV_SHIFTER 20 #define PCAN_USB_TS_US_PER_TICK 44739243 /* PCAN-USB messages record types */ #define PCAN_USB_REC_ERROR 1 #define PCAN_USB_REC_ANALOG 2 #define PCAN_USB_REC_BUSLOAD 3 #define PCAN_USB_REC_TS 4 #define PCAN_USB_REC_BUSEVT 5 /* CAN bus events notifications selection mask */ #define PCAN_USB_ERR_RXERR 0x02 /* ask for rxerr counter */ #define PCAN_USB_ERR_TXERR 0x04 /* ask for txerr counter */ /* This mask makes the adapter generate a USB packet each time the state of * the bus changes, i.e. it tells us which side, rx or tx, is responsible * for the change of the bus state. */ #define PCAN_USB_BERR_MASK (PCAN_USB_ERR_RXERR | PCAN_USB_ERR_TXERR) /* identify bus event packets with rx/tx error counters */ #define PCAN_USB_ERR_CNT_DEC 0x00 /* counters are decreasing */ #define PCAN_USB_ERR_CNT_INC 0x80 /* counters are increasing */ /* private to PCAN-USB adapter */ struct pcan_usb { struct peak_usb_device dev; struct peak_time_ref time_ref; struct timer_list restart_timer; struct can_berr_counter bec; }; /* incoming message context for decoding */ struct pcan_usb_msg_context { u16 ts16; u8 prev_ts8; u8 *ptr; u8 *end; u8 rec_cnt; u8 rec_idx; u8 rec_ts_idx; struct net_device *netdev; struct pcan_usb *pdev; }; /* * send a command */ static int pcan_usb_send_cmd(struct peak_usb_device *dev, u8 f, u8 n, u8 *p) { int err; int actual_length; /* usb device unregistered? */ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; dev->cmd_buf[PCAN_USB_CMD_FUNC] = f; dev->cmd_buf[PCAN_USB_CMD_NUM] = n; if (p) memcpy(dev->cmd_buf + PCAN_USB_CMD_ARGS, p, PCAN_USB_CMD_ARGS_LEN); err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, PCAN_USB_EP_CMDOUT), dev->cmd_buf, PCAN_USB_CMD_LEN, &actual_length, PCAN_USB_COMMAND_TIMEOUT); if (err) netdev_err(dev->netdev, "sending cmd f=0x%x n=0x%x failure: %d\n", f, n, err); return err; } /* * send a command then wait for its response */ static int pcan_usb_wait_rsp(struct peak_usb_device *dev, u8 f, u8 n, u8 *p) { int err; int actual_length; /* usb device unregistered?
*/ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; /* first, send command */ err = pcan_usb_send_cmd(dev, f, n, NULL); if (err) return err; err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, PCAN_USB_EP_CMDIN), dev->cmd_buf, PCAN_USB_CMD_LEN, &actual_length, PCAN_USB_COMMAND_TIMEOUT); if (err) netdev_err(dev->netdev, "waiting rsp f=0x%x n=0x%x failure: %d\n", f, n, err); else if (p) memcpy(p, dev->cmd_buf + PCAN_USB_CMD_ARGS, PCAN_USB_CMD_ARGS_LEN); return err; } static int pcan_usb_set_sja1000(struct peak_usb_device *dev, u8 mode) { u8 args[PCAN_USB_CMD_ARGS_LEN] = { [1] = mode, }; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_REGISTER, PCAN_USB_SET, args); } static int pcan_usb_set_bus(struct peak_usb_device *dev, u8 onoff) { u8 args[PCAN_USB_CMD_ARGS_LEN] = { [0] = !!onoff, }; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_SET_BUS, PCAN_USB_BUS_XCVER, args); } static int pcan_usb_set_silent(struct peak_usb_device *dev, u8 onoff) { u8 args[PCAN_USB_CMD_ARGS_LEN] = { [0] = !!onoff, }; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_SET_BUS, PCAN_USB_BUS_SILENT_MODE, args); } /* send the cmd to be notified from bus errors */ static int pcan_usb_set_err_frame(struct peak_usb_device *dev, u8 err_mask) { u8 args[PCAN_USB_CMD_ARGS_LEN] = { [0] = err_mask, }; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_ERR_FR, PCAN_USB_SET, args); } static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff) { u8 args[PCAN_USB_CMD_ARGS_LEN] = { [0] = !!onoff, }; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_EXT_VCC, PCAN_USB_SET, args); } static int pcan_usb_set_led(struct peak_usb_device *dev, u8 onoff) { u8 args[PCAN_USB_CMD_ARGS_LEN] = { [0] = !!onoff, }; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_LED, PCAN_USB_SET, args); } /* * set bittiming value to can */ static int pcan_usb_set_bittiming(struct peak_usb_device *dev, struct can_bittiming *bt) { u8 args[PCAN_USB_CMD_ARGS_LEN]; u8 btr0, btr1; btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6); btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) | (((bt->phase_seg2 - 1) & 0x7) << 4); if (dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) btr1 |= 0x80; netdev_info(dev->netdev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1); args[0] = btr1; args[1] = btr0; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_BITRATE, PCAN_USB_SET, args); } /* * init/reset can */ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff) { int err; err = pcan_usb_set_bus(dev, onoff); if (err) return err; if (!onoff) { err = pcan_usb_set_sja1000(dev, SJA1000_MODE_INIT); } else { /* the PCAN-USB needs time to init */ set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT)); } return err; } /* * handle end of waiting for the device to reset */ static void pcan_usb_restart(struct timer_list *t) { struct pcan_usb *pdev = from_timer(pdev, t, restart_timer); struct peak_usb_device *dev = &pdev->dev; /* notify candev and netdev */ peak_usb_restart_complete(dev); } /* * handle the submission of the restart urb */ static void pcan_usb_restart_pending(struct urb *urb) { struct pcan_usb *pdev = urb->context; /* the PCAN-USB needs time to restart */ mod_timer(&pdev->restart_timer, jiffies + msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT)); /* can delete usb resources */ peak_usb_async_complete(urb); } /* * handle asynchronous restart */ static int pcan_usb_restart_async(struct peak_usb_device *dev, struct urb *urb, u8 *buf) { struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev); if (timer_pending(&pdev->restart_timer)) return 
-EBUSY; /* set bus on (PCAN_USB_CMD_SET_BUS func, PCAN_USB_BUS_XCVER number) */ buf[PCAN_USB_CMD_FUNC] = 3; buf[PCAN_USB_CMD_NUM] = 2; buf[PCAN_USB_CMD_ARGS] = 1; usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, PCAN_USB_EP_CMDOUT), buf, PCAN_USB_CMD_LEN, pcan_usb_restart_pending, pdev); return usb_submit_urb(urb, GFP_ATOMIC); } /* * read serial number from device */ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number) { u8 args[PCAN_USB_CMD_ARGS_LEN]; int err; err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_SN, PCAN_USB_GET, args); if (err) return err; *serial_number = le32_to_cpup((__le32 *)args); return 0; } /* * read can channel id from device */ static int pcan_usb_get_can_channel_id(struct peak_usb_device *dev, u32 *can_ch_id) { u8 args[PCAN_USB_CMD_ARGS_LEN]; int err; err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args); if (err) netdev_err(dev->netdev, "getting can channel id failure: %d\n", err); else *can_ch_id = args[0]; return err; } /* set a new CAN channel id in the flash memory of the device */ static int pcan_usb_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id) { u8 args[PCAN_USB_CMD_ARGS_LEN]; /* this kind of device supports 8-bit values only */ if (can_ch_id > U8_MAX) return -EINVAL; /* during the flash process the device disconnects for ~1.25 s: * prohibit access while the interface is UP */ if (dev->netdev->flags & IFF_UP) return -EBUSY; args[0] = can_ch_id; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_DEVID, PCAN_USB_SET, args); } /* * update current time ref with received timestamp */ static int pcan_usb_update_ts(struct pcan_usb_msg_context *mc) { if ((mc->ptr + 2) > mc->end) return -EINVAL; mc->ts16 = get_unaligned_le16(mc->ptr); if (mc->rec_idx > 0) peak_usb_update_ts_now(&mc->pdev->time_ref, mc->ts16); else peak_usb_set_ts_now(&mc->pdev->time_ref, mc->ts16); return 0; } /* * decode received timestamp */ static int pcan_usb_decode_ts(struct pcan_usb_msg_context *mc, u8 first_packet) { /* only 1st packet supplies a word timestamp */ if (first_packet) { if ((mc->ptr + 2) > mc->end) return -EINVAL; mc->ts16 = get_unaligned_le16(mc->ptr); mc->prev_ts8 = mc->ts16 & 0x00ff; mc->ptr += 2; } else { u8 ts8; if ((mc->ptr + 1) > mc->end) return -EINVAL; ts8 = *mc->ptr++; if (ts8 < mc->prev_ts8) mc->ts16 += 0x100; mc->ts16 &= 0xff00; mc->ts16 |= ts8; mc->prev_ts8 = ts8; } return 0; } static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, u8 status_len) { struct sk_buff *skb; struct can_frame *cf; enum can_state new_state = CAN_STATE_ERROR_ACTIVE; /* ignore this error until 1st ts received */ if (n == PCAN_USB_ERROR_QOVR) if (!mc->pdev->time_ref.tick_count) return 0; /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(mc->netdev, &cf); if (n & PCAN_USB_ERROR_RXQOVR) { /* data overrun interrupt */ netdev_dbg(mc->netdev, "data overrun interrupt\n"); mc->netdev->stats.rx_over_errors++; mc->netdev->stats.rx_errors++; if (cf) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; } } if (n & PCAN_USB_ERROR_TXQFULL) netdev_dbg(mc->netdev, "device Tx queue full\n"); if (n & PCAN_USB_ERROR_BUS_OFF) { new_state = CAN_STATE_BUS_OFF; } else if (n & PCAN_USB_ERROR_BUS_HEAVY) { new_state = ((mc->pdev->bec.txerr >= 128) || (mc->pdev->bec.rxerr >= 128)) ? CAN_STATE_ERROR_PASSIVE : CAN_STATE_ERROR_WARNING; } else { new_state = CAN_STATE_ERROR_ACTIVE; } /* handle change of state */ if (new_state != mc->pdev->dev.can.state) { enum can_state tx_state = (mc->pdev->bec.txerr >= mc->pdev->bec.rxerr) ?
new_state : 0; enum can_state rx_state = (mc->pdev->bec.txerr <= mc->pdev->bec.rxerr) ? new_state : 0; can_change_state(mc->netdev, cf, tx_state, rx_state); if (new_state == CAN_STATE_BUS_OFF) { can_bus_off(mc->netdev); } else if (cf && (cf->can_id & CAN_ERR_CRTL)) { /* Supply TX/RX error counters in case of * controller error. */ cf->can_id = CAN_ERR_CNT; cf->data[6] = mc->pdev->bec.txerr; cf->data[7] = mc->pdev->bec.rxerr; } } if (!skb) return -ENOMEM; if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp); } netif_rx(skb); return 0; } /* decode bus event usb packet: first byte contains rxerr while the second * one contains txerr. */ static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir) { struct pcan_usb *pdev = mc->pdev; /* according to the content of the packet */ switch (ir) { case PCAN_USB_ERR_CNT_DEC: case PCAN_USB_ERR_CNT_INC: /* save rx/tx error counters from the packet in the device context */ pdev->bec.rxerr = mc->ptr[1]; pdev->bec.txerr = mc->ptr[2]; break; default: /* reserved */ break; } return 0; } /* * decode non-data usb message */ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc, u8 status_len) { u8 rec_len = status_len & PCAN_USB_STATUSLEN_DLC; u8 f, n; int err; /* check whether function and number can be read */ if ((mc->ptr + 2) > mc->end) return -EINVAL; f = mc->ptr[PCAN_USB_CMD_FUNC]; n = mc->ptr[PCAN_USB_CMD_NUM]; mc->ptr += PCAN_USB_CMD_ARGS; if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx); if (err) return err; /* Next packet in the buffer will have a timestamp on a single * byte */ mc->rec_ts_idx++; } switch (f) { case PCAN_USB_REC_ERROR: err = pcan_usb_decode_error(mc, n, status_len); if (err) return err; break; case PCAN_USB_REC_ANALOG: /* analog values (ignored) */ rec_len = 2; break; case PCAN_USB_REC_BUSLOAD: /* bus load (ignored) */ rec_len = 1; break; case PCAN_USB_REC_TS: /* only timestamp */ if (pcan_usb_update_ts(mc)) return -EINVAL; break; case PCAN_USB_REC_BUSEVT: /* bus event notifications (get rxerr/txerr) */ err = pcan_usb_handle_bus_evt(mc, n); if (err) return err; break; default: netdev_err(mc->netdev, "unexpected function %u\n", f); break; } if ((mc->ptr + rec_len) > mc->end) return -EINVAL; mc->ptr += rec_len; return 0; } /* * decode data usb message */ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) { u8 rec_len = status_len & PCAN_USB_STATUSLEN_DLC; struct sk_buff *skb; struct can_frame *cf; struct skb_shared_hwtstamps *hwts; u32 can_id_flags; skb = alloc_can_skb(mc->netdev, &cf); if (!skb) return -ENOMEM; if (status_len & PCAN_USB_STATUSLEN_EXT_ID) { if ((mc->ptr + 4) > mc->end) goto decode_failed; can_id_flags = get_unaligned_le32(mc->ptr); cf->can_id = can_id_flags >> 3 | CAN_EFF_FLAG; mc->ptr += 4; } else { if ((mc->ptr + 2) > mc->end) goto decode_failed; can_id_flags = get_unaligned_le16(mc->ptr); cf->can_id = can_id_flags >> 5; mc->ptr += 2; } can_frame_set_cc_len(cf, rec_len, mc->pdev->dev.can.ctrlmode); /* Only first packet timestamp is a word */ if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx)) goto decode_failed; /* Next packet in the buffer will have a timestamp on a single byte */ mc->rec_ts_idx++; /* read data */ memset(cf->data, 0x0, sizeof(cf->data)); if (status_len & PCAN_USB_STATUSLEN_RTR) { cf->can_id |= CAN_RTR_FLAG; } else { if ((mc->ptr + rec_len) > mc->end) goto decode_failed; memcpy(cf->data, mc->ptr,
cf->len); mc->ptr += rec_len; /* Ignore next byte (client private id) if SRR bit is set */ if (can_id_flags & PCAN_USB_TX_SRR) mc->ptr++; /* update statistics */ mc->netdev->stats.rx_bytes += cf->len; } mc->netdev->stats.rx_packets++; /* convert timestamp into kernel time */ hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp); /* push the skb */ netif_rx(skb); return 0; decode_failed: dev_kfree_skb(skb); return -EINVAL; } /* * process incoming message */ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf) { struct pcan_usb_msg_context mc = { .rec_cnt = ibuf[1], .ptr = ibuf + PCAN_USB_MSG_HEADER_LEN, .end = ibuf + lbuf, .netdev = dev->netdev, .pdev = container_of(dev, struct pcan_usb, dev), }; int err; for (err = 0; mc.rec_idx < mc.rec_cnt && !err; mc.rec_idx++) { u8 sl = *mc.ptr++; /* handle status and error frames here */ if (sl & PCAN_USB_STATUSLEN_INTERNAL) { err = pcan_usb_decode_status(&mc, sl); /* handle normal can frames here */ } else { err = pcan_usb_decode_data(&mc, sl); } } return err; } /* * process any incoming buffer */ static int pcan_usb_decode_buf(struct peak_usb_device *dev, struct urb *urb) { int err = 0; if (urb->actual_length > PCAN_USB_MSG_HEADER_LEN) { err = pcan_usb_decode_msg(dev, urb->transfer_buffer, urb->actual_length); } else if (urb->actual_length > 0) { netdev_err(dev->netdev, "usb message length error (%u)\n", urb->actual_length); err = -EINVAL; } return err; } /* * process outgoing packet */ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, u8 *obuf, size_t *size) { struct net_device *netdev = dev->netdev; struct net_device_stats *stats = &netdev->stats; struct can_frame *cf = (struct can_frame *)skb->data; u32 can_id_flags = cf->can_id & CAN_ERR_MASK; u8 *pc; obuf[0] = PCAN_USB_MSG_TX_CAN; obuf[1] = 1; /* only one CAN frame is stored in the packet */ pc = obuf + PCAN_USB_MSG_HEADER_LEN; /* status/len byte */ *pc = can_get_cc_dlc(cf, dev->can.ctrlmode); if (cf->can_id & CAN_RTR_FLAG) *pc |= PCAN_USB_STATUSLEN_RTR; /* can id */ if (cf->can_id & CAN_EFF_FLAG) { *pc |= PCAN_USB_STATUSLEN_EXT_ID; pc++; can_id_flags <<= 3; if (dev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) can_id_flags |= PCAN_USB_TX_SRR; if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) can_id_flags |= PCAN_USB_TX_AT; put_unaligned_le32(can_id_flags, pc); pc += 4; } else { pc++; can_id_flags <<= 5; if (dev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) can_id_flags |= PCAN_USB_TX_SRR; if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) can_id_flags |= PCAN_USB_TX_AT; put_unaligned_le16(can_id_flags, pc); pc += 2; } /* can data */ if (!(cf->can_id & CAN_RTR_FLAG)) { memcpy(pc, cf->data, cf->len); pc += cf->len; } /* SRR bit needs a writer id (useless here) */ if (can_id_flags & PCAN_USB_TX_SRR) *pc++ = 0x80; obuf[(*size)-1] = (u8)(stats->tx_packets & 0xff); return 0; } /* socket callback used to copy berr counters values received through USB */ static int pcan_usb_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct peak_usb_device *dev = netdev_priv(netdev); struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev); *bec = pdev->bec; /* must return 0 */ return 0; } /* * start interface */ static int pcan_usb_start(struct peak_usb_device *dev) { struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev); int err; /* number of bits used in timestamps read from adapter struct */ peak_usb_init_time_ref(&pdev->time_ref, &pcan_usb); pdev->bec.rxerr = 0; pdev->bec.txerr = 
0; /* always ask the device for BERR reporting, to be able to switch from * WARNING to PASSIVE state */ err = pcan_usb_set_err_frame(dev, PCAN_USB_BERR_MASK); if (err) netdev_warn(dev->netdev, "Asking for BERR reporting error %u\n", err); /* if revision greater than 3, can put silent mode on/off */ if (dev->device_rev > 3) { err = pcan_usb_set_silent(dev, dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY); if (err) return err; } return pcan_usb_set_ext_vcc(dev, 0); } static int pcan_usb_init(struct peak_usb_device *dev) { struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev); u32 serial_number; int err; /* initialize a timer needed to wait for hardware restart */ timer_setup(&pdev->restart_timer, pcan_usb_restart, 0); /* * explicit use of dev_xxx() instead of netdev_xxx() here: * information displayed are related to the device itself, not * to the canx netdevice. */ err = pcan_usb_get_serial(dev, &serial_number); if (err) { dev_err(dev->netdev->dev.parent, "unable to read %s serial number (err %d)\n", pcan_usb.name, err); return err; } dev_info(dev->netdev->dev.parent, "PEAK-System %s adapter hwrev %u serial %08X (%u channel)\n", pcan_usb.name, dev->device_rev, serial_number, pcan_usb.ctrl_count); /* Since rev 4.1, PCAN-USB is able to make single-shot as well as * looped back frames. */ if (dev->device_rev >= 41) { struct can_priv *priv = netdev_priv(dev->netdev); priv->ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_LOOPBACK; } else { dev_info(dev->netdev->dev.parent, "Firmware update available. Please contact support@peak-system.com\n"); } return 0; } /* * probe function for new PCAN-USB usb interface */ static int pcan_usb_probe(struct usb_interface *intf) { struct usb_host_interface *if_desc; int i; if_desc = intf->altsetting; /* check interface endpoint addresses */ for (i = 0; i < if_desc->desc.bNumEndpoints; i++) { struct usb_endpoint_descriptor *ep = &if_desc->endpoint[i].desc; switch (ep->bEndpointAddress) { case PCAN_USB_EP_CMDOUT: case PCAN_USB_EP_CMDIN: case PCAN_USB_EP_MSGOUT: case PCAN_USB_EP_MSGIN: break; default: return -ENODEV; } } return 0; } static int pcan_usb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct peak_usb_device *dev = netdev_priv(netdev); int err = 0; switch (state) { case ETHTOOL_ID_ACTIVE: /* call ON/OFF twice a second */ return 2; case ETHTOOL_ID_OFF: err = pcan_usb_set_led(dev, 0); break; case ETHTOOL_ID_ON: fallthrough; case ETHTOOL_ID_INACTIVE: /* restore LED default */ err = pcan_usb_set_led(dev, 1); break; default: break; } return err; } /* This device only handles 8-bit CAN channel id. 
*/ static int pcan_usb_get_eeprom_len(struct net_device *netdev) { return sizeof(u8); } static const struct ethtool_ops pcan_usb_ethtool_ops = { .set_phys_id = pcan_usb_set_phys_id, .get_ts_info = pcan_get_ts_info, .get_eeprom_len = pcan_usb_get_eeprom_len, .get_eeprom = peak_usb_get_eeprom, .set_eeprom = peak_usb_set_eeprom, }; /* * describe the PCAN-USB adapter */ static const struct can_bittiming_const pcan_usb_const = { .name = "pcan_usb", .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; const struct peak_usb_adapter pcan_usb = { .name = "PCAN-USB", .device_id = PCAN_USB_PRODUCT_ID, .ctrl_count = 1, .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_USB_CRYSTAL_HZ / 2, }, .bittiming_const = &pcan_usb_const, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb), .ethtool_ops = &pcan_usb_ethtool_ops, /* timestamps usage */ .ts_used_bits = 16, .us_per_ts_scale = PCAN_USB_TS_US_PER_TICK, /* us=(ts*scale) */ .us_per_ts_shift = PCAN_USB_TS_DIV_SHIFTER, /* >> shift */ /* give here messages in/out endpoints */ .ep_msg_in = PCAN_USB_EP_MSGIN, .ep_msg_out = {PCAN_USB_EP_MSGOUT}, /* size of rx/tx usb buffers */ .rx_buffer_size = PCAN_USB_RX_BUFFER_SIZE, .tx_buffer_size = PCAN_USB_TX_BUFFER_SIZE, /* device callbacks */ .intf_probe = pcan_usb_probe, .dev_init = pcan_usb_init, .dev_set_bus = pcan_usb_write_mode, .dev_set_bittiming = pcan_usb_set_bittiming, .dev_get_can_channel_id = pcan_usb_get_can_channel_id, .dev_set_can_channel_id = pcan_usb_set_can_channel_id, .dev_decode_buf = pcan_usb_decode_buf, .dev_encode_msg = pcan_usb_encode_msg, .dev_start = pcan_usb_start, .dev_restart_async = pcan_usb_restart_async, .do_get_berr_counter = pcan_usb_get_berr_counter, };
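/*
 * Editor's note: illustrative sketch, not part of the driver above. It
 * demonstrates the fixed-point timestamp conversion the adapter constants
 * encode: one tick is ~42.666 us, and us = (ticks * 44739243) >> 20
 * approximates ticks * 42666 / 1000 without a division, since
 * 44739243 / 2^20 = 42.666666...
 */
#include <stdio.h>
#include <stdint.h>

#define PCAN_USB_TS_DIV_SHIFTER	20
#define PCAN_USB_TS_US_PER_TICK	44739243

int main(void)
{
	uint32_t ticks;

	/* a few sample values of the adapter's 16-bit tick counter */
	for (ticks = 1; ticks <= 0xffff; ticks *= 16) {
		uint64_t us = ((uint64_t)ticks * PCAN_USB_TS_US_PER_TICK)
					>> PCAN_USB_TS_DIV_SHIFTER;
		printf("%5u ticks = %7llu us (exact: %.3f us)\n",
		       (unsigned)ticks, (unsigned long long)us,
		       ticks * 42.666666);
	}
	return 0;
}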
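/*
 * Editor's note: illustrative sketch, not part of the driver above. It
 * reproduces the BTR0/BTR1 packing done by pcan_usb_set_bittiming(). The
 * example bittiming values are an assumption chosen so that, at the
 * adapter's 8 MHz CAN clock (PCAN_USB_CRYSTAL_HZ / 2), the 16 time quanta
 * per bit give the classic SJA1000 500 kbit/s setting BTR0=0x00 BTR1=0x1c.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int brp = 1, sjw = 1;
	unsigned int prop_seg = 5, phase_seg1 = 8, phase_seg2 = 2;
	int triple_sampling = 0;	/* CAN_CTRLMODE_3_SAMPLES */
	uint8_t btr0, btr1;

	btr0 = ((brp - 1) & 0x3f) | (((sjw - 1) & 0x3) << 6);
	btr1 = ((prop_seg + phase_seg1 - 1) & 0xf) |
	       (((phase_seg2 - 1) & 0x7) << 4);
	if (triple_sampling)
		btr1 |= 0x80;

	printf("BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);	/* 0x00 0x1c */
	return 0;
}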
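/*
 * Editor's note: illustrative round trip, not part of the driver above.
 * It models the CAN id packing used by pcan_usb_encode_msg() and
 * pcan_usb_decode_data(): a standard id is stored left-shifted by 5 in a
 * 16-bit little-endian word, an extended id left-shifted by 3 in a 32-bit
 * word, with the freed low bits reused on transmit for the
 * PCAN_USB_TX_SRR/PCAN_USB_TX_AT flags (which simply shift away again on
 * receive). The id values are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define PCAN_USB_TX_SRR	0x01	/* set when loopback is requested */
#define PCAN_USB_TX_AT	0x02	/* set when one-shot is requested */

int main(void)
{
	uint32_t eff_id = 0x1abcdef0;	/* 29-bit extended id */
	uint16_t sff_id = 0x123;	/* 11-bit standard id */

	uint32_t tx32 = (eff_id << 3) | PCAN_USB_TX_SRR;
	uint16_t tx16 = sff_id << 5;

	/* receive side: shift back down, the flag bits fall away */
	printf("eff: 0x%08x -> 0x%08x\n", (unsigned)eff_id,
	       (unsigned)(tx32 >> 3));
	printf("sff: 0x%03x -> 0x%03x\n", (unsigned)sff_id,
	       (unsigned)(tx16 >> 5));
	return 0;
}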
// SPDX-License-Identifier: GPL-2.0-only /* * HID driver for Logitech receivers * * Copyright (c) 2011 Logitech */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/kfifo.h> #include <linux/delay.h> #include <linux/usb.h> /* For to_usb_interface for kvm extra intf check */ #include <linux/unaligned.h> #include "hid-ids.h" #define DJ_MAX_PAIRED_DEVICES 7 #define DJ_MAX_NUMBER_NOTIFS 8 #define DJ_RECEIVER_INDEX 0 #define DJ_DEVICE_INDEX_MIN 1 #define DJ_DEVICE_INDEX_MAX 7 #define DJREPORT_SHORT_LENGTH 15 #define DJREPORT_LONG_LENGTH 32 #define REPORT_ID_DJ_SHORT 0x20 #define REPORT_ID_DJ_LONG 0x21 #define REPORT_ID_HIDPP_SHORT 0x10 #define REPORT_ID_HIDPP_LONG 0x11 #define REPORT_ID_HIDPP_VERY_LONG 0x12 #define HIDPP_REPORT_SHORT_LENGTH 7 #define HIDPP_REPORT_LONG_LENGTH 20 #define HIDPP_RECEIVER_INDEX 0xff #define REPORT_TYPE_RFREPORT_FIRST 0x01 #define REPORT_TYPE_RFREPORT_LAST 0x1F /* Command Switch to DJ mode */ #define REPORT_TYPE_CMD_SWITCH 0x80 #define CMD_SWITCH_PARAM_DEVBITFIELD 0x00 #define CMD_SWITCH_PARAM_TIMEOUT_SECONDS 0x01 #define TIMEOUT_NO_KEEPALIVE 0x00 /* Command to Get the list of Paired devices */ #define REPORT_TYPE_CMD_GET_PAIRED_DEVICES 0x81 /* Device Paired Notification */ #define REPORT_TYPE_NOTIF_DEVICE_PAIRED 0x41 #define SPFUNCTION_MORE_NOTIF_EXPECTED 0x01 #define SPFUNCTION_DEVICE_LIST_EMPTY 0x02 #define
DEVICE_PAIRED_PARAM_SPFUNCTION 0x00 #define DEVICE_PAIRED_PARAM_EQUAD_ID_LSB 0x01 #define DEVICE_PAIRED_PARAM_EQUAD_ID_MSB 0x02 #define DEVICE_PAIRED_RF_REPORT_TYPE 0x03 /* Device Un-Paired Notification */ #define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED 0x40 /* Connection Status Notification */ #define REPORT_TYPE_NOTIF_CONNECTION_STATUS 0x42 #define CONNECTION_STATUS_PARAM_STATUS 0x00 #define STATUS_LINKLOSS 0x01 /* Error Notification */ #define REPORT_TYPE_NOTIF_ERROR 0x7F #define NOTIF_ERROR_PARAM_ETYPE 0x00 #define ETYPE_KEEPALIVE_TIMEOUT 0x01 /* supported DJ HID && RF report types */ #define REPORT_TYPE_KEYBOARD 0x01 #define REPORT_TYPE_MOUSE 0x02 #define REPORT_TYPE_CONSUMER_CONTROL 0x03 #define REPORT_TYPE_SYSTEM_CONTROL 0x04 #define REPORT_TYPE_MEDIA_CENTER 0x08 #define REPORT_TYPE_LEDS 0x0E /* RF Report types bitfield */ #define STD_KEYBOARD BIT(1) #define STD_MOUSE BIT(2) #define MULTIMEDIA BIT(3) #define POWER_KEYS BIT(4) #define KBD_MOUSE BIT(5) #define MEDIA_CENTER BIT(8) #define KBD_LEDS BIT(14) /* Fake (bitnr > NUMBER_OF_HID_REPORTS) bit to track HID++ capability */ #define HIDPP BIT_ULL(63) /* HID++ Device Connected Notification */ #define REPORT_TYPE_NOTIF_DEVICE_CONNECTED 0x41 #define HIDPP_PARAM_PROTO_TYPE 0x00 #define HIDPP_PARAM_DEVICE_INFO 0x01 #define HIDPP_PARAM_EQUAD_LSB 0x02 #define HIDPP_PARAM_EQUAD_MSB 0x03 #define HIDPP_PARAM_27MHZ_DEVID 0x03 #define HIDPP_DEVICE_TYPE_MASK GENMASK(3, 0) #define HIDPP_LINK_STATUS_MASK BIT(6) #define HIDPP_MANUFACTURER_MASK BIT(7) #define HIDPP_27MHZ_SECURE_MASK BIT(7) #define HIDPP_DEVICE_TYPE_KEYBOARD 1 #define HIDPP_DEVICE_TYPE_MOUSE 2 #define HIDPP_SET_REGISTER 0x80 #define HIDPP_GET_LONG_REGISTER 0x83 #define HIDPP_REG_CONNECTION_STATE 0x02 #define HIDPP_REG_PAIRING_INFORMATION 0xB5 #define HIDPP_PAIRING_INFORMATION 0x20 #define HIDPP_FAKE_DEVICE_ARRIVAL 0x02 enum recvr_type { recvr_type_dj, recvr_type_hidpp, recvr_type_gaming_hidpp, recvr_type_mouse_only, recvr_type_27mhz, recvr_type_bluetooth, recvr_type_dinovo, }; struct dj_report { u8 report_id; u8 device_index; u8 report_type; u8 report_params[DJREPORT_SHORT_LENGTH - 3]; }; struct hidpp_event { u8 report_id; u8 device_index; u8 sub_id; u8 params[HIDPP_REPORT_LONG_LENGTH - 3U]; } __packed; struct dj_receiver_dev { struct hid_device *mouse; struct hid_device *keyboard; struct hid_device *hidpp; struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN]; struct list_head list; struct kref kref; struct work_struct work; struct kfifo notif_fifo; unsigned long last_query; /* in jiffies */ bool ready; enum recvr_type type; unsigned int unnumbered_application; spinlock_t lock; }; struct dj_device { struct hid_device *hdev; struct dj_receiver_dev *dj_receiver_dev; u64 reports_supported; u8 device_index; }; #define WORKITEM_TYPE_EMPTY 0 #define WORKITEM_TYPE_PAIRED 1 #define WORKITEM_TYPE_UNPAIRED 2 #define WORKITEM_TYPE_UNKNOWN 255 struct dj_workitem { u8 type; /* WORKITEM_TYPE_* */ u8 device_index; u8 device_type; u8 quad_id_msb; u8 quad_id_lsb; u64 reports_supported; }; /* Keyboard descriptor (1) */ static const char kbd_descriptor[] = { 0x05, 0x01, /* USAGE_PAGE (generic Desktop) */ 0x09, 0x06, /* USAGE (Keyboard) */ 0xA1, 0x01, /* COLLECTION (Application) */ 0x85, 0x01, /* REPORT_ID (1) */ 0x95, 0x08, /* REPORT_COUNT (8) */ 0x75, 0x01, /* REPORT_SIZE (1) */ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */ 0x25, 0x01, /* LOGICAL_MAXIMUM (1) */ 0x05, 0x07, /* USAGE_PAGE (Keyboard) */ 0x19, 0xE0, /* USAGE_MINIMUM (Left Control) */ 0x29, 0xE7, /* USAGE_MAXIMUM (Right GUI) 
*/ 0x81, 0x02, /* INPUT (Data,Var,Abs) */ 0x95, 0x06, /* REPORT_COUNT (6) */ 0x75, 0x08, /* REPORT_SIZE (8) */ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */ 0x26, 0xFF, 0x00, /* LOGICAL_MAXIMUM (255) */ 0x05, 0x07, /* USAGE_PAGE (Keyboard) */ 0x19, 0x00, /* USAGE_MINIMUM (no event) */ 0x2A, 0xFF, 0x00, /* USAGE_MAXIMUM (reserved) */ 0x81, 0x00, /* INPUT (Data,Ary,Abs) */ 0x85, 0x0e, /* REPORT_ID (14) */ 0x05, 0x08, /* USAGE PAGE (LED page) */ 0x95, 0x05, /* REPORT COUNT (5) */ 0x75, 0x01, /* REPORT SIZE (1) */ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */ 0x25, 0x01, /* LOGICAL_MAXIMUM (1) */ 0x19, 0x01, /* USAGE MINIMUM (1) */ 0x29, 0x05, /* USAGE MAXIMUM (5) */ 0x91, 0x02, /* OUTPUT (Data, Variable, Absolute) */ 0x95, 0x01, /* REPORT COUNT (1) */ 0x75, 0x03, /* REPORT SIZE (3) */ 0x91, 0x01, /* OUTPUT (Constant) */ 0xC0 }; /* Mouse descriptor (2) */ static const char mse_descriptor[] = { 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */ 0x09, 0x02, /* USAGE (Mouse) */ 0xA1, 0x01, /* COLLECTION (Application) */ 0x85, 0x02, /* REPORT_ID = 2 */ 0x09, 0x01, /* USAGE (pointer) */ 0xA1, 0x00, /* COLLECTION (physical) */ 0x05, 0x09, /* USAGE_PAGE (buttons) */ 0x19, 0x01, /* USAGE_MIN (1) */ 0x29, 0x10, /* USAGE_MAX (16) */ 0x15, 0x00, /* LOGICAL_MIN (0) */ 0x25, 0x01, /* LOGICAL_MAX (1) */ 0x95, 0x10, /* REPORT_COUNT (16) */ 0x75, 0x01, /* REPORT_SIZE (1) */ 0x81, 0x02, /* INPUT (data var abs) */ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */ 0x16, 0x01, 0xF8, /* LOGICAL_MIN (-2047) */ 0x26, 0xFF, 0x07, /* LOGICAL_MAX (2047) */ 0x75, 0x0C, /* REPORT_SIZE (12) */ 0x95, 0x02, /* REPORT_COUNT (2) */ 0x09, 0x30, /* USAGE (X) */ 0x09, 0x31, /* USAGE (Y) */ 0x81, 0x06, /* INPUT */ 0x15, 0x81, /* LOGICAL_MIN (-127) */ 0x25, 0x7F, /* LOGICAL_MAX (127) */ 0x75, 0x08, /* REPORT_SIZE (8) */ 0x95, 0x01, /* REPORT_COUNT (1) */ 0x09, 0x38, /* USAGE (wheel) */ 0x81, 0x06, /* INPUT */ 0x05, 0x0C, /* USAGE_PAGE(consumer) */ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */ 0x95, 0x01, /* REPORT_COUNT (1) */ 0x81, 0x06, /* INPUT */ 0xC0, /* END_COLLECTION */ 0xC0, /* END_COLLECTION */ }; /* Mouse descriptor (2) for 27 MHz receiver, only 8 buttons */ static const char mse_27mhz_descriptor[] = { 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */ 0x09, 0x02, /* USAGE (Mouse) */ 0xA1, 0x01, /* COLLECTION (Application) */ 0x85, 0x02, /* REPORT_ID = 2 */ 0x09, 0x01, /* USAGE (pointer) */ 0xA1, 0x00, /* COLLECTION (physical) */ 0x05, 0x09, /* USAGE_PAGE (buttons) */ 0x19, 0x01, /* USAGE_MIN (1) */ 0x29, 0x08, /* USAGE_MAX (8) */ 0x15, 0x00, /* LOGICAL_MIN (0) */ 0x25, 0x01, /* LOGICAL_MAX (1) */ 0x95, 0x08, /* REPORT_COUNT (8) */ 0x75, 0x01, /* REPORT_SIZE (1) */ 0x81, 0x02, /* INPUT (data var abs) */ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */ 0x16, 0x01, 0xF8, /* LOGICAL_MIN (-2047) */ 0x26, 0xFF, 0x07, /* LOGICAL_MAX (2047) */ 0x75, 0x0C, /* REPORT_SIZE (12) */ 0x95, 0x02, /* REPORT_COUNT (2) */ 0x09, 0x30, /* USAGE (X) */ 0x09, 0x31, /* USAGE (Y) */ 0x81, 0x06, /* INPUT */ 0x15, 0x81, /* LOGICAL_MIN (-127) */ 0x25, 0x7F, /* LOGICAL_MAX (127) */ 0x75, 0x08, /* REPORT_SIZE (8) */ 0x95, 0x01, /* REPORT_COUNT (1) */ 0x09, 0x38, /* USAGE (wheel) */ 0x81, 0x06, /* INPUT */ 0x05, 0x0C, /* USAGE_PAGE(consumer) */ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */ 0x95, 0x01, /* REPORT_COUNT (1) */ 0x81, 0x06, /* INPUT */ 0xC0, /* END_COLLECTION */ 0xC0, /* END_COLLECTION */ }; /* Mouse descriptor (2) for Bluetooth receiver, low-res hwheel, 12 buttons */ static const char mse_bluetooth_descriptor[] = { 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */ 0x09, 0x02, /* 
USAGE (Mouse) */ 0xA1, 0x01, /* COLLECTION (Application) */ 0x85, 0x02, /* REPORT_ID = 2 */ 0x09, 0x01, /* USAGE (pointer) */ 0xA1, 0x00, /* COLLECTION (physical) */ 0x05, 0x09, /* USAGE_PAGE (buttons) */ 0x19, 0x01, /* USAGE_MIN (1) */ 0x29, 0x08, /* USAGE_MAX (8) */ 0x15, 0x00, /* LOGICAL_MIN (0) */ 0x25, 0x01, /* LOGICAL_MAX (1) */ 0x95, 0x08, /* REPORT_COUNT (8) */ 0x75, 0x01, /* REPORT_SIZE (1) */ 0x81, 0x02, /* INPUT (data var abs) */ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */ 0x16, 0x01, 0xF8, /* LOGICAL_MIN (-2047) */ 0x26, 0xFF, 0x07, /* LOGICAL_MAX (2047) */ 0x75, 0x0C, /* REPORT_SIZE (12) */ 0x95, 0x02, /* REPORT_COUNT (2) */ 0x09, 0x30, /* USAGE (X) */ 0x09, 0x31, /* USAGE (Y) */ 0x81, 0x06, /* INPUT */ 0x15, 0x81, /* LOGICAL_MIN (-127) */ 0x25, 0x7F, /* LOGICAL_MAX (127) */ 0x75, 0x08, /* REPORT_SIZE (8) */ 0x95, 0x01, /* REPORT_COUNT (1) */ 0x09, 0x38, /* USAGE (wheel) */ 0x81, 0x06, /* INPUT */ 0x05, 0x0C, /* USAGE_PAGE(consumer) */ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */ 0x15, 0xF9, /* LOGICAL_MIN (-7) */ 0x25, 0x07, /* LOGICAL_MAX (7) */ 0x75, 0x04, /* REPORT_SIZE (4) */ 0x95, 0x01, /* REPORT_COUNT (1) */ 0x81, 0x06, /* INPUT */ 0x05, 0x09, /* USAGE_PAGE (buttons) */ 0x19, 0x09, /* USAGE_MIN (9) */ 0x29, 0x0C, /* USAGE_MAX (12) */ 0x15, 0x00, /* LOGICAL_MIN (0) */ 0x25, 0x01, /* LOGICAL_MAX (1) */ 0x75, 0x01, /* REPORT_SIZE (1) */ 0x95, 0x04, /* REPORT_COUNT (4) */ 0x81, 0x02, /* INPUT (Data,Var,Abs) */ 0xC0, /* END_COLLECTION */ 0xC0, /* END_COLLECTION */ }; /* Mouse descriptor (5) for Bluetooth receiver, normal-res hwheel, 8 buttons */ static const char mse5_bluetooth_descriptor[] = { 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */ 0x09, 0x02, /* Usage (Mouse) */ 0xa1, 0x01, /* Collection (Application) */ 0x85, 0x05, /* Report ID (5) */ 0x09, 0x01, /* Usage (Pointer) */ 0xa1, 0x00, /* Collection (Physical) */ 0x05, 0x09, /* Usage Page (Button) */ 0x19, 0x01, /* Usage Minimum (1) */ 0x29, 0x08, /* Usage Maximum (8) */ 0x15, 0x00, /* Logical Minimum (0) */ 0x25, 0x01, /* Logical Maximum (1) */ 0x95, 0x08, /* Report Count (8) */ 0x75, 0x01, /* Report Size (1) */ 0x81, 0x02, /* Input (Data,Var,Abs) */ 0x05, 0x01, /* Usage Page (Generic Desktop) */ 0x16, 0x01, 0xf8, /* Logical Minimum (-2047) */ 0x26, 0xff, 0x07, /* Logical Maximum (2047) */ 0x75, 0x0c, /* Report Size (12) */ 0x95, 0x02, /* Report Count (2) */ 0x09, 0x30, /* Usage (X) */ 0x09, 0x31, /* Usage (Y) */ 0x81, 0x06, /* Input (Data,Var,Rel) */ 0x15, 0x81, /* Logical Minimum (-127) */ 0x25, 0x7f, /* Logical Maximum (127) */ 0x75, 0x08, /* Report Size (8) */ 0x95, 0x01, /* Report Count (1) */ 0x09, 0x38, /* Usage (Wheel) */ 0x81, 0x06, /* Input (Data,Var,Rel) */ 0x05, 0x0c, /* Usage Page (Consumer Devices) */ 0x0a, 0x38, 0x02, /* Usage (AC Pan) */ 0x15, 0x81, /* Logical Minimum (-127) */ 0x25, 0x7f, /* Logical Maximum (127) */ 0x75, 0x08, /* Report Size (8) */ 0x95, 0x01, /* Report Count (1) */ 0x81, 0x06, /* Input (Data,Var,Rel) */ 0xc0, /* End Collection */ 0xc0, /* End Collection */ }; /* Gaming Mouse descriptor (2) */ static const char mse_high_res_descriptor[] = { 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */ 0x09, 0x02, /* USAGE (Mouse) */ 0xA1, 0x01, /* COLLECTION (Application) */ 0x85, 0x02, /* REPORT_ID = 2 */ 0x09, 0x01, /* USAGE (pointer) */ 0xA1, 0x00, /* COLLECTION (physical) */ 0x05, 0x09, /* USAGE_PAGE (buttons) */ 0x19, 0x01, /* USAGE_MIN (1) */ 0x29, 0x10, /* USAGE_MAX (16) */ 0x15, 0x00, /* LOGICAL_MIN (0) */ 0x25, 0x01, /* LOGICAL_MAX (1) */ 0x95, 0x10, /* REPORT_COUNT (16) */ 0x75, 0x01, /* 
REPORT_SIZE (1) */ 0x81, 0x02, /* INPUT (data var abs) */ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */ 0x16, 0x01, 0x80, /* LOGICAL_MIN (-32767) */ 0x26, 0xFF, 0x7F, /* LOGICAL_MAX (32767) */ 0x75, 0x10, /* REPORT_SIZE (16) */ 0x95, 0x02, /* REPORT_COUNT (2) */ 0x09, 0x30, /* USAGE (X) */ 0x09, 0x31, /* USAGE (Y) */ 0x81, 0x06, /* INPUT */ 0x15, 0x81, /* LOGICAL_MIN (-127) */ 0x25, 0x7F, /* LOGICAL_MAX (127) */ 0x75, 0x08, /* REPORT_SIZE (8) */ 0x95, 0x01, /* REPORT_COUNT (1) */ 0x09, 0x38, /* USAGE (wheel) */ 0x81, 0x06, /* INPUT */ 0x05, 0x0C, /* USAGE_PAGE(consumer) */ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */ 0x95, 0x01, /* REPORT_COUNT (1) */ 0x81, 0x06, /* INPUT */ 0xC0, /* END_COLLECTION */ 0xC0, /* END_COLLECTION */ }; /* Consumer Control descriptor (3) */ static const char consumer_descriptor[] = { 0x05, 0x0C, /* USAGE_PAGE (Consumer Devices) */ 0x09, 0x01, /* USAGE (Consumer Control) */ 0xA1, 0x01, /* COLLECTION (Application) */ 0x85, 0x03, /* REPORT_ID = 3 */ 0x75, 0x10, /* REPORT_SIZE (16) */ 0x95, 0x02, /* REPORT_COUNT (2) */ 0x15, 0x01, /* LOGICAL_MIN (1) */ 0x26, 0xFF, 0x02, /* LOGICAL_MAX (767) */ 0x19, 0x01, /* USAGE_MIN (1) */ 0x2A, 0xFF, 0x02, /* USAGE_MAX (767) */ 0x81, 0x00, /* INPUT (Data Ary Abs) */ 0xC0, /* END_COLLECTION */ }; /* */ /* System control descriptor (4) */ static const char syscontrol_descriptor[] = { 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */ 0x09, 0x80, /* USAGE (System Control) */ 0xA1, 0x01, /* COLLECTION (Application) */ 0x85, 0x04, /* REPORT_ID = 4 */ 0x75, 0x02, /* REPORT_SIZE (2) */ 0x95, 0x01, /* REPORT_COUNT (1) */ 0x15, 0x01, /* LOGICAL_MIN (1) */ 0x25, 0x03, /* LOGICAL_MAX (3) */ 0x09, 0x82, /* USAGE (System Sleep) */ 0x09, 0x81, /* USAGE (System Power Down) */ 0x09, 0x83, /* USAGE (System Wake Up) */ 0x81, 0x60, /* INPUT (Data Ary Abs NPrf Null) */ 0x75, 0x06, /* REPORT_SIZE (6) */ 0x81, 0x03, /* INPUT (Cnst Var Abs) */ 0xC0, /* END_COLLECTION */ }; /* Media descriptor (8) */ static const char media_descriptor[] = { 0x06, 0xbc, 0xff, /* Usage Page 0xffbc */ 0x09, 0x88, /* Usage 0x0088 */ 0xa1, 0x01, /* BeginCollection */ 0x85, 0x08, /* Report ID 8 */ 0x19, 0x01, /* Usage Min 0x0001 */ 0x29, 0xff, /* Usage Max 0x00ff */ 0x15, 0x01, /* Logical Min 1 */ 0x26, 0xff, 0x00, /* Logical Max 255 */ 0x75, 0x08, /* Report Size 8 */ 0x95, 0x01, /* Report Count 1 */ 0x81, 0x00, /* Input */ 0xc0, /* EndCollection */ }; /* */ /* HIDPP descriptor */ static const char hidpp_descriptor[] = { 0x06, 0x00, 0xff, /* Usage Page (Vendor Defined Page 1) */ 0x09, 0x01, /* Usage (Vendor Usage 1) */ 0xa1, 0x01, /* Collection (Application) */ 0x85, 0x10, /* Report ID (16) */ 0x75, 0x08, /* Report Size (8) */ 0x95, 0x06, /* Report Count (6) */ 0x15, 0x00, /* Logical Minimum (0) */ 0x26, 0xff, 0x00, /* Logical Maximum (255) */ 0x09, 0x01, /* Usage (Vendor Usage 1) */ 0x81, 0x00, /* Input (Data,Arr,Abs) */ 0x09, 0x01, /* Usage (Vendor Usage 1) */ 0x91, 0x00, /* Output (Data,Arr,Abs) */ 0xc0, /* End Collection */ 0x06, 0x00, 0xff, /* Usage Page (Vendor Defined Page 1) */ 0x09, 0x02, /* Usage (Vendor Usage 2) */ 0xa1, 0x01, /* Collection (Application) */ 0x85, 0x11, /* Report ID (17) */ 0x75, 0x08, /* Report Size (8) */ 0x95, 0x13, /* Report Count (19) */ 0x15, 0x00, /* Logical Minimum (0) */ 0x26, 0xff, 0x00, /* Logical Maximum (255) */ 0x09, 0x02, /* Usage (Vendor Usage 2) */ 0x81, 0x00, /* Input (Data,Arr,Abs) */ 0x09, 0x02, /* Usage (Vendor Usage 2) */ 0x91, 0x00, /* Output (Data,Arr,Abs) */ 0xc0, /* End Collection */ 0x06, 0x00, 0xff, /* Usage Page (Vendor Defined 
Page 1) */ 0x09, 0x04, /* Usage (Vendor Usage 0x04) */ 0xa1, 0x01, /* Collection (Application) */ 0x85, 0x20, /* Report ID (32) */ 0x75, 0x08, /* Report Size (8) */ 0x95, 0x0e, /* Report Count (14) */ 0x15, 0x00, /* Logical Minimum (0) */ 0x26, 0xff, 0x00, /* Logical Maximum (255) */ 0x09, 0x41, /* Usage (Vendor Usage 0x41) */ 0x81, 0x00, /* Input (Data,Arr,Abs) */ 0x09, 0x41, /* Usage (Vendor Usage 0x41) */ 0x91, 0x00, /* Output (Data,Arr,Abs) */ 0x85, 0x21, /* Report ID (33) */ 0x95, 0x1f, /* Report Count (31) */ 0x15, 0x00, /* Logical Minimum (0) */ 0x26, 0xff, 0x00, /* Logical Maximum (255) */ 0x09, 0x42, /* Usage (Vendor Usage 0x42) */ 0x81, 0x00, /* Input (Data,Arr,Abs) */ 0x09, 0x42, /* Usage (Vendor Usage 0x42) */ 0x91, 0x00, /* Output (Data,Arr,Abs) */ 0xc0, /* End Collection */ }; /* Maximum size of all defined hid reports in bytes (including report id) */ #define MAX_REPORT_SIZE 8 /* Make sure all descriptors are present here */ #define MAX_RDESC_SIZE \ (sizeof(kbd_descriptor) + \ sizeof(mse_bluetooth_descriptor) + \ sizeof(mse5_bluetooth_descriptor) + \ sizeof(consumer_descriptor) + \ sizeof(syscontrol_descriptor) + \ sizeof(media_descriptor) + \ sizeof(hidpp_descriptor)) /* Number of possible hid report types that can be created by this driver. * * Right now, RF report types have the same report types (or report ids) * as the hid reports created from those RF reports. In the future * this doesn't have to be true. * * For instance, RF report type 0x01, which has a size of 8 bytes, corresponds * to hid report id 0x01: this is the standard keyboard. The same applies to * mice reports, consumer control, etc. If a new RF report is created, it * doesn't have to have the same report id as its corresponding hid report, * so a translation may have to take place for future report types. */ #define NUMBER_OF_HID_REPORTS 32 static const u8 hid_reportid_size_map[NUMBER_OF_HID_REPORTS] = { [1] = 8, /* Standard keyboard */ [2] = 8, /* Standard mouse */ [3] = 5, /* Consumer control */ [4] = 2, /* System control */ [8] = 2, /* Media Center */ }; #define LOGITECH_DJ_INTERFACE_NUMBER 0x02 static const struct hid_ll_driver logi_dj_ll_driver; static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev); static void delayedwork_callback(struct work_struct *work); static LIST_HEAD(dj_hdev_list); static DEFINE_MUTEX(dj_hdev_list_lock); static bool recvr_type_is_bluetooth(enum recvr_type type) { return type == recvr_type_bluetooth || type == recvr_type_dinovo; } /* * dj/HID++ receivers are really a single logical entity, but for BIOS/Windows * compatibility they have multiple USB interfaces. On HID++ receivers we need * to listen for input reports on both interfaces. The functions below are used * to create a single struct dj_receiver_dev for all interfaces belonging to * a single USB-device / receiver. */ static struct dj_receiver_dev *dj_find_receiver_dev(struct hid_device *hdev, enum recvr_type type) { struct dj_receiver_dev *djrcv_dev; char sep; /* * The bluetooth receiver contains a built-in hub and has separate * USB-devices for the keyboard and mouse interfaces. */ sep = recvr_type_is_bluetooth(type) ? '.'
: '/'; /* Try to find an already-probed interface from the same device */ list_for_each_entry(djrcv_dev, &dj_hdev_list, list) { if (djrcv_dev->mouse && hid_compare_device_paths(hdev, djrcv_dev->mouse, sep)) { kref_get(&djrcv_dev->kref); return djrcv_dev; } if (djrcv_dev->keyboard && hid_compare_device_paths(hdev, djrcv_dev->keyboard, sep)) { kref_get(&djrcv_dev->kref); return djrcv_dev; } if (djrcv_dev->hidpp && hid_compare_device_paths(hdev, djrcv_dev->hidpp, sep)) { kref_get(&djrcv_dev->kref); return djrcv_dev; } } return NULL; } static void dj_release_receiver_dev(struct kref *kref) { struct dj_receiver_dev *djrcv_dev = container_of(kref, struct dj_receiver_dev, kref); list_del(&djrcv_dev->list); kfifo_free(&djrcv_dev->notif_fifo); kfree(djrcv_dev); } static void dj_put_receiver_dev(struct hid_device *hdev) { struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); mutex_lock(&dj_hdev_list_lock); if (djrcv_dev->mouse == hdev) djrcv_dev->mouse = NULL; if (djrcv_dev->keyboard == hdev) djrcv_dev->keyboard = NULL; if (djrcv_dev->hidpp == hdev) djrcv_dev->hidpp = NULL; kref_put(&djrcv_dev->kref, dj_release_receiver_dev); mutex_unlock(&dj_hdev_list_lock); } static struct dj_receiver_dev *dj_get_receiver_dev(struct hid_device *hdev, enum recvr_type type, unsigned int application, bool is_hidpp) { struct dj_receiver_dev *djrcv_dev; mutex_lock(&dj_hdev_list_lock); djrcv_dev = dj_find_receiver_dev(hdev, type); if (!djrcv_dev) { djrcv_dev = kzalloc(sizeof(*djrcv_dev), GFP_KERNEL); if (!djrcv_dev) goto out; INIT_WORK(&djrcv_dev->work, delayedwork_callback); spin_lock_init(&djrcv_dev->lock); if (kfifo_alloc(&djrcv_dev->notif_fifo, DJ_MAX_NUMBER_NOTIFS * sizeof(struct dj_workitem), GFP_KERNEL)) { kfree(djrcv_dev); djrcv_dev = NULL; goto out; } kref_init(&djrcv_dev->kref); list_add_tail(&djrcv_dev->list, &dj_hdev_list); djrcv_dev->last_query = jiffies; djrcv_dev->type = type; } if (application == HID_GD_KEYBOARD) djrcv_dev->keyboard = hdev; if (application == HID_GD_MOUSE) djrcv_dev->mouse = hdev; if (is_hidpp) djrcv_dev->hidpp = hdev; hid_set_drvdata(hdev, djrcv_dev); out: mutex_unlock(&dj_hdev_list_lock); return djrcv_dev; } static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev, struct dj_workitem *workitem) { /* Called in delayed work context */ struct dj_device *dj_dev; unsigned long flags; spin_lock_irqsave(&djrcv_dev->lock, flags); dj_dev = djrcv_dev->paired_dj_devices[workitem->device_index]; djrcv_dev->paired_dj_devices[workitem->device_index] = NULL; spin_unlock_irqrestore(&djrcv_dev->lock, flags); if (dj_dev != NULL) { hid_destroy_device(dj_dev->hdev); kfree(dj_dev); } else { hid_err(djrcv_dev->hidpp, "%s: can't destroy a NULL device\n", __func__); } } static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev, struct dj_workitem *workitem) { /* Called in delayed work context */ struct hid_device *djrcv_hdev = djrcv_dev->hidpp; struct hid_device *dj_hiddev; struct dj_device *dj_dev; u8 device_index = workitem->device_index; unsigned long flags; /* Device index goes from 1 to 7, we need 3 bytes to store the * colon, the index, and a null terminator */ unsigned char tmpstr[3]; /* We are the only one ever adding a device, no need to lock */ if (djrcv_dev->paired_dj_devices[device_index]) { /* The device is already known. No need to reallocate it.
*/ dbg_hid("%s: device is already known\n", __func__); return; } dj_hiddev = hid_allocate_device(); if (IS_ERR(dj_hiddev)) { hid_err(djrcv_hdev, "%s: hid_allocate_dev failed\n", __func__); return; } dj_hiddev->ll_driver = &logi_dj_ll_driver; dj_hiddev->dev.parent = &djrcv_hdev->dev; dj_hiddev->bus = BUS_USB; dj_hiddev->vendor = djrcv_hdev->vendor; dj_hiddev->product = (workitem->quad_id_msb << 8) | workitem->quad_id_lsb; if (workitem->device_type) { const char *type_str = "Device"; switch (workitem->device_type) { case 0x01: type_str = "Keyboard"; break; case 0x02: type_str = "Mouse"; break; case 0x03: type_str = "Numpad"; break; case 0x04: type_str = "Presenter"; break; case 0x07: type_str = "Remote Control"; break; case 0x08: type_str = "Trackball"; break; case 0x09: type_str = "Touchpad"; break; } snprintf(dj_hiddev->name, sizeof(dj_hiddev->name), "Logitech Wireless %s PID:%04x", type_str, dj_hiddev->product); } else { snprintf(dj_hiddev->name, sizeof(dj_hiddev->name), "Logitech Wireless Device PID:%04x", dj_hiddev->product); } if (djrcv_dev->type == recvr_type_27mhz) dj_hiddev->group = HID_GROUP_LOGITECH_27MHZ_DEVICE; else dj_hiddev->group = HID_GROUP_LOGITECH_DJ_DEVICE; memcpy(dj_hiddev->phys, djrcv_hdev->phys, sizeof(djrcv_hdev->phys)); snprintf(tmpstr, sizeof(tmpstr), ":%d", device_index); strlcat(dj_hiddev->phys, tmpstr, sizeof(dj_hiddev->phys)); dj_dev = kzalloc(sizeof(struct dj_device), GFP_KERNEL); if (!dj_dev) { hid_err(djrcv_hdev, "%s: failed allocating dj_dev\n", __func__); goto dj_device_allocate_fail; } dj_dev->reports_supported = workitem->reports_supported; dj_dev->hdev = dj_hiddev; dj_dev->dj_receiver_dev = djrcv_dev; dj_dev->device_index = device_index; dj_hiddev->driver_data = dj_dev; spin_lock_irqsave(&djrcv_dev->lock, flags); djrcv_dev->paired_dj_devices[device_index] = dj_dev; spin_unlock_irqrestore(&djrcv_dev->lock, flags); if (hid_add_device(dj_hiddev)) { hid_err(djrcv_hdev, "%s: failed adding dj_device\n", __func__); goto hid_add_device_fail; } return; hid_add_device_fail: spin_lock_irqsave(&djrcv_dev->lock, flags); djrcv_dev->paired_dj_devices[device_index] = NULL; spin_unlock_irqrestore(&djrcv_dev->lock, flags); kfree(dj_dev); dj_device_allocate_fail: hid_destroy_device(dj_hiddev); } static void delayedwork_callback(struct work_struct *work) { struct dj_receiver_dev *djrcv_dev = container_of(work, struct dj_receiver_dev, work); struct dj_workitem workitem; unsigned long flags; int count; int retval; dbg_hid("%s\n", __func__); spin_lock_irqsave(&djrcv_dev->lock, flags); /* * Since we attach to multiple interfaces, we may get scheduled before * we are bound to the HID++ interface, catch this. 
*/ if (!djrcv_dev->ready) { pr_warn("%s: delayedwork queued before hidpp interface was enumerated\n", __func__); spin_unlock_irqrestore(&djrcv_dev->lock, flags); return; } count = kfifo_out(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem)); if (count != sizeof(workitem)) { spin_unlock_irqrestore(&djrcv_dev->lock, flags); return; } if (!kfifo_is_empty(&djrcv_dev->notif_fifo)) schedule_work(&djrcv_dev->work); spin_unlock_irqrestore(&djrcv_dev->lock, flags); switch (workitem.type) { case WORKITEM_TYPE_PAIRED: logi_dj_recv_add_djhid_device(djrcv_dev, &workitem); break; case WORKITEM_TYPE_UNPAIRED: logi_dj_recv_destroy_djhid_device(djrcv_dev, &workitem); break; case WORKITEM_TYPE_UNKNOWN: retval = logi_dj_recv_query_paired_devices(djrcv_dev); if (retval) { hid_err(djrcv_dev->hidpp, "%s: logi_dj_recv_query_paired_devices error: %d\n", __func__, retval); } break; case WORKITEM_TYPE_EMPTY: dbg_hid("%s: device list is empty\n", __func__); break; } } /* * Sometimes we receive reports for which we do not have a paired dj_device * associated with the device_index or report-type to forward the report to. * This means that the original "device paired" notification corresponding * to the dj_device never reached this driver. Possible reasons for this are: * 1) hid-core discards all packets coming from a device during probe(). * 2) if the receiver is plugged into a KVM switch, then the pairing reports * are only forwarded to it while the focus is on this PC. * This function deals with this by re-asking the receiver for the list of * connected devices in the delayed work callback. * This function MUST be called with djrcv->lock held. */ static void logi_dj_recv_queue_unknown_work(struct dj_receiver_dev *djrcv_dev) { struct dj_workitem workitem = { .type = WORKITEM_TYPE_UNKNOWN }; /* Rate limit queries done because of unhandled reports to 2/sec */ if (time_before(jiffies, djrcv_dev->last_query + HZ / 2)) return; kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem)); schedule_work(&djrcv_dev->work); } static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev, struct dj_report *dj_report) { /* We are called from atomic context (tasklet && djrcv->lock held) */ struct dj_workitem workitem = { .device_index = dj_report->device_index, }; switch (dj_report->report_type) { case REPORT_TYPE_NOTIF_DEVICE_PAIRED: workitem.type = WORKITEM_TYPE_PAIRED; if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] & SPFUNCTION_DEVICE_LIST_EMPTY) { workitem.type = WORKITEM_TYPE_EMPTY; break; } fallthrough; case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: workitem.quad_id_msb = dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB]; workitem.quad_id_lsb = dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB]; workitem.reports_supported = get_unaligned_le32( dj_report->report_params + DEVICE_PAIRED_RF_REPORT_TYPE); workitem.reports_supported |= HIDPP; if (dj_report->report_type == REPORT_TYPE_NOTIF_DEVICE_UNPAIRED) workitem.type = WORKITEM_TYPE_UNPAIRED; break; default: logi_dj_recv_queue_unknown_work(djrcv_dev); return; } kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem)); schedule_work(&djrcv_dev->work); } /* * Some quad/bluetooth keyboards have a builtin touchpad; in this case we see * only 1 paired device with a device_type of REPORT_TYPE_KEYBOARD. For the * touchpad to work we must also forward mouse input reports to the dj_hiddev * created for the keyboard (instead of forwarding them to a second paired * device with a device_type of REPORT_TYPE_MOUSE as we normally would).
* * On Dinovo receivers the keyboard's touchpad and an optional paired actual * mouse send separate input reports, INPUT(2) aka STD_MOUSE for the mouse * and INPUT(5) aka KBD_MOUSE for the keyboard's touchpad. * * On MX5x00 receivers (which can also be paired with a Dinovo keyboard) * INPUT(2) is used for both an optional paired actual mouse and for the * keyboard's touchpad. */ static const u16 kbd_builtin_touchpad_ids[] = { 0xb309, /* Dinovo Edge */ 0xb30c, /* Dinovo Mini */ }; static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev, struct hidpp_event *hidpp_report, struct dj_workitem *workitem) { struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); int i, id; workitem->type = WORKITEM_TYPE_PAIRED; workitem->device_type = hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] & HIDPP_DEVICE_TYPE_MASK; workitem->quad_id_msb = hidpp_report->params[HIDPP_PARAM_EQUAD_MSB]; workitem->quad_id_lsb = hidpp_report->params[HIDPP_PARAM_EQUAD_LSB]; switch (workitem->device_type) { case REPORT_TYPE_KEYBOARD: workitem->reports_supported |= STD_KEYBOARD | MULTIMEDIA | POWER_KEYS | MEDIA_CENTER | HIDPP; id = (workitem->quad_id_msb << 8) | workitem->quad_id_lsb; for (i = 0; i < ARRAY_SIZE(kbd_builtin_touchpad_ids); i++) { if (id == kbd_builtin_touchpad_ids[i]) { if (djrcv_dev->type == recvr_type_dinovo) workitem->reports_supported |= KBD_MOUSE; else workitem->reports_supported |= STD_MOUSE; break; } } break; case REPORT_TYPE_MOUSE: workitem->reports_supported |= STD_MOUSE | HIDPP | MULTIMEDIA; break; } } static void logi_hidpp_dev_conn_notif_27mhz(struct hid_device *hdev, struct hidpp_event *hidpp_report, struct dj_workitem *workitem) { workitem->type = WORKITEM_TYPE_PAIRED; workitem->quad_id_lsb = hidpp_report->params[HIDPP_PARAM_27MHZ_DEVID]; switch (hidpp_report->device_index) { case 1: /* Index 1 is always a mouse */ case 2: /* Index 2 is always a mouse */ workitem->device_type = HIDPP_DEVICE_TYPE_MOUSE; workitem->reports_supported |= STD_MOUSE | HIDPP; break; case 3: /* Index 3 is always the keyboard */ if (hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] & HIDPP_27MHZ_SECURE_MASK) { hid_info(hdev, "Keyboard connection is encrypted\n"); } else { hid_warn(hdev, "Keyboard events are sent over the air in plain-text / unencrypted\n"); hid_warn(hdev, "See: https://gitlab.freedesktop.org/jwrdegoede/logitech-27mhz-keyboard-encryption-setup/\n"); } fallthrough; case 4: /* Index 4 is used for an optional separate numpad */ workitem->device_type = HIDPP_DEVICE_TYPE_KEYBOARD; workitem->reports_supported |= STD_KEYBOARD | MULTIMEDIA | POWER_KEYS | HIDPP; break; default: hid_warn(hdev, "%s: unexpected device-index %d", __func__, hidpp_report->device_index); } } static void logi_hidpp_recv_queue_notif(struct hid_device *hdev, struct hidpp_event *hidpp_report) { /* We are called from atomic context (tasklet && djrcv->lock held) */ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); const char *device_type = "UNKNOWN"; struct dj_workitem workitem = { .type = WORKITEM_TYPE_EMPTY, .device_index = hidpp_report->device_index, }; switch (hidpp_report->params[HIDPP_PARAM_PROTO_TYPE]) { case 0x01: device_type = "Bluetooth"; /* Bluetooth connect packet contents are the same as (e)QUAD */ logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); if (!(hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] & HIDPP_MANUFACTURER_MASK)) { hid_info(hdev, "Non-Logitech device connected on slot %d\n", hidpp_report->device_index); workitem.reports_supported &= ~HIDPP; } break; case 0x02: device_type = "27 MHz";
logi_hidpp_dev_conn_notif_27mhz(hdev, hidpp_report, &workitem); break; case 0x03: device_type = "QUAD or eQUAD"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); break; case 0x04: device_type = "eQUAD step 4 DJ"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); break; case 0x05: device_type = "DFU Lite"; break; case 0x06: device_type = "eQUAD step 4 Lite"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); break; case 0x07: device_type = "eQUAD step 4 Gaming"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); workitem.reports_supported |= STD_KEYBOARD; break; case 0x08: device_type = "eQUAD step 4 for gamepads"; break; case 0x0a: device_type = "eQUAD nano Lite"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); break; case 0x0c: device_type = "eQUAD Lightspeed 1"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); workitem.reports_supported |= STD_KEYBOARD; break; case 0x0d: device_type = "eQUAD Lightspeed 1.1"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); workitem.reports_supported |= STD_KEYBOARD; break; case 0x0f: case 0x11: device_type = "eQUAD Lightspeed 1.2"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); workitem.reports_supported |= STD_KEYBOARD; break; } /* custom receiver device (eg. powerplay) */ if (hidpp_report->device_index == 7) { workitem.reports_supported |= HIDPP; } if (workitem.type == WORKITEM_TYPE_EMPTY) { hid_warn(hdev, "unusable device of type %s (0x%02x) connected on slot %d", device_type, hidpp_report->params[HIDPP_PARAM_PROTO_TYPE], hidpp_report->device_index); return; } hid_info(hdev, "device of type %s (0x%02x) connected on slot %d", device_type, hidpp_report->params[HIDPP_PARAM_PROTO_TYPE], hidpp_report->device_index); kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem)); schedule_work(&djrcv_dev->work); } static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev, struct dj_report *dj_report) { /* We are called from atomic context (tasklet && djrcv->lock held) */ unsigned int i; u8 reportbuffer[MAX_REPORT_SIZE]; struct dj_device *djdev; djdev = djrcv_dev->paired_dj_devices[dj_report->device_index]; memset(reportbuffer, 0, sizeof(reportbuffer)); for (i = 0; i < NUMBER_OF_HID_REPORTS; i++) { if (djdev->reports_supported & (1 << i)) { reportbuffer[0] = i; if (hid_input_report(djdev->hdev, HID_INPUT_REPORT, reportbuffer, hid_reportid_size_map[i], 1)) { dbg_hid("hid_input_report error sending null " "report\n"); } } } } static void logi_dj_recv_forward_dj(struct dj_receiver_dev *djrcv_dev, struct dj_report *dj_report) { /* We are called from atomic context (tasklet && djrcv->lock held) */ struct dj_device *dj_device; dj_device = djrcv_dev->paired_dj_devices[dj_report->device_index]; if ((dj_report->report_type > ARRAY_SIZE(hid_reportid_size_map) - 1) || (hid_reportid_size_map[dj_report->report_type] == 0)) { dbg_hid("invalid report type:%x\n", dj_report->report_type); return; } if (hid_input_report(dj_device->hdev, HID_INPUT_REPORT, &dj_report->report_type, hid_reportid_size_map[dj_report->report_type], 1)) { dbg_hid("hid_input_report error\n"); } } static void logi_dj_recv_forward_report(struct dj_device *dj_dev, u8 *data, int size) { /* We are called from atomic context (tasklet && djrcv->lock held) */ if (hid_input_report(dj_dev->hdev, HID_INPUT_REPORT, data, size, 1)) dbg_hid("hid_input_report error\n"); } static void logi_dj_recv_forward_input_report(struct hid_device *hdev, u8 *data, int size) { struct 
dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); struct dj_device *dj_dev; unsigned long flags; u8 report = data[0]; int i; if (report > REPORT_TYPE_RFREPORT_LAST) { hid_err(hdev, "Unexpected input report number %d\n", report); return; } spin_lock_irqsave(&djrcv_dev->lock, flags); for (i = 0; i < (DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN); i++) { dj_dev = djrcv_dev->paired_dj_devices[i]; if (dj_dev && (dj_dev->reports_supported & BIT(report))) { logi_dj_recv_forward_report(dj_dev, data, size); spin_unlock_irqrestore(&djrcv_dev->lock, flags); return; } } logi_dj_recv_queue_unknown_work(djrcv_dev); spin_unlock_irqrestore(&djrcv_dev->lock, flags); dbg_hid("No dj-devs handling input report number %d\n", report); } static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev, struct dj_report *dj_report) { struct hid_device *hdev = djrcv_dev->hidpp; struct hid_report *report; struct hid_report_enum *output_report_enum; u8 *data = (u8 *)(&dj_report->device_index); unsigned int i; output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT]; report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT]; if (!report) { hid_err(hdev, "%s: unable to find dj report\n", __func__); return -ENODEV; } for (i = 0; i < DJREPORT_SHORT_LENGTH - 1; i++) report->field[0]->value[i] = data[i]; hid_hw_request(hdev, report, HID_REQ_SET_REPORT); return 0; } static int logi_dj_recv_query_hidpp_devices(struct dj_receiver_dev *djrcv_dev) { static const u8 template[] = { REPORT_ID_HIDPP_SHORT, HIDPP_RECEIVER_INDEX, HIDPP_SET_REGISTER, HIDPP_REG_CONNECTION_STATE, HIDPP_FAKE_DEVICE_ARRIVAL, 0x00, 0x00 }; u8 *hidpp_report; int retval; hidpp_report = kmemdup(template, sizeof(template), GFP_KERNEL); if (!hidpp_report) return -ENOMEM; retval = hid_hw_raw_request(djrcv_dev->hidpp, REPORT_ID_HIDPP_SHORT, hidpp_report, sizeof(template), HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); kfree(hidpp_report); return (retval < 0) ? retval : 0; } static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) { struct dj_report *dj_report; int retval; djrcv_dev->last_query = jiffies; if (djrcv_dev->type != recvr_type_dj) return logi_dj_recv_query_hidpp_devices(djrcv_dev); dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); if (!dj_report) return -ENOMEM; dj_report->report_id = REPORT_ID_DJ_SHORT; dj_report->device_index = HIDPP_RECEIVER_INDEX; dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES; retval = logi_dj_recv_send_report(djrcv_dev, dj_report); kfree(dj_report); return retval; } static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, unsigned timeout) { struct hid_device *hdev = djrcv_dev->hidpp; struct dj_report *dj_report; u8 *buf; int retval = 0; dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); if (!dj_report) return -ENOMEM; if (djrcv_dev->type == recvr_type_dj) { dj_report->report_id = REPORT_ID_DJ_SHORT; dj_report->device_index = HIDPP_RECEIVER_INDEX; dj_report->report_type = REPORT_TYPE_CMD_SWITCH; dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F; dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout; retval = logi_dj_recv_send_report(djrcv_dev, dj_report); /* * Ugly sleep to work around a USB 3.0 bug when the receiver is * still processing the "switch-to-dj" command while we send * another command. * 50 msec should give the receiver enough time to be ready.
*/ msleep(50); if (retval) { kfree(dj_report); return retval; } } /* * Magical bits to set up hidpp notifications when the dj devices * are connected/disconnected. * * We can reuse dj_report because HIDPP_REPORT_SHORT_LENGTH is smaller * than DJREPORT_SHORT_LENGTH. */ buf = (u8 *)dj_report; memset(buf, 0, HIDPP_REPORT_SHORT_LENGTH); buf[0] = REPORT_ID_HIDPP_SHORT; buf[1] = HIDPP_RECEIVER_INDEX; buf[2] = 0x80; buf[3] = 0x00; buf[4] = 0x00; buf[5] = 0x09; buf[6] = 0x00; retval = hid_hw_raw_request(hdev, REPORT_ID_HIDPP_SHORT, buf, HIDPP_REPORT_SHORT_LENGTH, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); kfree(dj_report); return retval; } static int logi_dj_ll_open(struct hid_device *hid) { dbg_hid("%s: %s\n", __func__, hid->phys); return 0; } static void logi_dj_ll_close(struct hid_device *hid) { dbg_hid("%s: %s\n", __func__, hid->phys); } /* * Register 0xB5 is "pairing information". It is solely intended for the * receiver, so do not overwrite the device index. */ static u8 unifying_pairing_query[] = { REPORT_ID_HIDPP_SHORT, HIDPP_RECEIVER_INDEX, HIDPP_GET_LONG_REGISTER, HIDPP_REG_PAIRING_INFORMATION }; static u8 unifying_pairing_answer[] = { REPORT_ID_HIDPP_LONG, HIDPP_RECEIVER_INDEX, HIDPP_GET_LONG_REGISTER, HIDPP_REG_PAIRING_INFORMATION }; static int logi_dj_ll_raw_request(struct hid_device *hid, unsigned char reportnum, __u8 *buf, size_t count, unsigned char report_type, int reqtype) { struct dj_device *djdev = hid->driver_data; struct dj_receiver_dev *djrcv_dev = djdev->dj_receiver_dev; u8 *out_buf; int ret; if ((buf[0] == REPORT_ID_HIDPP_SHORT) || (buf[0] == REPORT_ID_HIDPP_LONG) || (buf[0] == REPORT_ID_HIDPP_VERY_LONG)) { if (count < 2) return -EINVAL; /* special case where we should not overwrite * the device_index */ if (count == 7 && !memcmp(buf, unifying_pairing_query, sizeof(unifying_pairing_query))) buf[4] = (buf[4] & 0xf0) | (djdev->device_index - 1); else buf[1] = djdev->device_index; return hid_hw_raw_request(djrcv_dev->hidpp, reportnum, buf, count, report_type, reqtype); } if (buf[0] != REPORT_TYPE_LEDS) return -EINVAL; if (djrcv_dev->type != recvr_type_dj && count >= 2) { if (!djrcv_dev->keyboard) { hid_warn(hid, "Received REPORT_TYPE_LEDS request before the keyboard interface was enumerated\n"); return 0; } /* usbhid overrides the report ID and ignores the first byte */ return hid_hw_raw_request(djrcv_dev->keyboard, 0, buf, count, report_type, reqtype); } out_buf = kzalloc(DJREPORT_SHORT_LENGTH, GFP_ATOMIC); if (!out_buf) return -ENOMEM; if (count > DJREPORT_SHORT_LENGTH - 2) count = DJREPORT_SHORT_LENGTH - 2; out_buf[0] = REPORT_ID_DJ_SHORT; out_buf[1] = djdev->device_index; memcpy(out_buf + 2, buf, count); ret = hid_hw_raw_request(djrcv_dev->hidpp, out_buf[0], out_buf, DJREPORT_SHORT_LENGTH, report_type, reqtype); kfree(out_buf); return ret; } static void rdcat(char *rdesc, unsigned int *rsize, const char *data, unsigned int size) { memcpy(rdesc + *rsize, data, size); *rsize += size; } static int logi_dj_ll_parse(struct hid_device *hid) { struct dj_device *djdev = hid->driver_data; unsigned int rsize = 0; char *rdesc; int retval; dbg_hid("%s\n", __func__); djdev->hdev->version = 0x0111; djdev->hdev->country = 0x00; rdesc = kmalloc(MAX_RDESC_SIZE, GFP_KERNEL); if (!rdesc) return -ENOMEM; if (djdev->reports_supported & STD_KEYBOARD) { dbg_hid("%s: sending a kbd descriptor, reports_supported: %llx\n", __func__, djdev->reports_supported); rdcat(rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor)); } if (djdev->reports_supported & STD_MOUSE) { dbg_hid("%s: sending a mouse 
descriptor, reports_supported: %llx\n", __func__, djdev->reports_supported); if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp || djdev->dj_receiver_dev->type == recvr_type_mouse_only) rdcat(rdesc, &rsize, mse_high_res_descriptor, sizeof(mse_high_res_descriptor)); else if (djdev->dj_receiver_dev->type == recvr_type_27mhz) rdcat(rdesc, &rsize, mse_27mhz_descriptor, sizeof(mse_27mhz_descriptor)); else if (recvr_type_is_bluetooth(djdev->dj_receiver_dev->type)) rdcat(rdesc, &rsize, mse_bluetooth_descriptor, sizeof(mse_bluetooth_descriptor)); else rdcat(rdesc, &rsize, mse_descriptor, sizeof(mse_descriptor)); } if (djdev->reports_supported & KBD_MOUSE) { dbg_hid("%s: sending a kbd-mouse descriptor, reports_supported: %llx\n", __func__, djdev->reports_supported); rdcat(rdesc, &rsize, mse5_bluetooth_descriptor, sizeof(mse5_bluetooth_descriptor)); } if (djdev->reports_supported & MULTIMEDIA) { dbg_hid("%s: sending a multimedia report descriptor: %llx\n", __func__, djdev->reports_supported); rdcat(rdesc, &rsize, consumer_descriptor, sizeof(consumer_descriptor)); } if (djdev->reports_supported & POWER_KEYS) { dbg_hid("%s: sending a power keys report descriptor: %llx\n", __func__, djdev->reports_supported); rdcat(rdesc, &rsize, syscontrol_descriptor, sizeof(syscontrol_descriptor)); } if (djdev->reports_supported & MEDIA_CENTER) { dbg_hid("%s: sending a media center report descriptor: %llx\n", __func__, djdev->reports_supported); rdcat(rdesc, &rsize, media_descriptor, sizeof(media_descriptor)); } if (djdev->reports_supported & KBD_LEDS) { dbg_hid("%s: need to send kbd leds report descriptor: %llx\n", __func__, djdev->reports_supported); } if (djdev->reports_supported & HIDPP) { dbg_hid("%s: sending a HID++ descriptor, reports_supported: %llx\n", __func__, djdev->reports_supported); rdcat(rdesc, &rsize, hidpp_descriptor, sizeof(hidpp_descriptor)); } retval = hid_parse_report(hid, rdesc, rsize); kfree(rdesc); return retval; } static int logi_dj_ll_start(struct hid_device *hid) { dbg_hid("%s\n", __func__); return 0; } static void logi_dj_ll_stop(struct hid_device *hid) { dbg_hid("%s\n", __func__); } static bool logi_dj_ll_may_wakeup(struct hid_device *hid) { struct dj_device *djdev = hid->driver_data; struct dj_receiver_dev *djrcv_dev = djdev->dj_receiver_dev; return hid_hw_may_wakeup(djrcv_dev->hidpp); } static const struct hid_ll_driver logi_dj_ll_driver = { .parse = logi_dj_ll_parse, .start = logi_dj_ll_start, .stop = logi_dj_ll_stop, .open = logi_dj_ll_open, .close = logi_dj_ll_close, .raw_request = logi_dj_ll_raw_request, .may_wakeup = logi_dj_ll_may_wakeup, }; static int logi_dj_dj_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); struct dj_report *dj_report = (struct dj_report *) data; unsigned long flags; /* * Here we receive all data coming from iface 2; there are 3 cases: * * 1) Data is intended for this driver, i.e. it contains arrival, * departure, etc. notifications, in which case we queue them for delayed * processing by the work queue. We return 1 to hid-core as no further * processing is required from it. * * 2) Data reports a connection change; if the change means rf link * loss, then we must send a null report to the upper layer to discard * potentially pressed keys that may be repeated forever by the input * layer. Return 1 to hid-core as no further processing is required.
* * 3) Data is an actual input event from a paired DJ device, in which * case we forward it to the correct hid device (via hid_input_report()) * and return 1 so hid-core does not do anything else with it. */ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { /* * Device index is wrong, bail out. * This driver can safely ignore the receiver notifications, * so ignore those reports too. */ if (dj_report->device_index != DJ_RECEIVER_INDEX) hid_err(hdev, "%s: invalid device index:%d\n", __func__, dj_report->device_index); return false; } spin_lock_irqsave(&djrcv_dev->lock, flags); if (!djrcv_dev->paired_dj_devices[dj_report->device_index]) { /* received an event for an unknown device, bail out */ logi_dj_recv_queue_notification(djrcv_dev, dj_report); goto out; } switch (dj_report->report_type) { case REPORT_TYPE_NOTIF_DEVICE_PAIRED: /* pairing notifications are handled above the switch */ break; case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: logi_dj_recv_queue_notification(djrcv_dev, dj_report); break; case REPORT_TYPE_NOTIF_CONNECTION_STATUS: if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == STATUS_LINKLOSS) { logi_dj_recv_forward_null_report(djrcv_dev, dj_report); } break; default: logi_dj_recv_forward_dj(djrcv_dev, dj_report); } out: spin_unlock_irqrestore(&djrcv_dev->lock, flags); return true; } static int logi_dj_hidpp_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); struct hidpp_event *hidpp_report = (struct hidpp_event *) data; struct dj_device *dj_dev; unsigned long flags; u8 device_index = hidpp_report->device_index; if (device_index == HIDPP_RECEIVER_INDEX) { /* special case where the device wants to know its unifying * name */ if (size == HIDPP_REPORT_LONG_LENGTH && !memcmp(data, unifying_pairing_answer, sizeof(unifying_pairing_answer))) device_index = (data[4] & 0x0F) + 1; else return false; } /* * Data is from the HID++ collection; in this case, we forward the * data to the corresponding child dj device and return 0 to hid-core * so the data also goes to the hidraw device of the receiver. This * allows a user space application to implement the full HID++ routing * via the receiver. */ if ((device_index < DJ_DEVICE_INDEX_MIN) || (device_index > DJ_DEVICE_INDEX_MAX)) { /* * Device index is wrong, bail out. * This driver can safely ignore the receiver notifications, * so ignore those reports too. */ hid_err(hdev, "%s: invalid device index:%d\n", __func__, hidpp_report->device_index); return false; } spin_lock_irqsave(&djrcv_dev->lock, flags); dj_dev = djrcv_dev->paired_dj_devices[device_index]; /* * With 27 MHz receivers, we do not get an explicit unpair event; * remove the old device if the user has paired a *different* device.
*/ if (djrcv_dev->type == recvr_type_27mhz && dj_dev && hidpp_report->sub_id == REPORT_TYPE_NOTIF_DEVICE_CONNECTED && hidpp_report->params[HIDPP_PARAM_PROTO_TYPE] == 0x02 && hidpp_report->params[HIDPP_PARAM_27MHZ_DEVID] != dj_dev->hdev->product) { struct dj_workitem workitem = { .device_index = hidpp_report->device_index, .type = WORKITEM_TYPE_UNPAIRED, }; kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem)); /* logi_hidpp_recv_queue_notif will queue the work */ dj_dev = NULL; } if (dj_dev) { logi_dj_recv_forward_report(dj_dev, data, size); } else { if (hidpp_report->sub_id == REPORT_TYPE_NOTIF_DEVICE_CONNECTED) logi_hidpp_recv_queue_notif(hdev, hidpp_report); else logi_dj_recv_queue_unknown_work(djrcv_dev); } spin_unlock_irqrestore(&djrcv_dev->lock, flags); return false; } static int logi_dj_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); dbg_hid("%s, size:%d\n", __func__, size); if (!djrcv_dev) return 0; if (!hdev->report_enum[HID_INPUT_REPORT].numbered) { if (djrcv_dev->unnumbered_application == HID_GD_KEYBOARD) { /* * For the keyboard, we can reuse the same report by * using the second byte which is constant in the USB * HID report descriptor. */ data[1] = data[0]; data[0] = REPORT_TYPE_KEYBOARD; logi_dj_recv_forward_input_report(hdev, data, size); /* restore previous state */ data[0] = data[1]; data[1] = 0; } /* * Mouse-only receivers send unnumbered mouse data. The 27 MHz * receiver uses 6 byte packets, the nano receiver 8 bytes. */ if (djrcv_dev->unnumbered_application == HID_GD_MOUSE && size <= 8) { u8 mouse_report[9]; /* Prepend report id */ mouse_report[0] = REPORT_TYPE_MOUSE; memcpy(mouse_report + 1, data, size); logi_dj_recv_forward_input_report(hdev, mouse_report, size + 1); } return false; } switch (data[0]) { case REPORT_ID_DJ_SHORT: if (size != DJREPORT_SHORT_LENGTH) { hid_err(hdev, "Short DJ report bad size (%d)", size); return false; } return logi_dj_dj_event(hdev, report, data, size); case REPORT_ID_DJ_LONG: if (size != DJREPORT_LONG_LENGTH) { hid_err(hdev, "Long DJ report bad size (%d)", size); return false; } return logi_dj_dj_event(hdev, report, data, size); case REPORT_ID_HIDPP_SHORT: if (size != HIDPP_REPORT_SHORT_LENGTH) { hid_err(hdev, "Short HID++ report bad size (%d)", size); return false; } return logi_dj_hidpp_event(hdev, report, data, size); case REPORT_ID_HIDPP_LONG: if (size != HIDPP_REPORT_LONG_LENGTH) { hid_err(hdev, "Long HID++ report bad size (%d)", size); return false; } return logi_dj_hidpp_event(hdev, report, data, size); } logi_dj_recv_forward_input_report(hdev, data, size); return false; } static int logi_dj_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct hid_report_enum *rep_enum; struct hid_report *rep; struct dj_receiver_dev *djrcv_dev; struct usb_interface *intf; unsigned int no_dj_interfaces = 0; bool has_hidpp = false; unsigned long flags; int retval; /* * Call to usbhid to fetch the HID descriptors of the current * interface subsequently call to the hid/hid-core to parse the * fetched descriptors. */ retval = hid_parse(hdev); if (retval) { hid_err(hdev, "%s: parse failed\n", __func__); return retval; } /* * Some KVMs add an extra interface for e.g. mouse emulation. If we * treat these as logitech-dj interfaces then this causes input events * reported through this extra interface to not be reported correctly. * To avoid this, we treat these as generic-hid devices. 
switch (id->driver_data) { case recvr_type_dj: no_dj_interfaces = 3; break; case recvr_type_hidpp: no_dj_interfaces = 2; break; case recvr_type_gaming_hidpp: no_dj_interfaces = 3; break; case recvr_type_mouse_only: no_dj_interfaces = 2; break; case recvr_type_27mhz: no_dj_interfaces = 2; break; case recvr_type_bluetooth: no_dj_interfaces = 2; break; case recvr_type_dinovo: no_dj_interfaces = 2; break; } if (hid_is_usb(hdev)) { intf = to_usb_interface(hdev->dev.parent); if (intf && intf->altsetting->desc.bInterfaceNumber >= no_dj_interfaces) { hdev->quirks |= HID_QUIRK_INPUT_PER_APP; return hid_hw_start(hdev, HID_CONNECT_DEFAULT); } } rep_enum = &hdev->report_enum[HID_INPUT_REPORT]; /* no input reports, bail out */ if (list_empty(&rep_enum->report_list)) return -ENODEV; /* * Check for the HID++ application. * Note: we should theoretically check for HID++ and DJ * collections, but this will do. */ list_for_each_entry(rep, &rep_enum->report_list, list) { if (rep->application == 0xff000001) has_hidpp = true; } /* * Ignore interfaces without a DJ/HID++ collection; they will not carry * any data, so don't create a hid_device for them. */ if (!has_hidpp && id->driver_data == recvr_type_dj) return -ENODEV; /* get the current application attached to the node */ rep = list_first_entry(&rep_enum->report_list, struct hid_report, list); djrcv_dev = dj_get_receiver_dev(hdev, id->driver_data, rep->application, has_hidpp); if (!djrcv_dev) { hid_err(hdev, "%s: dj_get_receiver_dev failed\n", __func__); return -ENOMEM; } if (!rep_enum->numbered) djrcv_dev->unnumbered_application = rep->application; /* Start the usb device and connect to the upper interfaces, hiddev and * hidraw */ retval = hid_hw_start(hdev, HID_CONNECT_HIDRAW|HID_CONNECT_HIDDEV); if (retval) { hid_err(hdev, "%s: hid_hw_start returned error\n", __func__); goto hid_hw_start_fail; } if (has_hidpp) { retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0); if (retval < 0) { hid_err(hdev, "%s: logi_dj_recv_switch_to_dj_mode returned error:%d\n", __func__, retval); goto switch_to_dj_mode_fail; } } /* This is enabling the polling urb on the IN endpoint */ retval = hid_hw_open(hdev); if (retval < 0) { hid_err(hdev, "%s: hid_hw_open returned error:%d\n", __func__, retval); goto llopen_failed; } /* Allow incoming packets to arrive: */ hid_device_io_start(hdev); if (has_hidpp) { spin_lock_irqsave(&djrcv_dev->lock, flags); djrcv_dev->ready = true; spin_unlock_irqrestore(&djrcv_dev->lock, flags); retval = logi_dj_recv_query_paired_devices(djrcv_dev); if (retval < 0) { hid_err(hdev, "%s: logi_dj_recv_query_paired_devices error:%d\n", __func__, retval); /* * This can happen with a KVM; let the probe succeed, * logi_dj_recv_queue_unknown_work will retry later.
*/ } } return 0; llopen_failed: switch_to_dj_mode_fail: hid_hw_stop(hdev); hid_hw_start_fail: dj_put_receiver_dev(hdev); return retval; } #ifdef CONFIG_PM static int logi_dj_reset_resume(struct hid_device *hdev) { int retval; struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); if (!djrcv_dev || djrcv_dev->hidpp != hdev) return 0; retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0); if (retval < 0) { hid_err(hdev, "%s: logi_dj_recv_switch_to_dj_mode returned error:%d\n", __func__, retval); } return 0; } #endif static void logi_dj_remove(struct hid_device *hdev) { struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); struct dj_device *dj_dev; unsigned long flags; int i; dbg_hid("%s\n", __func__); if (!djrcv_dev) return hid_hw_stop(hdev); /* * This ensures that if the work gets requeued from another * interface of the same receiver it will be a no-op. */ spin_lock_irqsave(&djrcv_dev->lock, flags); djrcv_dev->ready = false; spin_unlock_irqrestore(&djrcv_dev->lock, flags); cancel_work_sync(&djrcv_dev->work); hid_hw_close(hdev); hid_hw_stop(hdev); /* * For proper operation we need access to all interfaces, so we destroy * the paired devices when we're unbound from any interface. * * Note we may still be bound to other interfaces, sharing the same * djrcv_dev, so we need locking here. */ for (i = 0; i < (DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN); i++) { spin_lock_irqsave(&djrcv_dev->lock, flags); dj_dev = djrcv_dev->paired_dj_devices[i]; djrcv_dev->paired_dj_devices[i] = NULL; spin_unlock_irqrestore(&djrcv_dev->lock, flags); if (dj_dev != NULL) { hid_destroy_device(dj_dev->hdev); kfree(dj_dev); } } dj_put_receiver_dev(hdev); } static const struct hid_device_id logi_dj_receivers[] = { { /* Logitech unifying receiver (0xc52b) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER), .driver_data = recvr_type_dj}, { /* Logitech unifying receiver (0xc532) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2), .driver_data = recvr_type_dj}, { /* Logitech Nano mouse only receiver (0xc52f) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_NANO_RECEIVER), .driver_data = recvr_type_mouse_only}, { /* Logitech Nano (non DJ) receiver (0xc534) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2), .driver_data = recvr_type_hidpp}, { /* Logitech G700(s) receiver (0xc531) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G700_RECEIVER), .driver_data = recvr_type_gaming_hidpp}, { /* Logitech G602 receiver (0xc537) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xc537), .driver_data = recvr_type_gaming_hidpp}, { /* Logitech lightspeed receiver (0xc539) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1), .driver_data = recvr_type_gaming_hidpp}, { /* Logitech powerplay receiver (0xc53a) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY), .driver_data = recvr_type_gaming_hidpp}, { /* Logitech lightspeed receiver (0xc53f) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1), .driver_data = recvr_type_gaming_hidpp}, { /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER), .driver_data = recvr_type_27mhz}, { /* Logitech 27 MHz HID++ 1.0 receiver (0xc517) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2), .driver_data = recvr_type_27mhz}, { /* Logitech 27 MHz HID++ 1.0 mouse-only 
receiver (0xc51b) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_27MHZ_MOUSE_RECEIVER), .driver_data = recvr_type_27mhz}, { /* Logitech MX5000 HID++ / bluetooth receiver keyboard intf. (0xc70e) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX5000_RECEIVER_KBD_DEV), .driver_data = recvr_type_bluetooth}, { /* Logitech MX5000 HID++ / bluetooth receiver mouse intf. (0xc70a) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX5000_RECEIVER_MOUSE_DEV), .driver_data = recvr_type_bluetooth}, { /* Logitech MX5500 HID++ / bluetooth receiver keyboard intf. (0xc71b) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX5500_RECEIVER_KBD_DEV), .driver_data = recvr_type_bluetooth}, { /* Logitech MX5500 HID++ / bluetooth receiver mouse intf. (0xc71c) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX5500_RECEIVER_MOUSE_DEV), .driver_data = recvr_type_bluetooth}, { /* Logitech Dinovo Edge HID++ / bluetooth receiver keyboard intf. (0xc713) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE_RECEIVER_KBD_DEV), .driver_data = recvr_type_dinovo}, { /* Logitech Dinovo Edge HID++ / bluetooth receiver mouse intf. (0xc714) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE_RECEIVER_MOUSE_DEV), .driver_data = recvr_type_dinovo}, { /* Logitech DiNovo Mini HID++ / bluetooth receiver keyboard intf. (0xc71e) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI_RECEIVER_KBD_DEV), .driver_data = recvr_type_dinovo}, { /* Logitech DiNovo Mini HID++ / bluetooth receiver mouse intf. (0xc71f) */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI_RECEIVER_MOUSE_DEV), .driver_data = recvr_type_dinovo}, {} }; MODULE_DEVICE_TABLE(hid, logi_dj_receivers); static struct hid_driver logi_djreceiver_driver = { .name = "logitech-djreceiver", .id_table = logi_dj_receivers, .probe = logi_dj_probe, .remove = logi_dj_remove, .raw_event = logi_dj_raw_event, #ifdef CONFIG_PM .reset_resume = logi_dj_reset_resume, #endif }; module_hid_driver(logi_djreceiver_driver); MODULE_DESCRIPTION("HID driver for Logitech receivers"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Logitech"); MODULE_AUTHOR("Nestor Lopez Casado"); MODULE_AUTHOR("nlopezcasad@logitech.com");
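/*
 * The report routing above boils down to: each paired dj_device carries a
 * reports_supported bitmask, and an incoming report whose first byte is the
 * report id R is forwarded to the first paired slot with BIT(R) set (see
 * logi_dj_recv_forward_input_report()). Below is a minimal, self-contained
 * userspace sketch of that idea; the sketch_* types and names are
 * illustrative stand-ins, not driver types.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_BIT(n)		(1ULL << (n))
#define SKETCH_MAX_SLOTS	7

struct sketch_dev {
	const char *name;
	uint64_t reports_supported;	/* one bit per hid report id */
};

/* Return the first slot whose bitmask claims report id 'report', or -1. */
static int sketch_route(struct sketch_dev *slots[SKETCH_MAX_SLOTS], uint8_t report)
{
	for (int i = 0; i < SKETCH_MAX_SLOTS; i++)
		if (slots[i] && (slots[i]->reports_supported & SKETCH_BIT(report)))
			return i;
	return -1;	/* analogous to queueing "unknown device" work */
}

int main(void)
{
	/* report id 1 = keyboard, 2 = mouse, 3 = consumer control */
	struct sketch_dev kbd = { "keyboard", SKETCH_BIT(1) | SKETCH_BIT(3) };
	struct sketch_dev mse = { "mouse", SKETCH_BIT(2) };
	struct sketch_dev *slots[SKETCH_MAX_SLOTS] = { [1] = &kbd, [2] = &mse };

	/* Report id 2 (standard mouse) routes to the mouse slot. */
	printf("report 2 -> slot %d\n", sketch_route(slots, 2));
	return 0;
}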
/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _NET_CORE_DEV_H #define _NET_CORE_DEV_H #include <linux/cleanup.h> #include <linux/types.h> #include <linux/rwsem.h> #include <linux/netdevice.h> #include <net/netdev_lock.h> struct net; struct netlink_ext_ack; struct cpumask; /* Random bits of netdevice that don't need to be exposed */ #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ struct sd_flow_limit { u64 count; unsigned int num_buckets; unsigned int history_head; u16 history[FLOW_LIMIT_HISTORY]; u8 buckets[]; }; extern int netdev_flow_limit_table_len; struct napi_struct * netdev_napi_by_id_lock(struct net *net, unsigned int napi_id); struct net_device *dev_get_by_napi_id(unsigned int napi_id); struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex); struct net_device *__netdev_put_lock(struct net_device *dev); struct net_device * netdev_xa_find_lock(struct net *net, struct net_device *dev, unsigned long *index); DEFINE_FREE(netdev_unlock, struct net_device *, if (_T) netdev_unlock(_T)); #define for_each_netdev_lock_scoped(net, var_name, ifindex) \ for (struct net_device *var_name __free(netdev_unlock) = NULL; \ (var_name = netdev_xa_find_lock(net, var_name, &ifindex)); \ ifindex++) #ifdef CONFIG_PROC_FS int __init dev_proc_init(void); #else #define dev_proc_init() 0 #endif void linkwatch_init_dev(struct net_device *dev); void linkwatch_run_queue(void); void dev_addr_flush(struct net_device *dev); int dev_addr_init(struct net_device *dev); void dev_addr_check(struct net_device *dev); #if IS_ENABLED(CONFIG_NET_SHAPER) void net_shaper_flush_netdev(struct net_device *dev); void net_shaper_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); #else static inline void net_shaper_flush_netdev(struct net_device *dev) {} static inline void net_shaper_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) {} #endif /* sysctls not referred to from outside net/core/ */ extern int netdev_unregister_timeout_secs; extern int weight_p; extern int dev_weight_rx_bias; extern int dev_weight_tx_bias; extern struct rw_semaphore dev_addr_sem; /* rtnl helpers */ extern struct
list_head net_todo_list; void netdev_run_todo(void); /* netdev management, shared between various uAPI entry points */ struct netdev_name_node { struct hlist_node hlist; struct list_head list; struct net_device *dev; const char *name; struct rcu_head rcu; }; int netdev_get_name(struct net *net, char *name, int ifindex); int netif_change_name(struct net_device *dev, const char *newname); int dev_change_name(struct net_device *dev, const char *newname); #define netdev_for_each_altname(dev, namenode) \ list_for_each_entry((namenode), &(dev)->name_node->list, list) #define netdev_for_each_altname_safe(dev, namenode, next) \ list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \ list) int netdev_name_node_alt_create(struct net_device *dev, const char *name); int netdev_name_node_alt_destroy(struct net_device *dev, const char *name); int dev_validate_mtu(struct net_device *dev, int mtu, struct netlink_ext_ack *extack); int netif_set_mtu_ext(struct net_device *dev, int new_mtu, struct netlink_ext_ack *extack); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid); int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); int netif_change_proto_down(struct net_device *dev, bool proto_down); int dev_change_proto_down(struct net_device *dev, bool proto_down); void netdev_change_proto_down_reason_locked(struct net_device *dev, unsigned long mask, u32 value); typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags); int netif_change_tx_queue_len(struct net_device *dev, unsigned long new_len); int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len); void netif_set_group(struct net_device *dev, int new_group); void dev_set_group(struct net_device *dev, int new_group); int netif_change_carrier(struct net_device *dev, bool new_carrier); int dev_change_carrier(struct net_device *dev, bool new_carrier); void __dev_set_rx_mode(struct net_device *dev); void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, unsigned int gchanges, u32 portid, const struct nlmsghdr *nlh); void unregister_netdevice_many_notify(struct list_head *head, u32 portid, const struct nlmsghdr *nlh); static inline void netif_set_up(struct net_device *dev, bool value) { if (value) dev->flags |= IFF_UP; else dev->flags &= ~IFF_UP; if (!netdev_need_ops_lock(dev)) netdev_lock(dev); dev->up = value; if (!netdev_need_ops_lock(dev)) netdev_unlock(dev); } static inline void netif_set_gso_max_size(struct net_device *dev, unsigned int size) { /* dev->gso_max_size is read locklessly from sk_setup_caps() */ WRITE_ONCE(dev->gso_max_size, size); if (size <= GSO_LEGACY_MAX_SIZE) WRITE_ONCE(dev->gso_ipv4_max_size, size); } static inline void netif_set_gso_max_segs(struct net_device *dev, unsigned int segs) { /* dev->gso_max_segs is read locklessly from sk_setup_caps() */ WRITE_ONCE(dev->gso_max_segs, segs); } static inline void netif_set_gro_max_size(struct net_device *dev, unsigned int size) { /* This pairs with the READ_ONCE() in skb_gro_receive() */ WRITE_ONCE(dev->gro_max_size, size); if (size <= GRO_LEGACY_MAX_SIZE) WRITE_ONCE(dev->gro_ipv4_max_size, size); } static inline void netif_set_gso_ipv4_max_size(struct net_device *dev, unsigned int size) { /* dev->gso_ipv4_max_size is read locklessly from sk_setup_caps() */ WRITE_ONCE(dev->gso_ipv4_max_size, size); } static inline void netif_set_gro_ipv4_max_size(struct net_device 
*dev, unsigned int size) { /* This pairs with the READ_ONCE() in skb_gro_receive() */ WRITE_ONCE(dev->gro_ipv4_max_size, size); } /** * napi_get_defer_hard_irqs - get the NAPI's defer_hard_irqs * @n: napi struct to get the defer_hard_irqs field from * * Return: the per-NAPI value of the defer_hard_irqs field. */ static inline u32 napi_get_defer_hard_irqs(const struct napi_struct *n) { return READ_ONCE(n->defer_hard_irqs); } /** * napi_set_defer_hard_irqs - set the defer_hard_irqs for a napi * @n: napi_struct to set the defer_hard_irqs field * @defer: the value the field should be set to */ static inline void napi_set_defer_hard_irqs(struct napi_struct *n, u32 defer) { WRITE_ONCE(n->defer_hard_irqs, defer); } /** * netdev_set_defer_hard_irqs - set defer_hard_irqs for all NAPIs of a netdev * @netdev: the net_device for which all NAPIs will have defer_hard_irqs set * @defer: the defer_hard_irqs value to set */ static inline void netdev_set_defer_hard_irqs(struct net_device *netdev, u32 defer) { unsigned int count = max(netdev->num_rx_queues, netdev->num_tx_queues); struct napi_struct *napi; int i; WRITE_ONCE(netdev->napi_defer_hard_irqs, defer); list_for_each_entry(napi, &netdev->napi_list, dev_list) napi_set_defer_hard_irqs(napi, defer); for (i = 0; i < count; i++) netdev->napi_config[i].defer_hard_irqs = defer; } /** * napi_get_gro_flush_timeout - get the gro_flush_timeout * @n: napi struct to get the gro_flush_timeout from * * Return: the per-NAPI value of the gro_flush_timeout field. */ static inline unsigned long napi_get_gro_flush_timeout(const struct napi_struct *n) { return READ_ONCE(n->gro_flush_timeout); } /** * napi_set_gro_flush_timeout - set the gro_flush_timeout for a napi * @n: napi struct to set the gro_flush_timeout * @timeout: timeout value to set * * napi_set_gro_flush_timeout sets the per-NAPI gro_flush_timeout */ static inline void napi_set_gro_flush_timeout(struct napi_struct *n, unsigned long timeout) { WRITE_ONCE(n->gro_flush_timeout, timeout); } /** * netdev_set_gro_flush_timeout - set gro_flush_timeout of a netdev's NAPIs * @netdev: the net_device for which all NAPIs will have gro_flush_timeout set * @timeout: the timeout value to set */ static inline void netdev_set_gro_flush_timeout(struct net_device *netdev, unsigned long timeout) { unsigned int count = max(netdev->num_rx_queues, netdev->num_tx_queues); struct napi_struct *napi; int i; WRITE_ONCE(netdev->gro_flush_timeout, timeout); list_for_each_entry(napi, &netdev->napi_list, dev_list) napi_set_gro_flush_timeout(napi, timeout); for (i = 0; i < count; i++) netdev->napi_config[i].gro_flush_timeout = timeout; } /** * napi_get_irq_suspend_timeout - get the irq_suspend_timeout * @n: napi struct to get the irq_suspend_timeout from * * Return: the per-NAPI value of the irq_suspend_timeout field.
*/ static inline unsigned long napi_get_irq_suspend_timeout(const struct napi_struct *n) { return READ_ONCE(n->irq_suspend_timeout); } /** * napi_set_irq_suspend_timeout - set the irq_suspend_timeout for a napi * @n: napi struct to set the irq_suspend_timeout * @timeout: timeout value to set * * napi_set_irq_suspend_timeout sets the per-NAPI irq_suspend_timeout */ static inline void napi_set_irq_suspend_timeout(struct napi_struct *n, unsigned long timeout) { WRITE_ONCE(n->irq_suspend_timeout, timeout); } int rps_cpumask_housekeeping(struct cpumask *mask); #if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL) void xdp_do_check_flushed(struct napi_struct *napi); #else static inline void xdp_do_check_flushed(struct napi_struct *napi) { } #endif /* Best effort check that NAPI is not idle (can't be scheduled to run) */ static inline void napi_assert_will_not_race(const struct napi_struct *napi) { /* uninitialized instance, can't race */ if (!napi->poll_list.next) return; /* SCHED bit is set on disabled instances */ WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state)); WARN_ON(READ_ONCE(napi->list_owner) != -1); } void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu); #define XMIT_RECURSION_LIMIT 8 #ifndef CONFIG_PREEMPT_RT static inline bool dev_xmit_recursion(void) { return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > XMIT_RECURSION_LIMIT); } static inline void dev_xmit_recursion_inc(void) { __this_cpu_inc(softnet_data.xmit.recursion); } static inline void dev_xmit_recursion_dec(void) { __this_cpu_dec(softnet_data.xmit.recursion); } #else static inline bool dev_xmit_recursion(void) { return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT); } static inline void dev_xmit_recursion_inc(void) { current->net_xmit.recursion++; } static inline void dev_xmit_recursion_dec(void) { current->net_xmit.recursion--; } #endif int dev_set_hwtstamp_phylib(struct net_device *dev, struct kernel_hwtstamp_config *cfg, struct netlink_ext_ack *extack); int dev_get_hwtstamp_phylib(struct net_device *dev, struct kernel_hwtstamp_config *cfg); int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg); #endif
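/*
 * The XMIT_RECURSION_LIMIT helpers above implement a simple re-entrancy
 * guard: a per-cpu (or, on PREEMPT_RT, per-task) counter is bumped around
 * each nested transmit, and a nesting deeper than the limit is refused.
 * Below is a minimal userspace analogue of that pattern using a
 * thread-local counter; the sketch_* names are illustrative, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_RECURSION_LIMIT 8

static _Thread_local unsigned int sketch_xmit_recursion;

/* Mirrors dev_xmit_recursion(): true once the nesting exceeds the limit. */
static bool sketch_xmit_would_recurse(void)
{
	return sketch_xmit_recursion > SKETCH_RECURSION_LIMIT;
}

/* A transmit path that may re-enter itself (e.g. via a stacked device). */
static void sketch_xmit(int depth)
{
	if (sketch_xmit_would_recurse()) {
		printf("drop at depth %d: recursion limit hit\n", depth);
		return;
	}
	sketch_xmit_recursion++;
	if (depth < 12)	/* simulate a stacked-device re-entry */
		sketch_xmit(depth + 1);
	sketch_xmit_recursion--;
}

int main(void)
{
	sketch_xmit(0);	/* prints a drop once the nesting passes the limit */
	return 0;
}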
// SPDX-License-Identifier: GPL-2.0 /* * linux/drivers/scsi/scsi_proc.c * * The functions in this file provide an interface between * the PROC file system and the SCSI device drivers. * It is mainly used for debugging, statistics and to pass * information directly to the lowlevel driver. * * (c) 1995 Michael Neuffer neuffer@goofy.zdv.uni-mainz.de * Version: 0.99.8 last change: 95/09/13 * * generic command parser provided by: * Andreas Heilwagen <crashcar@informatik.uni-koblenz.de> * * generic_proc_info() support of xxxx_info() by: * Michael A.
Griffith <grif@acm.org> */ #include <linux/module.h> #include <linux/init.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/errno.h> #include <linux/blkdev.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <linux/gfp.h> #include <linux/uaccess.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include "scsi_priv.h" #include "scsi_logging.h" /* 4K page size, but our output routines use some slack for overruns */ #define PROC_BLOCK_SIZE (3*1024) static struct proc_dir_entry *proc_scsi; /* Protects scsi_proc_list */ static DEFINE_MUTEX(global_host_template_mutex); static LIST_HEAD(scsi_proc_list); /** * struct scsi_proc_entry - (host template, SCSI proc dir) association * @entry: entry in scsi_proc_list. * @sht: SCSI host template associated with the procfs directory. * @proc_dir: procfs directory associated with the SCSI host template. * @present: Number of SCSI hosts instantiated for @sht. */ struct scsi_proc_entry { struct list_head entry; const struct scsi_host_template *sht; struct proc_dir_entry *proc_dir; unsigned int present; }; static ssize_t proc_scsi_host_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct Scsi_Host *shost = pde_data(file_inode(file)); ssize_t ret = -ENOMEM; char *page; if (count > PROC_BLOCK_SIZE) return -EOVERFLOW; if (!shost->hostt->write_info) return -EINVAL; page = (char *)__get_free_page(GFP_KERNEL); if (page) { ret = -EFAULT; if (copy_from_user(page, buf, count)) goto out; ret = shost->hostt->write_info(shost, page, count); } out: free_page((unsigned long)page); return ret; } static int proc_scsi_show(struct seq_file *m, void *v) { struct Scsi_Host *shost = m->private; return shost->hostt->show_info(m, shost); } static int proc_scsi_host_open(struct inode *inode, struct file *file) { return single_open_size(file, proc_scsi_show, pde_data(inode), 4 * PAGE_SIZE); } static struct scsi_proc_entry * __scsi_lookup_proc_entry(const struct scsi_host_template *sht) { struct scsi_proc_entry *e; lockdep_assert_held(&global_host_template_mutex); list_for_each_entry(e, &scsi_proc_list, entry) if (e->sht == sht) return e; return NULL; } static struct scsi_proc_entry * scsi_lookup_proc_entry(const struct scsi_host_template *sht) { struct scsi_proc_entry *e; mutex_lock(&global_host_template_mutex); e = __scsi_lookup_proc_entry(sht); mutex_unlock(&global_host_template_mutex); return e; } /** * scsi_template_proc_dir() - returns the procfs dir for a SCSI host template * @sht: SCSI host template pointer. */ struct proc_dir_entry * scsi_template_proc_dir(const struct scsi_host_template *sht) { struct scsi_proc_entry *e = scsi_lookup_proc_entry(sht); return e ? e->proc_dir : NULL; } EXPORT_SYMBOL_GPL(scsi_template_proc_dir); static const struct proc_ops proc_scsi_ops = { .proc_open = proc_scsi_host_open, .proc_release = single_release, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_write = proc_scsi_host_write }; /** * scsi_proc_hostdir_add - Create directory in /proc for a scsi host * @sht: owner of this directory * * Sets sht->proc_dir to the new directory.
*/ int scsi_proc_hostdir_add(const struct scsi_host_template *sht) { struct scsi_proc_entry *e; int ret; if (!sht->show_info) return 0; mutex_lock(&global_host_template_mutex); e = __scsi_lookup_proc_entry(sht); if (!e) { e = kzalloc(sizeof(*e), GFP_KERNEL); if (!e) { ret = -ENOMEM; goto unlock; } } if (e->present++) goto success; e->proc_dir = proc_mkdir(sht->proc_name, proc_scsi); if (!e->proc_dir) { printk(KERN_ERR "%s: proc_mkdir failed for %s\n", __func__, sht->proc_name); ret = -ENOMEM; goto unlock; } e->sht = sht; list_add_tail(&e->entry, &scsi_proc_list); success: e = NULL; ret = 0; unlock: mutex_unlock(&global_host_template_mutex); kfree(e); return ret; } /** * scsi_proc_hostdir_rm - remove directory in /proc for a scsi host * @sht: owner of directory */ void scsi_proc_hostdir_rm(const struct scsi_host_template *sht) { struct scsi_proc_entry *e; if (!sht->show_info) return; mutex_lock(&global_host_template_mutex); e = __scsi_lookup_proc_entry(sht); if (e && !--e->present) { remove_proc_entry(sht->proc_name, proc_scsi); list_del(&e->entry); kfree(e); } mutex_unlock(&global_host_template_mutex); } /** * scsi_proc_host_add - Add entry for this host to appropriate /proc dir * @shost: host to add */ void scsi_proc_host_add(struct Scsi_Host *shost) { const struct scsi_host_template *sht = shost->hostt; struct scsi_proc_entry *e; struct proc_dir_entry *p; char name[10]; if (!sht->show_info) return; e = scsi_lookup_proc_entry(sht); if (!e) goto err; sprintf(name, "%d", shost->host_no); p = proc_create_data(name, S_IRUGO | S_IWUSR, e->proc_dir, &proc_scsi_ops, shost); if (!p) goto err; return; err: shost_printk(KERN_ERR, shost, "%s: Failed to register host (%s failed)\n", __func__, e ? "proc_create_data()" : "scsi_proc_hostdir_add()"); } /** * scsi_proc_host_rm - remove this host's entry from /proc * @shost: which host */ void scsi_proc_host_rm(struct Scsi_Host *shost) { const struct scsi_host_template *sht = shost->hostt; struct scsi_proc_entry *e; char name[10]; if (!sht->show_info) return; e = scsi_lookup_proc_entry(sht); if (!e) return; sprintf(name, "%d", shost->host_no); remove_proc_entry(name, e->proc_dir); } /** * proc_print_scsidevice - return data about this device * @dev: A scsi device * @data: &struct seq_file to output to. * * Description: prints Host, Channel, Id, Lun, Vendor, Model, Rev, Type, * and ANSI SCSI revision.
*/ static int proc_print_scsidevice(struct device *dev, void *data) { struct scsi_device *sdev; struct seq_file *s = data; int i; if (!scsi_is_sdev_device(dev)) goto out; sdev = to_scsi_device(dev); seq_printf(s, "Host: scsi%d Channel: %02d Id: %02d Lun: %02llu\n Vendor: ", sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); for (i = 0; i < 8; i++) { if (sdev->vendor[i] >= 0x20) seq_putc(s, sdev->vendor[i]); else seq_putc(s, ' '); } seq_puts(s, " Model: "); for (i = 0; i < 16; i++) { if (sdev->model[i] >= 0x20) seq_putc(s, sdev->model[i]); else seq_putc(s, ' '); } seq_puts(s, " Rev: "); for (i = 0; i < 4; i++) { if (sdev->rev[i] >= 0x20) seq_putc(s, sdev->rev[i]); else seq_putc(s, ' '); } seq_putc(s, '\n'); seq_printf(s, " Type: %s ", scsi_device_type(sdev->type)); seq_printf(s, " ANSI SCSI revision: %02x", sdev->scsi_level - (sdev->scsi_level > 1)); if (sdev->scsi_level == 2) seq_puts(s, " CCS\n"); else seq_putc(s, '\n'); out: return 0; } /** * scsi_add_single_device - Respond to user request to probe for/add device * @host: user-supplied decimal integer * @channel: user-supplied decimal integer * @id: user-supplied decimal integer * @lun: user-supplied decimal integer * * Description: called by writing "scsi add-single-device" to /proc/scsi/scsi. * * does scsi_host_lookup() and either user_scan() if that transport * type supports it, or else scsi_scan_host_selected() * * Note: this seems to be aimed exclusively at SCSI parallel busses. */ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun) { struct Scsi_Host *shost; int error = -ENXIO; shost = scsi_host_lookup(host); if (!shost) return error; if (shost->transportt->user_scan) error = shost->transportt->user_scan(shost, channel, id, lun); else error = scsi_scan_host_selected(shost, channel, id, lun, SCSI_SCAN_MANUAL); scsi_host_put(shost); return error; } /** * scsi_remove_single_device - Respond to user request to remove a device * @host: user-supplied decimal integer * @channel: user-supplied decimal integer * @id: user-supplied decimal integer * @lun: user-supplied decimal integer * * Description: called by writing "scsi remove-single-device" to * /proc/scsi/scsi. Does a scsi_device_lookup() and scsi_remove_device() */ static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun) { struct scsi_device *sdev; struct Scsi_Host *shost; int error = -ENXIO; shost = scsi_host_lookup(host); if (!shost) return error; sdev = scsi_device_lookup(shost, channel, id, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); error = 0; } scsi_host_put(shost); return error; } /** * proc_scsi_write - handle writes to /proc/scsi/scsi * @file: not used * @buf: buffer to write * @length: length of buf, at most PAGE_SIZE * @ppos: not used * * Description: this provides a legacy mechanism to add or remove devices by * Host, Channel, ID, and Lun. To use, * "echo 'scsi add-single-device 0 1 2 3' > /proc/scsi/scsi" or * "echo 'scsi remove-single-device 0 1 2 3' > /proc/scsi/scsi" with * "0 1 2 3" replaced by the Host, Channel, Id, and Lun. * * Note: this seems to be aimed at parallel SCSI. Most modern busses (USB, * SATA, Firewire, Fibre Channel, etc) dynamically assign these values to * provide a unique identifier and nothing more. 
*/ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos) { int host, channel, id, lun; char *buffer, *end, *p; int err; if (!buf || length > PAGE_SIZE) return -EINVAL; buffer = (char *)__get_free_page(GFP_KERNEL); if (!buffer) return -ENOMEM; err = -EFAULT; if (copy_from_user(buffer, buf, length)) goto out; err = -EINVAL; if (length < PAGE_SIZE) { end = buffer + length; *end = '\0'; } else { end = buffer + PAGE_SIZE - 1; if (*end) goto out; } /* * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi * with "0 1 2 3" replaced by your "Host Channel Id Lun". */ if (!strncmp("scsi add-single-device", buffer, 22)) { p = buffer + 23; host = (p < end) ? simple_strtoul(p, &p, 0) : 0; channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_add_single_device(host, channel, id, lun); /* * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi * with "0 1 2 3" replaced by your "Host Channel Id Lun". */ } else if (!strncmp("scsi remove-single-device", buffer, 25)) { p = buffer + 26; host = (p < end) ? simple_strtoul(p, &p, 0) : 0; channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_remove_single_device(host, channel, id, lun); } /* * convert success returns so that we return the * number of bytes consumed. */ if (!err) err = length; out: free_page((unsigned long)buffer); return err; } static inline struct device *next_scsi_device(struct device *start) { struct device *next = bus_find_next_device(&scsi_bus_type, start); put_device(start); return next; } static void *scsi_seq_start(struct seq_file *sfile, loff_t *pos) { struct device *dev = NULL; loff_t n = *pos; while ((dev = next_scsi_device(dev))) { if (!n--) break; sfile->private++; } return dev; } static void *scsi_seq_next(struct seq_file *sfile, void *v, loff_t *pos) { (*pos)++; sfile->private++; return next_scsi_device(v); } static void scsi_seq_stop(struct seq_file *sfile, void *v) { put_device(v); } static int scsi_seq_show(struct seq_file *sfile, void *dev) { if (!sfile->private) seq_puts(sfile, "Attached devices:\n"); return proc_print_scsidevice(dev, sfile); } static const struct seq_operations scsi_seq_ops = { .start = scsi_seq_start, .next = scsi_seq_next, .stop = scsi_seq_stop, .show = scsi_seq_show }; /** * proc_scsi_open - glue function * @inode: not used * @file: passed to single_open() * * Associates proc_scsi_show with this file */ static int proc_scsi_open(struct inode *inode, struct file *file) { /* * We don't really need this for the write case but it doesn't * harm either. */ return seq_open(file, &scsi_seq_ops); } static const struct proc_ops scsi_scsi_proc_ops = { .proc_open = proc_scsi_open, .proc_read = seq_read, .proc_write = proc_scsi_write, .proc_lseek = seq_lseek, .proc_release = seq_release, }; /** * scsi_init_procfs - create scsi and scsi/scsi in procfs */ int __init scsi_init_procfs(void) { struct proc_dir_entry *pde; proc_scsi = proc_mkdir("scsi", NULL); if (!proc_scsi) goto err1; pde = proc_create("scsi/scsi", 0, NULL, &scsi_scsi_proc_ops); if (!pde) goto err2; return 0; err2: remove_proc_entry("scsi", NULL); err1: return -ENOMEM; } /** * scsi_exit_procfs - Remove scsi/scsi and scsi from procfs */ void scsi_exit_procfs(void) { remove_proc_entry("scsi/scsi", NULL); remove_proc_entry("scsi", NULL); }
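/*
 * Editor's note: an illustrative userspace sketch, not part of the driver
 * above. It exercises the legacy /proc/scsi/scsi write interface that the
 * proc_scsi_write() kerneldoc describes; the "0 1 2 3" Host/Channel/Id/Lun
 * tuple is a placeholder. On success the kernel reports the whole command
 * as consumed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static const char cmd[] = "scsi add-single-device 0 1 2 3";
	int fd = open("/proc/scsi/scsi", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/scsi/scsi");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
		perror("write");
	close(fd);
	return 0;
}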
// SPDX-License-Identifier: GPL-2.0 #include <linux/compiler.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/fault-inject-usercopy.h> #include <linux/instrumented.h> #include <linux/kernel.h> #include <linux/nospec.h> #include <linux/string.h> #include <linux/uaccess.h> #include <linux/wordpart.h> /* out-of-line parts */ #if !defined(INLINE_COPY_FROM_USER) || defined(CONFIG_RUST) unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n) { return _inline_copy_from_user(to, from, n); } EXPORT_SYMBOL(_copy_from_user); #endif #if !defined(INLINE_COPY_TO_USER) || defined(CONFIG_RUST) unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) { return _inline_copy_to_user(to, from, n); } EXPORT_SYMBOL(_copy_to_user); #endif /** * check_zeroed_user: check if a userspace buffer only contains zero bytes * @from: Source address, in userspace. * @size: Size of buffer. * * This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for * userspace addresses (and is more efficient because we don't care where the * first non-zero byte is). * * Returns: * * 0: There were non-zero bytes present in the buffer. * * 1: The buffer was full of zero bytes. * * -EFAULT: access to userspace failed. */ int check_zeroed_user(const void __user *from, size_t size) { unsigned long val; uintptr_t align = (uintptr_t) from % sizeof(unsigned long); if (unlikely(size == 0)) return 1; from -= align; size += align; if (!user_read_access_begin(from, size)) return -EFAULT; unsafe_get_user(val, (unsigned long __user *) from, err_fault); if (align) val &= ~aligned_byte_mask(align); while (size > sizeof(unsigned long)) { if (unlikely(val)) goto done; from += sizeof(unsigned long); size -= sizeof(unsigned long); unsafe_get_user(val, (unsigned long __user *) from, err_fault); } if (size < sizeof(unsigned long)) val &= aligned_byte_mask(size); done: user_read_access_end(); return (val == 0); err_fault: user_read_access_end(); return -EFAULT; } EXPORT_SYMBOL(check_zeroed_user);
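/*
 * Editor's note: an illustrative sketch of the typical use of
 * check_zeroed_user() when accepting a size-extensible struct from
 * userspace (the pattern copy_struct_from_user() implements). All names
 * here are hypothetical; 'dst' is assumed to be pre-zeroed by the caller.
 */
static int example_copy_arg(void *dst, const void __user *src,
			    size_t ksize, size_t usize)
{
	if (usize > ksize) {
		/* Trailing bytes we don't understand must all be zero. */
		int ret = check_zeroed_user(src + ksize, usize - ksize);

		if (ret < 0)
			return ret;	/* -EFAULT */
		if (ret == 0)
			return -E2BIG;	/* unknown future fields were set */
		usize = ksize;		/* only copy what we know about */
	}
	return copy_from_user(dst, src, usize) ? -EFAULT : 0;
}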
// SPDX-License-Identifier: GPL-2.0 /* * Emagic EMI 2|6 usb audio interface firmware loader. * Copyright (C) 2002 * Tapio Laxström (tapio.laxstrom@iptime.fi) * * emi26.c,v 1.13 2002/03/08 13:10:26 tapio Exp */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/delay.h> #include <linux/firmware.h> #include <linux/ihex.h> #define EMI26_VENDOR_ID 0x086a /* Emagic Soft-und Hardware GmBH */ #define EMI26_PRODUCT_ID 0x0100 /* EMI 2|6 without firmware */ #define EMI26B_PRODUCT_ID 0x0102 /* EMI 2|6 without firmware */ #define ANCHOR_LOAD_INTERNAL 0xA0 /* Vendor specific request code for Anchor Upload/Download (This one is implemented in the core) */ #define ANCHOR_LOAD_EXTERNAL 0xA3 /* This command is not implemented in the core. Requires firmware */ #define ANCHOR_LOAD_FPGA 0xA5 /* This command is not implemented in the core. Requires firmware. Emagic extension */ #define MAX_INTERNAL_ADDRESS 0x1B3F /* This is the highest internal RAM address for the AN2131Q */ #define CPUCS_REG 0x7F92 /* EZ-USB Control and Status Register. Bit 0 controls 8051 reset */ #define INTERNAL_RAM(address) (address <= MAX_INTERNAL_ADDRESS) static int emi26_writememory( struct usb_device *dev, int address, const unsigned char *data, int length, __u8 bRequest); static int emi26_set_reset(struct usb_device *dev, unsigned char reset_bit); static int emi26_load_firmware (struct usb_device *dev); static int emi26_probe(struct usb_interface *intf, const struct usb_device_id *id); static void emi26_disconnect(struct usb_interface *intf); /* thanks to drivers/usb/serial/keyspan_pda.c code */ static int emi26_writememory (struct usb_device *dev, int address, const unsigned char *data, int length, __u8 request) { int result; unsigned char *buffer = kmemdup(data, length, GFP_KERNEL); if (!buffer) { dev_err(&dev->dev, "kmalloc(%d) failed.\n", length); return -ENOMEM; } /* Note: usb_control_msg returns negative value on error or length of the * data that was written! 
*/ result = usb_control_msg (dev, usb_sndctrlpipe(dev, 0), request, 0x40, address, 0, buffer, length, 300); kfree (buffer); return result; } /* thanks to drivers/usb/serial/keyspan_pda.c code */ static int emi26_set_reset (struct usb_device *dev, unsigned char reset_bit) { int response; dev_info(&dev->dev, "%s - %d\n", __func__, reset_bit); /* printk(KERN_DEBUG "%s - %d", __func__, reset_bit); */ response = emi26_writememory (dev, CPUCS_REG, &reset_bit, 1, 0xa0); if (response < 0) { dev_err(&dev->dev, "set_reset (%d) failed\n", reset_bit); } return response; } #define FW_LOAD_SIZE 1023 static int emi26_load_firmware (struct usb_device *dev) { const struct firmware *loader_fw = NULL; const struct firmware *bitstream_fw = NULL; const struct firmware *firmware_fw = NULL; const struct ihex_binrec *rec; int err = -ENOMEM; int i; __u32 addr; /* Address to write */ __u8 *buf; buf = kmalloc(FW_LOAD_SIZE, GFP_KERNEL); if (!buf) goto wraperr; err = request_ihex_firmware(&loader_fw, "emi26/loader.fw", &dev->dev); if (err) goto nofw; err = request_ihex_firmware(&bitstream_fw, "emi26/bitstream.fw", &dev->dev); if (err) goto nofw; err = request_ihex_firmware(&firmware_fw, "emi26/firmware.fw", &dev->dev); if (err) { nofw: dev_err(&dev->dev, "%s - request_firmware() failed\n", __func__); goto wraperr; } /* Assert reset (stop the CPU in the EMI) */ err = emi26_set_reset(dev,1); if (err < 0) goto wraperr; rec = (const struct ihex_binrec *)loader_fw->data; /* 1. We need to put the loader for the FPGA into the EZ-USB */ while (rec) { err = emi26_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_INTERNAL); if (err < 0) goto wraperr; rec = ihex_next_binrec(rec); } /* De-assert reset (let the CPU run) */ err = emi26_set_reset(dev,0); if (err < 0) goto wraperr; msleep(250); /* let device settle */ /* 2. We upload the FPGA firmware into the EMI * Note: collect up to 1023 (yes!) bytes and send them with * a single request. This is _much_ faster! */ rec = (const struct ihex_binrec *)bitstream_fw->data; do { i = 0; addr = be32_to_cpu(rec->addr); /* intel hex records are terminated with type 0 element */ while (rec && (i + be16_to_cpu(rec->len) < FW_LOAD_SIZE)) { memcpy(buf + i, rec->data, be16_to_cpu(rec->len)); i += be16_to_cpu(rec->len); rec = ihex_next_binrec(rec); } err = emi26_writememory(dev, addr, buf, i, ANCHOR_LOAD_FPGA); if (err < 0) goto wraperr; } while (rec); /* Assert reset (stop the CPU in the EMI) */ err = emi26_set_reset(dev,1); if (err < 0) goto wraperr; /* 3. We need to put the loader for the firmware into the EZ-USB (again...) */ for (rec = (const struct ihex_binrec *)loader_fw->data; rec; rec = ihex_next_binrec(rec)) { err = emi26_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_INTERNAL); if (err < 0) goto wraperr; } msleep(250); /* let device settle */ /* De-assert reset (let the CPU run) */ err = emi26_set_reset(dev,0); if (err < 0) goto wraperr; /* 4. 
We put the part of the firmware that lies in the external RAM into the EZ-USB */ for (rec = (const struct ihex_binrec *)firmware_fw->data; rec; rec = ihex_next_binrec(rec)) { if (!INTERNAL_RAM(be32_to_cpu(rec->addr))) { err = emi26_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_EXTERNAL); if (err < 0) goto wraperr; } } /* Assert reset (stop the CPU in the EMI) */ err = emi26_set_reset(dev,1); if (err < 0) goto wraperr; for (rec = (const struct ihex_binrec *)firmware_fw->data; rec; rec = ihex_next_binrec(rec)) { if (INTERNAL_RAM(be32_to_cpu(rec->addr))) { err = emi26_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_INTERNAL); if (err < 0) goto wraperr; } } /* De-assert reset (let the CPU run) */ err = emi26_set_reset(dev,0); if (err < 0) goto wraperr; msleep(250); /* let device settle */ /* return 1 to fail the driver initialization * and give the real driver a chance to load */ err = 1; wraperr: if (err < 0) dev_err(&dev->dev,"%s - error loading firmware: error = %d\n", __func__, err); release_firmware(loader_fw); release_firmware(bitstream_fw); release_firmware(firmware_fw); kfree(buf); return err; } static const struct usb_device_id id_table[] = { { USB_DEVICE(EMI26_VENDOR_ID, EMI26_PRODUCT_ID) }, { USB_DEVICE(EMI26_VENDOR_ID, EMI26B_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, id_table); static int emi26_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); dev_info(&intf->dev, "%s start\n", __func__); emi26_load_firmware(dev); /* do not return the driver context, let real audio driver do that */ return -EIO; } static void emi26_disconnect(struct usb_interface *intf) { } static struct usb_driver emi26_driver = { .name = "emi26 - firmware loader", .probe = emi26_probe, .disconnect = emi26_disconnect, .id_table = id_table, }; module_usb_driver(emi26_driver); MODULE_AUTHOR("Tapio Laxström"); MODULE_DESCRIPTION("Emagic EMI 2|6 firmware loader."); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("emi26/loader.fw"); MODULE_FIRMWARE("emi26/bitstream.fw"); MODULE_FIRMWARE("emi26/firmware.fw"); /* vi:ai:syntax=c:sw=8:ts=8:tw=80 */
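/*
 * Editor's note: an illustrative sketch, not part of the original file. It
 * isolates the ihex record walk this loader repeats for each image:
 * request a firmware blob, write every record, release. The firmware name
 * is a placeholder; emi26_writememory() is the helper defined above.
 */
static int example_write_image(struct usb_device *dev, const char *name)
{
	const struct firmware *fw;
	const struct ihex_binrec *rec;
	int err = request_ihex_firmware(&fw, name, &dev->dev);

	if (err)
		return err;
	for (rec = (const struct ihex_binrec *)fw->data; rec;
	     rec = ihex_next_binrec(rec)) {
		err = emi26_writememory(dev, be32_to_cpu(rec->addr),
					rec->data, be16_to_cpu(rec->len),
					ANCHOR_LOAD_INTERNAL);
		if (err < 0)
			break;
	}
	release_firmware(fw);
	return err < 0 ? err : 0;
}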
// SPDX-License-Identifier: GPL-2.0-or-later /* cx231xx-i2c.c - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> Based on em28xx driver Based on Cx23885 driver */ #include "cx231xx.h" #include <linux/module.h> #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/i2c-mux.h> #include <media/v4l2-common.h> #include <media/tuner.h> /* ----------------------------------------------------------- */ static unsigned int i2c_scan; module_param(i2c_scan, int, 0444); MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time"); static unsigned int i2c_debug; module_param(i2c_debug, int, 0644); MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]"); #define dprintk1(lvl, fmt, args...) \ do { \ if (i2c_debug >= lvl) { \ printk(fmt, ##args); \ } \ } while (0) #define dprintk2(lvl, fmt, args...) \ do { \ if (i2c_debug >= lvl) { \ printk(KERN_DEBUG "%s at %s: " fmt, \ dev->name, __func__ , ##args); \ } \ } while (0) static inline int get_real_i2c_port(struct cx231xx *dev, int bus_nr) { if (bus_nr == 1) return dev->port_3_switch_enabled ? 
I2C_1_MUX_3 : I2C_1_MUX_1; return bus_nr; } static inline bool is_tuner(struct cx231xx *dev, struct cx231xx_i2c *bus, const struct i2c_msg *msg, int tuner_type) { int i2c_port = get_real_i2c_port(dev, bus->nr); if (i2c_port != dev->board.tuner_i2c_master) return false; if (msg->addr != dev->board.tuner_addr) return false; if (dev->tuner_type != tuner_type) return false; return true; } /* * cx231xx_i2c_send_bytes() */ static int cx231xx_i2c_send_bytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg) { struct cx231xx_i2c *bus = i2c_adap->algo_data; struct cx231xx *dev = bus->dev; struct cx231xx_i2c_xfer_data req_data; int status = 0; u16 size = 0; u8 loop = 0; u8 saddr_len = 1; u8 *buf_ptr = NULL; u16 saddr = 0; u8 need_gpio = 0; if (is_tuner(dev, bus, msg, TUNER_XC5000)) { size = msg->len; if (size == 2) { /* register write sub addr */ /* Just writing sub address will cause problem * to XC5000. So ignore the request */ return 0; } else if (size == 4) { /* register write with sub addr */ if (msg->len >= 2) saddr = msg->buf[0] << 8 | msg->buf[1]; else if (msg->len == 1) saddr = msg->buf[0]; switch (saddr) { case 0x0000: /* start tuner calibration mode */ need_gpio = 1; /* FW Loading is done */ dev->xc_fw_load_done = 1; break; case 0x000D: /* Set signal source */ case 0x0001: /* Set TV standard - Video */ case 0x0002: /* Set TV standard - Audio */ case 0x0003: /* Set RF Frequency */ need_gpio = 1; break; default: if (dev->xc_fw_load_done) need_gpio = 1; break; } if (need_gpio) { dprintk1(1, "GPIO WRITE: addr 0x%x, len %d, saddr 0x%x\n", msg->addr, msg->len, saddr); return dev->cx231xx_gpio_i2c_write(dev, msg->addr, msg->buf, msg->len); } } /* special case for Xc5000 tuner case */ saddr_len = 1; /* adjust the length to correct length */ size -= saddr_len; buf_ptr = (u8 *) (msg->buf + 1); do { /* prepare xfer_data struct */ req_data.dev_addr = msg->addr; req_data.direction = msg->flags; req_data.saddr_len = saddr_len; req_data.saddr_dat = msg->buf[0]; req_data.buf_size = size > 16 ? 16 : size; req_data.p_buffer = (u8 *) (buf_ptr + loop * 16); bus->i2c_nostop = (size > 16) ? 1 : 0; bus->i2c_reserve = (loop == 0) ? 0 : 1; /* usb send command */ status = dev->cx231xx_send_usb_command(bus, &req_data); loop++; if (size >= 16) size -= 16; else size = 0; } while (size > 0); bus->i2c_nostop = 0; bus->i2c_reserve = 0; } else { /* regular case */ /* prepare xfer_data struct */ req_data.dev_addr = msg->addr; req_data.direction = msg->flags; req_data.saddr_len = 0; req_data.saddr_dat = 0; req_data.buf_size = msg->len; req_data.p_buffer = msg->buf; /* usb send command */ status = dev->cx231xx_send_usb_command(bus, &req_data); } return status < 0 ? 
status : 0; } /* * cx231xx_i2c_recv_bytes() * read a byte from the i2c device */ static int cx231xx_i2c_recv_bytes(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg) { struct cx231xx_i2c *bus = i2c_adap->algo_data; struct cx231xx *dev = bus->dev; struct cx231xx_i2c_xfer_data req_data; int status = 0; u16 saddr = 0; u8 need_gpio = 0; if (is_tuner(dev, bus, msg, TUNER_XC5000)) { if (msg->len == 2) saddr = msg->buf[0] << 8 | msg->buf[1]; else if (msg->len == 1) saddr = msg->buf[0]; if (dev->xc_fw_load_done) { switch (saddr) { case 0x0009: /* BUSY check */ dprintk1(1, "GPIO R E A D: Special case BUSY check \n"); /*Try read BUSY register, just set it to zero*/ msg->buf[0] = 0; if (msg->len == 2) msg->buf[1] = 0; return 0; case 0x0004: /* read Lock status */ need_gpio = 1; break; } if (need_gpio) { /* this is a special case to handle Xceive tuner clock stretch issue with gpio based I2C */ dprintk1(1, "GPIO R E A D: addr 0x%x, len %d, saddr 0x%x\n", msg->addr, msg->len, msg->buf[0] << 8 | msg->buf[1]); status = dev->cx231xx_gpio_i2c_write(dev, msg->addr, msg->buf, msg->len); status = dev->cx231xx_gpio_i2c_read(dev, msg->addr, msg->buf, msg->len); return status; } } /* prepare xfer_data struct */ req_data.dev_addr = msg->addr; req_data.direction = msg->flags; req_data.saddr_len = msg->len; req_data.saddr_dat = msg->buf[0] << 8 | msg->buf[1]; req_data.buf_size = msg->len; req_data.p_buffer = msg->buf; /* usb send command */ status = dev->cx231xx_send_usb_command(bus, &req_data); } else { /* prepare xfer_data struct */ req_data.dev_addr = msg->addr; req_data.direction = msg->flags; req_data.saddr_len = 0; req_data.saddr_dat = 0; req_data.buf_size = msg->len; req_data.p_buffer = msg->buf; /* usb send command */ status = dev->cx231xx_send_usb_command(bus, &req_data); } return status < 0 ? status : 0; } /* * cx231xx_i2c_recv_bytes_with_saddr() * read a byte from the i2c device */ static int cx231xx_i2c_recv_bytes_with_saddr(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg1, const struct i2c_msg *msg2) { struct cx231xx_i2c *bus = i2c_adap->algo_data; struct cx231xx *dev = bus->dev; struct cx231xx_i2c_xfer_data req_data; int status = 0; u16 saddr = 0; u8 need_gpio = 0; if (msg1->len == 2) saddr = msg1->buf[0] << 8 | msg1->buf[1]; else if (msg1->len == 1) saddr = msg1->buf[0]; if (is_tuner(dev, bus, msg2, TUNER_XC5000)) { if ((msg2->len < 16)) { dprintk1(1, "i2c_read: addr 0x%x, len %d, saddr 0x%x, len %d\n", msg2->addr, msg2->len, saddr, msg1->len); switch (saddr) { case 0x0008: /* read FW load status */ need_gpio = 1; break; case 0x0004: /* read Lock status */ need_gpio = 1; break; } if (need_gpio) { status = dev->cx231xx_gpio_i2c_write(dev, msg1->addr, msg1->buf, msg1->len); status = dev->cx231xx_gpio_i2c_read(dev, msg2->addr, msg2->buf, msg2->len); return status; } } } /* prepare xfer_data struct */ req_data.dev_addr = msg2->addr; req_data.direction = msg2->flags; req_data.saddr_len = msg1->len; req_data.saddr_dat = saddr; req_data.buf_size = msg2->len; req_data.p_buffer = msg2->buf; /* usb send command */ status = dev->cx231xx_send_usb_command(bus, &req_data); return status < 0 ? 
status : 0; } /* * cx231xx_i2c_check_for_device() * check if there is a i2c_device at the supplied address */ static int cx231xx_i2c_check_for_device(struct i2c_adapter *i2c_adap, const struct i2c_msg *msg) { struct cx231xx_i2c *bus = i2c_adap->algo_data; struct cx231xx *dev = bus->dev; struct cx231xx_i2c_xfer_data req_data; int status = 0; u8 buf[1]; /* prepare xfer_data struct */ req_data.dev_addr = msg->addr; req_data.direction = I2C_M_RD; req_data.saddr_len = 0; req_data.saddr_dat = 0; req_data.buf_size = 1; req_data.p_buffer = buf; /* usb send command */ status = dev->cx231xx_send_usb_command(bus, &req_data); return status < 0 ? status : 0; } /* * cx231xx_i2c_xfer() * the main i2c transfer function */ static int cx231xx_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num) { struct cx231xx_i2c *bus = i2c_adap->algo_data; struct cx231xx *dev = bus->dev; int addr, rc, i, byte; mutex_lock(&dev->i2c_lock); for (i = 0; i < num; i++) { addr = msgs[i].addr; dprintk2(2, "%s %s addr=0x%x len=%d:", (msgs[i].flags & I2C_M_RD) ? "read" : "write", i == num - 1 ? "stop" : "nonstop", addr, msgs[i].len); if (!msgs[i].len) { /* no len: check only for device presence */ rc = cx231xx_i2c_check_for_device(i2c_adap, &msgs[i]); if (rc < 0) { dprintk2(2, " no device\n"); mutex_unlock(&dev->i2c_lock); return rc; } } else if (msgs[i].flags & I2C_M_RD) { /* read bytes */ rc = cx231xx_i2c_recv_bytes(i2c_adap, &msgs[i]); if (i2c_debug >= 2) { for (byte = 0; byte < msgs[i].len; byte++) printk(KERN_CONT " %02x", msgs[i].buf[byte]); } } else if (i + 1 < num && (msgs[i + 1].flags & I2C_M_RD) && msgs[i].addr == msgs[i + 1].addr && (msgs[i].len <= 2) && (bus->nr < 3)) { /* write bytes */ if (i2c_debug >= 2) { for (byte = 0; byte < msgs[i].len; byte++) printk(KERN_CONT " %02x", msgs[i].buf[byte]); printk(KERN_CONT "\n"); } /* read bytes */ dprintk2(2, "plus %s %s addr=0x%x len=%d:", (msgs[i+1].flags & I2C_M_RD) ? "read" : "write", i+1 == num - 1 ? 
"stop" : "nonstop", addr, msgs[i+1].len); rc = cx231xx_i2c_recv_bytes_with_saddr(i2c_adap, &msgs[i], &msgs[i + 1]); if (i2c_debug >= 2) { for (byte = 0; byte < msgs[i+1].len; byte++) printk(KERN_CONT " %02x", msgs[i+1].buf[byte]); } i++; } else { /* write bytes */ if (i2c_debug >= 2) { for (byte = 0; byte < msgs[i].len; byte++) printk(KERN_CONT " %02x", msgs[i].buf[byte]); } rc = cx231xx_i2c_send_bytes(i2c_adap, &msgs[i]); } if (rc < 0) goto err; if (i2c_debug >= 2) printk(KERN_CONT "\n"); } mutex_unlock(&dev->i2c_lock); return num; err: dprintk2(2, " ERROR: %i\n", rc); mutex_unlock(&dev->i2c_lock); return rc; } /* ----------------------------------------------------------- */ /* * functionality() */ static u32 functionality(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C; } static const struct i2c_algorithm cx231xx_algo = { .master_xfer = cx231xx_i2c_xfer, .functionality = functionality, }; static const struct i2c_adapter cx231xx_adap_template = { .owner = THIS_MODULE, .name = "cx231xx", .algo = &cx231xx_algo, }; /* ----------------------------------------------------------- */ /* * i2c_devs * incomplete list of known devices */ static const char *i2c_devs[128] = { [0x20 >> 1] = "demod", [0x60 >> 1] = "colibri", [0x88 >> 1] = "hammerhead", [0x8e >> 1] = "CIR", [0x32 >> 1] = "GeminiIII", [0x02 >> 1] = "Aquarius", [0xa0 >> 1] = "eeprom", [0xc0 >> 1] = "tuner", [0xc2 >> 1] = "tuner", }; /* * cx231xx_do_i2c_scan() * check i2c address range for devices */ void cx231xx_do_i2c_scan(struct cx231xx *dev, int i2c_port) { unsigned char buf; int i, rc; struct i2c_adapter *adap; struct i2c_msg msg = { .flags = I2C_M_RD, .len = 1, .buf = &buf, }; if (!i2c_scan) return; /* Don't generate I2C errors during scan */ dev->i2c_scan_running = true; adap = cx231xx_get_i2c_adap(dev, i2c_port); for (i = 0; i < 128; i++) { msg.addr = i; rc = i2c_transfer(adap, &msg, 1); if (rc < 0) continue; dev_info(dev->dev, "i2c scan: found device @ port %d addr 0x%x [%s]\n", i2c_port, i << 1, i2c_devs[i] ? 
i2c_devs[i] : "???"); } dev->i2c_scan_running = false; } /* * cx231xx_i2c_register() * register i2c bus */ int cx231xx_i2c_register(struct cx231xx_i2c *bus) { struct cx231xx *dev = bus->dev; if (!dev->cx231xx_send_usb_command) return -EINVAL; bus->i2c_adap = cx231xx_adap_template; bus->i2c_adap.dev.parent = dev->dev; snprintf(bus->i2c_adap.name, sizeof(bus->i2c_adap.name), "%s-%d", bus->dev->name, bus->nr); bus->i2c_adap.algo_data = bus; i2c_set_adapdata(&bus->i2c_adap, &dev->v4l2_dev); bus->i2c_rc = i2c_add_adapter(&bus->i2c_adap); if (0 != bus->i2c_rc) dev_warn(dev->dev, "i2c bus %d register FAILED\n", bus->nr); return bus->i2c_rc; } /* * cx231xx_i2c_unregister() * unregister i2c_bus */ void cx231xx_i2c_unregister(struct cx231xx_i2c *bus) { if (!bus->i2c_rc) i2c_del_adapter(&bus->i2c_adap); } /* * cx231xx_i2c_mux_select() * switch i2c master number 1 between port1 and port3 */ static int cx231xx_i2c_mux_select(struct i2c_mux_core *muxc, u32 chan_id) { struct cx231xx *dev = i2c_mux_priv(muxc); return cx231xx_enable_i2c_port_3(dev, chan_id); } int cx231xx_i2c_mux_create(struct cx231xx *dev) { dev->muxc = i2c_mux_alloc(&dev->i2c_bus[1].i2c_adap, dev->dev, 2, 0, 0, cx231xx_i2c_mux_select, NULL); if (!dev->muxc) return -ENOMEM; dev->muxc->priv = dev; return 0; } int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no) { return i2c_mux_add_adapter(dev->muxc, 0, mux_no); } void cx231xx_i2c_mux_unregister(struct cx231xx *dev) { i2c_mux_del_adapters(dev->muxc); } struct i2c_adapter *cx231xx_get_i2c_adap(struct cx231xx *dev, int i2c_port) { switch (i2c_port) { case I2C_0: return &dev->i2c_bus[0].i2c_adap; case I2C_1: return &dev->i2c_bus[1].i2c_adap; case I2C_2: return &dev->i2c_bus[2].i2c_adap; case I2C_1_MUX_1: return dev->muxc->adapter[0]; case I2C_1_MUX_3: return dev->muxc->adapter[1]; default: BUG(); } } EXPORT_SYMBOL_GPL(cx231xx_get_i2c_adap);
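/*
 * Editor's note: an illustrative sketch, not part of the original file. It
 * shows the same i2c_transfer() probe pattern cx231xx_do_i2c_scan() uses
 * above, for a single caller-chosen address on one of the ports returned
 * by cx231xx_get_i2c_adap(); the function name is hypothetical.
 */
static int example_probe_addr(struct cx231xx *dev, int i2c_port, u16 addr)
{
	unsigned char buf;
	struct i2c_msg msg = {
		.addr = addr,
		.flags = I2C_M_RD,
		.len = 1,
		.buf = &buf,
	};
	struct i2c_adapter *adap = cx231xx_get_i2c_adap(dev, i2c_port);

	/* i2c_transfer() returns the number of messages transferred. */
	return i2c_transfer(adap, &msg, 1) == 1 ? 0 : -ENODEV;
}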
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_DMA_MAPPING_H #define _LINUX_DMA_MAPPING_H #include <linux/device.h> #include <linux/err.h> #include <linux/dma-direction.h> #include <linux/scatterlist.h> #include <linux/bug.h> /** * List of possible attributes associated with a DMA mapping. The semantics * of each attribute should be defined in Documentation/core-api/dma-attributes.rst. */ /* * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping * may be weakly ordered, that is that reads and writes may pass each other. */ #define DMA_ATTR_WEAK_ORDERING (1UL << 1) /* * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be * buffered to improve performance. */ #define DMA_ATTR_WRITE_COMBINE (1UL << 2) /* * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel * virtual mapping for the allocated buffer. */ #define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4) /* * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of * the CPU cache for the given buffer assuming that it has been already * transferred to 'device' domain. 
*/ #define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5) /* * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer * in physical memory. */ #define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6) /* * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem * that it's probably not worth the time to try to allocate the memory in a way * that gives better TLB efficiency. */ #define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7) /* * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress * allocation failure reports (similarly to __GFP_NOWARN). */ #define DMA_ATTR_NO_WARN (1UL << 8) /* * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully * accessible at an elevated privilege level (and ideally inaccessible or * at least read-only at lesser-privileged levels). */ #define DMA_ATTR_PRIVILEGED (1UL << 9) /* * A dma_addr_t can hold any valid DMA or bus address for the platform. It can * be given to a device to use as a DMA source or target. It is specific to a * given device and there may be a translation between the CPU physical address * space and the bus address space. * * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not * be used directly in drivers, but checked for using dma_mapping_error() * instead. */ #define DMA_MAPPING_ERROR (~(dma_addr_t)0) #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) #ifdef CONFIG_DMA_API_DEBUG void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); void debug_dma_map_single(struct device *dev, const void *addr, unsigned long len); #else static inline void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { } static inline void debug_dma_map_single(struct device *dev, const void *addr, unsigned long len) { } #endif /* CONFIG_DMA_API_DEBUG */ #ifdef CONFIG_HAS_DMA static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { debug_dma_mapping_error(dev, dma_addr); if (unlikely(dma_addr == DMA_MAPPING_ERROR)) return -ENOMEM; return 0; } dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir, unsigned long attrs); void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs); unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); int dma_map_sgtable(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir, unsigned long attrs); dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, enum dma_data_direction dir, unsigned long attrs); void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs); void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs); void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs); void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); int dma_mmap_attrs(struct device *dev, struct 
vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); bool dma_can_mmap(struct device *dev); bool dma_pci_p2pdma_supported(struct device *dev); int dma_set_mask(struct device *dev, u64 mask); int dma_set_coherent_mask(struct device *dev, u64 mask); u64 dma_get_required_mask(struct device *dev); bool dma_addressing_limited(struct device *dev); size_t dma_max_mapping_size(struct device *dev); size_t dma_opt_mapping_size(struct device *dev); unsigned long dma_get_merge_boundary(struct device *dev); struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, enum dma_data_direction dir, gfp_t gfp, unsigned long attrs); void dma_free_noncontiguous(struct device *dev, size_t size, struct sg_table *sgt, enum dma_data_direction dir); void *dma_vmap_noncontiguous(struct device *dev, size_t size, struct sg_table *sgt); void dma_vunmap_noncontiguous(struct device *dev, void *vaddr); int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, size_t size, struct sg_table *sgt); #else /* CONFIG_HAS_DMA */ static inline dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { return DMA_MAPPING_ERROR; } static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { } static inline unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { return 0; } static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { } static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir, unsigned long attrs) { return -EOPNOTSUPP; } static inline dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { return DMA_MAPPING_ERROR; } static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { } static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return -ENOMEM; } static inline void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) { return NULL; } static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) { } static inline void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { return NULL; } static inline void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { } static inline int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { return -ENXIO; } static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { return -ENXIO; } static inline bool dma_can_mmap(struct device *dev) { return false; } static inline bool dma_pci_p2pdma_supported(struct device *dev) { return false; } static inline int dma_set_mask(struct device *dev, u64 mask) { return -EIO; } static inline int dma_set_coherent_mask(struct device *dev, u64 mask) { return -EIO; } static inline u64 dma_get_required_mask(struct device *dev) { return 0; } static 
inline bool dma_addressing_limited(struct device *dev) { return false; } static inline size_t dma_max_mapping_size(struct device *dev) { return 0; } static inline size_t dma_opt_mapping_size(struct device *dev) { return 0; } static inline unsigned long dma_get_merge_boundary(struct device *dev) { return 0; } static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, enum dma_data_direction dir, gfp_t gfp, unsigned long attrs) { return NULL; } static inline void dma_free_noncontiguous(struct device *dev, size_t size, struct sg_table *sgt, enum dma_data_direction dir) { } static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size, struct sg_table *sgt) { return NULL; } static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr) { } static inline int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, size_t size, struct sg_table *sgt) { return -EINVAL; } #endif /* CONFIG_HAS_DMA */ #if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC) void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir); void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir); void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir); bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr); static inline bool dma_dev_need_sync(const struct device *dev) { /* Always call DMA sync operations when debugging is enabled */ return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG); } static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { if (dma_dev_need_sync(dev)) __dma_sync_single_for_cpu(dev, addr, size, dir); } static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { if (dma_dev_need_sync(dev)) __dma_sync_single_for_device(dev, addr, size, dir); } static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { if (dma_dev_need_sync(dev)) __dma_sync_sg_for_cpu(dev, sg, nelems, dir); } static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { if (dma_dev_need_sync(dev)) __dma_sync_sg_for_device(dev, sg, nelems, dir); } static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) { return dma_dev_need_sync(dev) ? 
__dma_need_sync(dev, dma_addr) : false; } #else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */ static inline bool dma_dev_need_sync(const struct device *dev) { return false; } static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { } static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { } static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { } static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { } static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) { return false; } #endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */ struct page *dma_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp); void dma_free_pages(struct device *dev, size_t size, struct page *page, dma_addr_t dma_handle, enum dma_data_direction dir); int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, size_t size, struct page *page); static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) { struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp); return page ? page_address(page) : NULL; } static inline void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir) { dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir); } static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, unsigned long attrs) { /* DMA must never operate on areas that might be remapped. */ if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr), "rejecting DMA map of vmalloc memory\n")) return DMA_MAPPING_ERROR; debug_dma_map_single(dev, ptr, size); return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr), size, dir, attrs); } static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { return dma_unmap_page_attrs(dev, addr, size, dir, attrs); } static inline void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t addr, unsigned long offset, size_t size, enum dma_data_direction dir) { return dma_sync_single_for_cpu(dev, addr + offset, size, dir); } static inline void dma_sync_single_range_for_device(struct device *dev, dma_addr_t addr, unsigned long offset, size_t size, enum dma_data_direction dir) { return dma_sync_single_for_device(dev, addr + offset, size, dir); } /** * dma_unmap_sgtable - Unmap the given buffer for DMA * @dev: The device for which to perform the DMA operation * @sgt: The sg_table object describing the buffer * @dir: DMA direction * @attrs: Optional DMA attributes for the unmap operation * * Unmaps a buffer described by a scatterlist stored in the given sg_table * object for the @dir DMA operation by the @dev device. After this function * the ownership of the buffer is transferred back to the CPU domain. 
*/ static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir, unsigned long attrs) { dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); } /** * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access * @dev: The device for which to perform the DMA operation * @sgt: The sg_table object describing the buffer * @dir: DMA direction * * Performs the needed cache synchronization and moves the ownership of the * buffer back to the CPU domain, so it is safe to perform any access to it * by the CPU. Before doing any further DMA operations, one has to transfer * the ownership of the buffer back to the DMA domain by calling the * dma_sync_sgtable_for_device(). */ static inline void dma_sync_sgtable_for_cpu(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir) { dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir); } /** * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA * @dev: The device for which to perform the DMA operation * @sgt: The sg_table object describing the buffer * @dir: DMA direction * * Performs the needed cache synchronization and moves the ownership of the * buffer back to the DMA domain, so it is safe to perform the DMA operation. * Once finished, one has to call dma_sync_sgtable_for_cpu() or * dma_unmap_sgtable(). */ static inline void dma_sync_sgtable_for_device(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir) { dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir); } #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0) #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0) #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0) #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size); static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { return dma_alloc_attrs(dev, size, dma_handle, gfp, (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0); } static inline void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0); } static inline u64 dma_get_mask(struct device *dev) { if (dev->dma_mask && *dev->dma_mask) return *dev->dma_mask; return DMA_BIT_MASK(32); } /* * Set both the DMA mask and the coherent DMA mask to the same thing. * Note that we don't check the return value from dma_set_coherent_mask() * as the DMA API guarantees that the coherent DMA mask can be set to * the same or smaller than the streaming DMA mask. */ static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) { int rc = dma_set_mask(dev, mask); if (rc == 0) dma_set_coherent_mask(dev, mask); return rc; } /* * Similar to the above, except it deals with the case where the device * does not have dev->dma_mask appropriately setup. 
*/ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) { dev->dma_mask = &dev->coherent_dma_mask; return dma_set_mask_and_coherent(dev, mask); } static inline unsigned int dma_get_max_seg_size(struct device *dev) { if (dev->dma_parms && dev->dma_parms->max_segment_size) return dev->dma_parms->max_segment_size; return SZ_64K; } static inline void dma_set_max_seg_size(struct device *dev, unsigned int size) { if (WARN_ON_ONCE(!dev->dma_parms)) return; dev->dma_parms->max_segment_size = size; } static inline unsigned long dma_get_seg_boundary(struct device *dev) { if (dev->dma_parms && dev->dma_parms->segment_boundary_mask) return dev->dma_parms->segment_boundary_mask; return ULONG_MAX; } /** * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units * @dev: device to query the boundary for * @page_shift: ilog2() of the IOMMU page size * * Return the segment boundary in IOMMU page units (which may be different from * the CPU page size) for the passed in device. * * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for * non-DMA API callers. */ static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev, unsigned int page_shift) { if (!dev) return (U32_MAX >> page_shift) + 1; return (dma_get_seg_boundary(dev) >> page_shift) + 1; } static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask) { if (WARN_ON_ONCE(!dev->dma_parms)) return; dev->dma_parms->segment_boundary_mask = mask; } static inline unsigned int dma_get_min_align_mask(struct device *dev) { if (dev->dma_parms) return dev->dma_parms->min_align_mask; return 0; } static inline void dma_set_min_align_mask(struct device *dev, unsigned int min_align_mask) { if (WARN_ON_ONCE(!dev->dma_parms)) return; dev->dma_parms->min_align_mask = min_align_mask; } #ifndef dma_get_cache_alignment static inline int dma_get_cache_alignment(void) { #ifdef ARCH_HAS_DMA_MINALIGN return ARCH_DMA_MINALIGN; #endif return 1; } #endif static inline void *dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { return dmam_alloc_attrs(dev, size, dma_handle, gfp, (gfp & __GFP_NOWARN) ? 
DMA_ATTR_NO_WARN : 0); } static inline void *dma_alloc_wc(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t gfp) { unsigned long attrs = DMA_ATTR_WRITE_COMBINE; if (gfp & __GFP_NOWARN) attrs |= DMA_ATTR_NO_WARN; return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs); } static inline void dma_free_wc(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr) { return dma_free_attrs(dev, size, cpu_addr, dma_addr, DMA_ATTR_WRITE_COMBINE); } static inline int dma_mmap_wc(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size) { return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, DMA_ATTR_WRITE_COMBINE); } #ifdef CONFIG_NEED_DMA_MAP_STATE #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) #define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) #define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) #else #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) #define dma_unmap_addr(PTR, ADDR_NAME) (0) #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) #define dma_unmap_len(PTR, LEN_NAME) (0) #define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) #endif #endif /* _LINUX_DMA_MAPPING_H */
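/*
 * Editor's note: an illustrative sketch, not part of the original header.
 * It shows the streaming-DMA round trip built from the helpers declared
 * above: map, check for failure, use, unmap. 'buf' must be linearly mapped
 * kernel memory (not vmalloc), and the device-programming step is elided.
 */
static int example_dma_write(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... hand 'handle' to the device and wait for completion ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}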
/* * kmod - the kernel module loader * * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org> */ #include <linux/module.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/binfmts.h> #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/completion.h> #include <linux/cred.h> #include <linux/file.h> #include <linux/workqueue.h> #include <linux/security.h> #include <linux/mount.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/resource.h> #include <linux/notifier.h> #include <linux/suspend.h> #include <linux/rwsem.h> #include <linux/ptrace.h> #include <linux/async.h> #include <linux/uaccess.h> #include <trace/events/module.h> #include "internal.h" /* * Assuming: * * threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE, * (u64) THREAD_SIZE * 8UL); * * needing less than 50 threads would mean we're dealing with systems * smaller than 3200 pages. This assumes you are capable of having ~13M memory, * and this would only be an upper limit, after which the OOM killer would take * effect. Systems like these are very unlikely if modules are enabled. */ #define MAX_KMOD_CONCURRENT 50 static DEFINE_SEMAPHORE(kmod_concurrent_max, MAX_KMOD_CONCURRENT); /* * This is a restriction on having *all* MAX_KMOD_CONCURRENT threads * running at the same time without returning. When this happens we * believe you've somehow ended up with a recursive module dependency * creating a loop. * * We have no option but to fail. * * Userspace should proactively try to detect and prevent these. */ #define MAX_KMOD_ALL_BUSY_TIMEOUT 5 /* modprobe_path is set via /proc/sys. 
 */
char modprobe_path[KMOD_PATH_LEN] = CONFIG_MODPROBE_PATH;

static void free_modprobe_argv(struct subprocess_info *info)
{
	kfree(info->argv[3]); /* check call_modprobe() */
	kfree(info->argv);
}

static int call_modprobe(char *orig_module_name, int wait)
{
	struct subprocess_info *info;
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};
	char *module_name;
	int ret;
	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);

	if (!argv)
		goto out;

	module_name = kstrdup(orig_module_name, GFP_KERNEL);
	if (!module_name)
		goto free_argv;

	argv[0] = modprobe_path;
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = module_name;	/* check free_modprobe_argv() */
	argv[4] = NULL;

	info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
					 NULL, free_modprobe_argv, NULL);
	if (!info)
		goto free_module_name;

	ret = call_usermodehelper_exec(info, wait | UMH_KILLABLE);
	kmod_dup_request_announce(orig_module_name, ret);
	return ret;

free_module_name:
	kfree(module_name);
free_argv:
	kfree(argv);
out:
	kmod_dup_request_announce(orig_module_name, -ENOMEM);
	return -ENOMEM;
}

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code or positive exit code from
 * "modprobe" on failure. Note that a successful module load does not mean
 * the module did not then unload and exit on an error of its own. Callers
 * must check that the service they requested is now available, not blindly
 * invoke it.
 *
 * If module auto-loading support is disabled then this function
 * simply returns -ENOENT.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	int ret, dup_ret;

	/*
	 * We don't allow synchronous module loading from async. Module
	 * init may invoke async_synchronize_full() which will end up
	 * waiting for this task which already is waiting for the module
	 * loading to complete, leading to a deadlock.
	 */
	WARN_ON_ONCE(wait && current_is_async());

	if (!modprobe_path[0])
		return -ENOENT;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	ret = down_timeout(&kmod_concurrent_max, MAX_KMOD_ALL_BUSY_TIMEOUT * HZ);
	if (ret) {
		pr_warn_ratelimited("request_module: modprobe %s cannot be processed, kmod busy with %d threads for more than %d seconds now",
				    module_name, MAX_KMOD_CONCURRENT,
				    MAX_KMOD_ALL_BUSY_TIMEOUT);
		return ret;
	}

	trace_module_request(module_name, wait, _RET_IP_);

	if (kmod_dup_request_exists_wait(module_name, wait, &dup_ret)) {
		ret = dup_ret;
		goto out;
	}

	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

out:
	up(&kmod_concurrent_max);
	return ret;
}
EXPORT_SYMBOL(__request_module);
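A hedged usage sketch of the interface above: request_module() (the wrapper around __request_module() declared in linux/kmod.h) loads a module by name, but as the kernel-doc warns, a zero return does not guarantee the requested service exists, so the caller must re-check its own registry. my_proto and my_proto_find() are invented names for illustration.

/* Hypothetical sketch: my_proto_find() is an invented registry lookup. */
static struct my_proto *my_proto_get(const char *name)
{
	struct my_proto *p = my_proto_find(name);

	if (!p) {
		request_module("my-proto-%s", name);
		/* a successful load still has to be verified */
		p = my_proto_find(name);
	}
	return p;
}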
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Frontend driver for the GENPIX 8psk/qpsk/DCII USB2.0 DVB-S module
 *
 * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com)
 * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com)
 *
 * Thanks to GENPIX for the sample code used to implement this module.
 *
 * This module is based off the vp7045 and vp702x modules
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "gp8psk-fe.h"
#include <media/dvb_frontend.h>

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");

#define dprintk(fmt, arg...) \
	do { \
		if (debug) \
			printk(KERN_DEBUG pr_fmt("%s: " fmt), \
			       __func__, ##arg); \
	} while (0)

struct gp8psk_fe_state {
	struct dvb_frontend fe;
	void *priv;
	const struct gp8psk_fe_ops *ops;
	bool is_rev1;
	u8 lock;
	u16 snr;
	unsigned long next_status_check;
	unsigned long status_check_interval;
};

static int gp8psk_tuned_to_DCII(struct dvb_frontend *fe)
{
	struct gp8psk_fe_state *st = fe->demodulator_priv;
	u8 status;

	st->ops->in(st->priv, GET_8PSK_CONFIG, 0, 0, &status, 1);
	return status & bmDCtuned;
}

static int gp8psk_set_tuner_mode(struct dvb_frontend *fe, int mode)
{
	struct gp8psk_fe_state *st = fe->demodulator_priv;

	return st->ops->out(st->priv, SET_8PSK_CONFIG, mode, 0, NULL, 0);
}

static int gp8psk_fe_update_status(struct gp8psk_fe_state *st)
{
	u8 buf[6];

	if (time_after(jiffies, st->next_status_check)) {
		st->ops->in(st->priv, GET_SIGNAL_LOCK, 0, 0, &st->lock, 1);
		st->ops->in(st->priv, GET_SIGNAL_STRENGTH, 0, 0, buf, 6);
		st->snr = (buf[1]) << 8 | buf[0];
		st->next_status_check = jiffies +
					(st->status_check_interval * HZ) / 1000;
	}
	return 0;
}

static int gp8psk_fe_read_status(struct dvb_frontend *fe,
				 enum fe_status *status)
{
	struct gp8psk_fe_state *st = fe->demodulator_priv;

	gp8psk_fe_update_status(st);

	if (st->lock)
		*status = FE_HAS_LOCK | FE_HAS_SYNC | FE_HAS_VITERBI |
			  FE_HAS_SIGNAL | FE_HAS_CARRIER;
	else
		*status = 0;

	if (*status & FE_HAS_LOCK)
		st->status_check_interval = 1000;
	else
		st->status_check_interval = 100;
	return 0;
}

/* not supported by this Frontend */
static int gp8psk_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	(void)fe;
	*ber = 0;
	return 0;
}

/* not supported by this Frontend */
static int gp8psk_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
{
	(void)fe;
	*unc = 0;
	return 0;
}

static int gp8psk_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	struct gp8psk_fe_state *st = fe->demodulator_priv;

	gp8psk_fe_update_status(st);
	/* snr is reported in dBu*256 */
	*snr = st->snr;
	return 0;
}

static int gp8psk_fe_read_signal_strength(struct dvb_frontend *fe,
					  u16 *strength)
{
	struct gp8psk_fe_state *st = fe->demodulator_priv;

	gp8psk_fe_update_status(st);
	/* snr is reported in dBu*256 */
	/* snr / 38.4 ~= 100% strength */
	/* snr * 17 returns 100% strength as 65535 */
	if (st->snr > 0xf00)
		*strength = 0xffff;
	else
		*strength = (st->snr << 4) + st->snr; /* snr * 17 */
	return 0;
}

static int gp8psk_fe_get_tune_settings(struct dvb_frontend *fe,
				       struct dvb_frontend_tune_settings *tune)
{
	tune->min_delay_ms = 800;
	return 0;
}

static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
{
	struct gp8psk_fe_state *st = fe->demodulator_priv;
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	u8 cmd[10];
	u32 freq = c->frequency * 1000;

	dprintk("%s()\n", __func__);

	cmd[4] = freq & 0xff;
	cmd[5] = (freq >> 8) & 0xff;
	cmd[6] = (freq >> 16) & 0xff;
	cmd[7] = (freq >> 24) & 0xff;

	/* backwards compatibility: DVB-S + 8-PSK were used for Turbo-FEC */
	if (c->delivery_system == SYS_DVBS && c->modulation == PSK_8)
		c->delivery_system = SYS_TURBO;

	switch (c->delivery_system) {
	case SYS_DVBS:
		if (c->modulation != QPSK) {
			dprintk("%s: unsupported modulation selected (%d)\n",
				__func__, c->modulation);
			return -EOPNOTSUPP;
		}
		c->fec_inner = FEC_AUTO;
		break;
	case SYS_DVBS2: /* kept for backwards compatibility */
		dprintk("%s: DVB-S2 delivery system selected\n", __func__);
		break;
	case SYS_TURBO:
		dprintk("%s: Turbo-FEC delivery system selected\n", __func__);
		break;
	default:
		dprintk("%s: unsupported delivery system selected (%d)\n",
			__func__, c->delivery_system);
		return -EOPNOTSUPP;
	}

	cmd[0] = c->symbol_rate & 0xff;
	cmd[1] = (c->symbol_rate >> 8) & 0xff;
	cmd[2] = (c->symbol_rate >> 16) & 0xff;
	cmd[3] = (c->symbol_rate >> 24) & 0xff;

	switch (c->modulation) {
	case QPSK:
		if (st->is_rev1)
			if (gp8psk_tuned_to_DCII(fe))
				st->ops->reload(st->priv);
		switch (c->fec_inner) {
		case FEC_1_2:	cmd[9] = 0; break;
		case FEC_2_3:	cmd[9] = 1; break;
		case FEC_3_4:	cmd[9] = 2; break;
		case FEC_5_6:	cmd[9] = 3; break;
		case FEC_7_8:	cmd[9] = 4; break;
		case FEC_AUTO:	cmd[9] = 5; break;
		default:	cmd[9] = 5; break;
		}
		if (c->delivery_system == SYS_TURBO)
			cmd[8] = ADV_MOD_TURBO_QPSK;
		else
			cmd[8] = ADV_MOD_DVB_QPSK;
		break;
	case PSK_8: /* PSK_8 is for compatibility with DN */
		cmd[8] = ADV_MOD_TURBO_8PSK;
		switch (c->fec_inner) {
		case FEC_2_3:	cmd[9] = 0; break;
		case FEC_3_4:	cmd[9] = 1; break;
		case FEC_3_5:	cmd[9] = 2; break;
		case FEC_5_6:	cmd[9] = 3; break;
		case FEC_8_9:	cmd[9] = 4; break;
		default:	cmd[9] = 0; break;
		}
		break;
	case QAM_16: /* QAM_16 is for compatibility with DN */
		cmd[8] = ADV_MOD_TURBO_16QAM;
		cmd[9] = 0;
		break;
	default: /* Unknown modulation */
		dprintk("%s: unsupported modulation selected (%d)\n",
			__func__, c->modulation);
		return -EOPNOTSUPP;
	}

	if (st->is_rev1)
		gp8psk_set_tuner_mode(fe, 0);
	st->ops->out(st->priv, TUNE_8PSK, 0, 0, cmd, 10);

	st->lock = 0;
	st->next_status_check = jiffies;
	st->status_check_interval = 200;

	return 0;
}

static int gp8psk_fe_send_diseqc_msg(struct dvb_frontend *fe,
				     struct dvb_diseqc_master_cmd *m)
{
	struct gp8psk_fe_state *st = fe->demodulator_priv;

	dprintk("%s\n", __func__);

	if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, m->msg[0], 0,
			 m->msg, m->msg_len))
		return -EINVAL;
	return 0;
}

static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe,
				       enum fe_sec_mini_cmd burst)
{
	struct gp8psk_fe_state *st = fe->demodulator_priv;
	u8 cmd;

	dprintk("%s\n", __func__);

	/* These commands are certainly wrong */
	cmd = (burst == SEC_MINI_A) ? 0x00 : 0x01;

	if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, cmd, 0, &cmd, 0))
		return -EINVAL;
	return 0;
}

static int gp8psk_fe_set_tone(struct dvb_frontend *fe,
			      enum fe_sec_tone_mode tone)
{
	struct gp8psk_fe_state *st = fe->demodulator_priv;

	if (st->ops->out(st->priv, SET_22KHZ_TONE, (tone == SEC_TONE_ON), 0,
			 NULL, 0))
		return -EINVAL;
	return 0;
}

static int gp8psk_fe_set_voltage(struct dvb_frontend *fe,
				 enum fe_sec_voltage voltage)
{
	struct gp8psk_fe_state *st = fe->demodulator_priv;

	if (st->ops->out(st->priv, SET_LNB_VOLTAGE,
			 voltage == SEC_VOLTAGE_18, 0, NULL, 0))
		return -EINVAL;
	return 0;
}

static int gp8psk_fe_enable_high_lnb_voltage(struct dvb_frontend *fe,
					     long onoff)
{
	struct gp8psk_fe_state *st = fe->demodulator_priv;

	return st->ops->out(st->priv, USE_EXTRA_VOLT, onoff, 0, NULL, 0);
}

static int gp8psk_fe_send_legacy_dish_cmd(struct dvb_frontend *fe,
					  unsigned long sw_cmd)
{
	struct gp8psk_fe_state *st = fe->demodulator_priv;
	u8 cmd = sw_cmd & 0x7f;

	if (st->ops->out(st->priv, SET_DN_SWITCH, cmd, 0, NULL, 0))
		return -EINVAL;
	if (st->ops->out(st->priv, SET_LNB_VOLTAGE, !!(sw_cmd & 0x80),
			 0, NULL, 0))
		return -EINVAL;
	return 0;
}

static void gp8psk_fe_release(struct dvb_frontend *fe)
{
	struct gp8psk_fe_state *st = fe->demodulator_priv;

	kfree(st);
}

static const struct dvb_frontend_ops gp8psk_fe_ops;

struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
				      void *priv, bool is_rev1)
{
	struct gp8psk_fe_state *st;

	if (!ops || !ops->in || !ops->out || !ops->reload) {
		pr_err("Error! gp8psk-fe ops not defined.\n");
		return NULL;
	}

	st = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL);
	if (!st)
		return NULL;

	memcpy(&st->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops));
	st->fe.demodulator_priv = st;
	st->ops = ops;
	st->priv = priv;
	st->is_rev1 = is_rev1;

	pr_info("Frontend %sattached\n", is_rev1 ? "revision 1 " : "");

	return &st->fe;
}
EXPORT_SYMBOL_GPL(gp8psk_fe_attach);

static const struct dvb_frontend_ops gp8psk_fe_ops = {
	.delsys = { SYS_DVBS },
	.info = {
		.name			= "Genpix DVB-S",
		.frequency_min_hz	=  800 * MHz,
		.frequency_max_hz	= 2250 * MHz,
		.frequency_stepsize_hz	=  100 * kHz,
		.symbol_rate_min	= 1000000,
		.symbol_rate_max	= 45000000,
		.symbol_rate_tolerance	= 500,	/* ppm */
		.caps = FE_CAN_INVERSION_AUTO |
			FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
			/*
			 * FE_CAN_QAM_16 is for compatibility
			 * (Myth incorrectly detects Turbo-QPSK as plain QAM-16)
			 */
			FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_TURBO_FEC
	},

	.release = gp8psk_fe_release,

	.init = NULL,
	.sleep = NULL,

	.set_frontend = gp8psk_fe_set_frontend,

	.get_tune_settings = gp8psk_fe_get_tune_settings,

	.read_status = gp8psk_fe_read_status,
	.read_ber = gp8psk_fe_read_ber,
	.read_signal_strength = gp8psk_fe_read_signal_strength,
	.read_snr = gp8psk_fe_read_snr,
	.read_ucblocks = gp8psk_fe_read_unc_blocks,

	.diseqc_send_master_cmd = gp8psk_fe_send_diseqc_msg,
	.diseqc_send_burst = gp8psk_fe_send_diseqc_burst,
	.set_tone = gp8psk_fe_set_tone,
	.set_voltage = gp8psk_fe_set_voltage,
	.dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd,
	.enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage
};

MODULE_AUTHOR("Alan Nisota <alannisota@gmail.com>");
MODULE_DESCRIPTION("Frontend Driver for Genpix DVB-S");
MODULE_VERSION("1.1");
MODULE_LICENSE("GPL");
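For context, a hedged sketch of how a USB driver might hand its transport callbacks to gp8psk_fe_attach(); the my_gp8psk_* helpers and the surrounding adapter wiring are assumptions, and only the gp8psk_fe_ops fields (in, out, reload) come from this file.

/* Hypothetical transport glue; the callback names are invented. */
static const struct gp8psk_fe_ops my_fe_ops = {
	.in	= my_gp8psk_usb_in,	/* vendor-request read  */
	.out	= my_gp8psk_usb_out,	/* vendor-request write */
	.reload	= my_gp8psk_fw_reload,	/* firmware reload      */
};

static int my_frontend_attach(struct dvb_usb_adapter *adap, bool is_rev1)
{
	adap->fe_adap[0].fe = gp8psk_fe_attach(&my_fe_ops, adap->dev, is_rev1);
	if (!adap->fe_adap[0].fe)
		return -ENODEV;
	return 0;
}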
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>

#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>

EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);

static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;

/*
 * fence context counter: each execution context should have its own
 * fence context, this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);

/**
 * DOC: DMA fences overview
 *
 * DMA fences, represented by &struct dma_fence, are the kernel internal
 * synchronization primitive for DMA operations like GPU rendering, video
 * encoding/decoding, or displaying buffers on a screen.
 *
 * A fence is initialized using dma_fence_init() and completed using
 * dma_fence_signal(). Fences are associated with a context, allocated through
 * dma_fence_context_alloc(), and all fences on the same context are
 * fully ordered.
 *
 * Since the purpose of fences is to facilitate cross-device and
 * cross-application synchronization, there are multiple ways to use one:
 *
 * - Individual fences can be exposed as a &sync_file, accessed as a file
 *   descriptor from userspace, created by calling sync_file_create(). This is
 *   called explicit fencing, since userspace passes around explicit
 *   synchronization points.
 *
 * - Some subsystems also have their own explicit fencing primitives, like
 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
 *   fence to be updated.
 *
 * - Then there's also implicit fencing, where the synchronization points are
 *   implicitly passed around as part of shared &dma_buf instances. Such
 *   implicit fences are stored in &struct dma_resv through the
 *   &dma_buf.resv pointer.
 */

/**
 * DOC: fence cross-driver contract
 *
 * Since &dma_fence provides a cross-driver contract, all drivers must follow
 * the same rules:
 *
 * * Fences must complete in a reasonable time. Fences which represent kernels
 *   and shaders submitted by userspace, which could run forever, must be backed
 *   up by timeout and gpu hang recovery code. Minimally that code must prevent
 *   further command submission and force complete all in-flight fences, e.g.
 *   when the driver or hardware do not support gpu reset, or if the gpu reset
 *   failed for some reason. Ideally the driver supports gpu recovery which only
 *   affects the offending userspace context, and no other userspace
 *   submissions.
 *
 * * Drivers may have different ideas of what completion within a reasonable
 *   time means. Some hang recovery code uses a fixed timeout, others a mix
 *   between observing forward progress and increasingly strict timeouts.
 *   Drivers should not try to second guess timeout handling of fences from
 *   other drivers.
 *
 * * To ensure there are no deadlocks of dma_fence_wait() against other locks,
 *   drivers should annotate all code required to reach dma_fence_signal(),
 *   which completes the fences, with dma_fence_begin_signalling() and
 *   dma_fence_end_signalling().
 *
 * * Drivers are allowed to call dma_fence_wait() while holding
 *   dma_resv_lock(). This means any code required for fence completion cannot
 *   acquire a &dma_resv lock. Note that this also pulls in the entire
 *   established locking hierarchy around dma_resv_lock() and dma_resv_unlock().
 *
 * * Drivers are allowed to call dma_fence_wait() from their &shrinker
 *   callbacks. This means any code required for fence completion cannot
 *   allocate memory with GFP_KERNEL.
 *
 * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
 *   respectively &mmu_interval_notifier callbacks. This means any code required
 *   for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
 *   Only GFP_ATOMIC is permissible, which might fail.
 *
 * Note that only GPU drivers have a reasonable excuse for both requiring
 * &mmu_interval_notifier and &shrinker callbacks at the same time as having to
 * track asynchronous compute work using &dma_fence. No driver outside of
 * drivers/gpu should ever call dma_fence_wait() in such contexts.
 */

static const char *dma_fence_stub_get_name(struct dma_fence *fence)
{
	return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};

/**
 * dma_fence_get_stub - return a signaled fence
 *
 * Return a stub fence which is already signaled. The fence's
 * timestamp corresponds to the first time after boot this
 * function is called.
 */
struct dma_fence *dma_fence_get_stub(void)
{
	spin_lock(&dma_fence_stub_lock);
	if (!dma_fence_stub.ops) {
		dma_fence_init(&dma_fence_stub,
			       &dma_fence_stub_ops,
			       &dma_fence_stub_lock,
			       0, 0);
		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			&dma_fence_stub.flags);
		dma_fence_signal_locked(&dma_fence_stub);
	}
	spin_unlock(&dma_fence_stub_lock);

	return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);

/**
 * dma_fence_allocate_private_stub - return a private, signaled fence
 * @timestamp: timestamp when the fence was signaled
 *
 * Return a newly allocated and signaled stub fence.
 */
struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
{
	struct dma_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	dma_fence_init(fence,
		       &dma_fence_stub_ops,
		       &dma_fence_stub_lock,
		       0, 0);

	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	dma_fence_signal_timestamp(fence, timestamp);

	return fence;
}
EXPORT_SYMBOL(dma_fence_allocate_private_stub);

/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num: amount of contexts to allocate
 *
 * This function will return the first index of the number of fence contexts
 * allocated. The fence context is used for setting &dma_fence.context to a
 * unique number by passing the context to dma_fence_init().
 */
u64 dma_fence_context_alloc(unsigned num)
{
	WARN_ON(!num);
	return atomic64_fetch_add(num, &dma_fence_context_counter);
}
EXPORT_SYMBOL(dma_fence_context_alloc);

/**
 * DOC: fence signalling annotation
 *
 * Proving correctness of all the kernel code around &dma_fence through code
 * review and testing is tricky for a few reasons:
 *
 * * It is a cross-driver contract, and therefore all drivers must follow the
 *   same rules for lock nesting order, calling contexts for various functions
 *   and anything else significant for in-kernel interfaces. But it is also
 *   impossible to test all drivers in a single machine, hence brute-force N vs.
 *   N testing of all combinations is impossible. Even just limiting to the
 *   possible combinations is infeasible.
 *
 * * There is an enormous amount of driver code involved. For render drivers
 *   there's the tail of command submission, after fences are published,
 *   scheduler code, interrupt and workers to process job completion,
 *   and timeout, gpu reset and gpu hang recovery code. Plus for integration
 *   with core mm we have &mmu_notifier, respectively &mmu_interval_notifier,
 *   and &shrinker. For modesetting drivers there's the commit tail functions
 *   between when fences for an atomic modeset are published, and when the
 *   corresponding vblank completes, including any interrupt processing and
 *   related workers. Auditing all that code, across all drivers, is not
 *   feasible.
 *
 * * Due to how many other subsystems are involved and the locking hierarchies
 *   this pulls in there is extremely thin wiggle-room for driver-specific
 *   differences. &dma_fence interacts with almost all of the core memory
 *   handling through page fault handlers via &dma_resv, dma_resv_lock() and
 *   dma_resv_unlock(). On the other side it also interacts through all
 *   allocation sites through &mmu_notifier and &shrinker.
 *
 * Furthermore lockdep does not handle cross-release dependencies, which means
 * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
 * at runtime with some quick testing. The simplest example is one thread
 * waiting on a &dma_fence while holding a lock::
 *
 *     lock(A);
 *     dma_fence_wait(B);
 *     unlock(A);
 *
 * while the other thread is stuck trying to acquire the same lock, which
 * prevents it from signalling the fence the previous thread is stuck waiting
 * on::
 *
 *     lock(A);
 *     unlock(A);
 *     dma_fence_signal(B);
 *
 * By manually annotating all code relevant to signalling a &dma_fence we can
 * teach lockdep about these dependencies, which also helps with the validation
 * headache since now lockdep can check all the rules for us::
 *
 *    cookie = dma_fence_begin_signalling();
 *    lock(A);
 *    unlock(A);
 *    dma_fence_signal(B);
 *    dma_fence_end_signalling(cookie);
 *
 * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to
 * annotate critical sections the following rules need to be observed:
 *
 * * All code necessary to complete a &dma_fence must be annotated, from the
 *   point where a fence is accessible to other threads, to the point where
 *   dma_fence_signal() is called. Un-annotated code can contain deadlock
 *   issues, and due to the very strict rules and many corner cases it is
 *   infeasible to catch these just with review or normal stress testing.
 *
 * * &struct dma_resv deserves a special note, since the readers are only
 *   protected by rcu. This means the signalling critical section starts as
 *   soon as the new fences are installed, even before dma_resv_unlock() is
 *   called.
 *
 * * The only exceptions are fast paths and opportunistic signalling code, which
 *   calls dma_fence_signal() purely as an optimization, but is not required to
 *   guarantee completion of a &dma_fence. The usual example is a wait IOCTL
 *   which calls dma_fence_signal(), while the mandatory completion path goes
 *   through a hardware interrupt and possible job completion worker.
 *
 * * To aid composability of code, the annotations can be freely nested, as
 *   long as the overall locking hierarchy is consistent. The annotations also
 *   work both in interrupt and process context. Due to implementation details
 *   this requires that callers pass an opaque cookie from
 *   dma_fence_begin_signalling() to dma_fence_end_signalling().
 *
 * * Validation against the cross driver contract is implemented by priming
 *   lockdep with the relevant hierarchy at boot-up. This means even just
 *   testing with a single device is enough to validate a driver, at least as
 *   far as deadlocks with dma_fence_wait() against dma_fence_signal() are
 *   concerned.
 */

#ifdef CONFIG_LOCKDEP
static struct lockdep_map dma_fence_lockdep_map = {
	.name = "dma_fence_map"
};

/**
 * dma_fence_begin_signalling - begin a critical DMA fence signalling section
 *
 * Drivers should use this to annotate the beginning of any code section
 * required to eventually complete &dma_fence by calling dma_fence_signal().
 *
 * The end of these critical sections are annotated with
 * dma_fence_end_signalling().
 *
 * Returns:
 *
 * Opaque cookie needed by the implementation, which needs to be passed to
 * dma_fence_end_signalling().
 */
bool dma_fence_begin_signalling(void)
{
	/* explicitly nesting ... */
	if (lock_is_held_type(&dma_fence_lockdep_map, 1))
		return true;

	/* rely on might_sleep check for soft/hardirq locks */
	if (in_atomic())
		return true;

	/* ... and non-recursive successful read_trylock */
	lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _RET_IP_);

	return false;
}
EXPORT_SYMBOL(dma_fence_begin_signalling);

/**
 * dma_fence_end_signalling - end a critical DMA fence signalling section
 * @cookie: opaque cookie from dma_fence_begin_signalling()
 *
 * Closes a critical section annotation opened by dma_fence_begin_signalling().
 */
void dma_fence_end_signalling(bool cookie)
{
	if (cookie)
		return;

	lock_release(&dma_fence_lockdep_map, _RET_IP_);
}
EXPORT_SYMBOL(dma_fence_end_signalling);

void __dma_fence_might_wait(void)
{
	bool tmp;

	tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
	if (tmp)
		lock_release(&dma_fence_lockdep_map, _THIS_IP_);
	lock_map_acquire(&dma_fence_lockdep_map);
	lock_map_release(&dma_fence_lockdep_map);
	if (tmp)
		lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL,
			     _THIS_IP_);
}
#endif

/**
 * dma_fence_signal_timestamp_locked - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Unlike dma_fence_signal_timestamp(), this function must be called with
 * &dma_fence.lock held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
				      ktime_t timestamp)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	lockdep_assert_held(fence->lock);

	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &fence->flags)))
		return -EINVAL;

	/* Stash the cb_list before replacing it with the timestamp */
	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	trace_dma_fence_signaled(fence);

	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);

/**
 * dma_fence_signal_timestamp - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
{
	unsigned long flags;
	int ret;

	if (WARN_ON(!fence))
		return -EINVAL;

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, timestamp);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp);

/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
 * held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
	return dma_fence_signal_timestamp_locked(fence, ktime_get());
}
EXPORT_SYMBOL(dma_fence_signal_locked);

/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;
	int ret;
	bool tmp;

	if (WARN_ON(!fence))
		return -EINVAL;

	tmp = dma_fence_begin_signalling();

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
	spin_unlock_irqrestore(fence->lock, flags);

	dma_fence_end_signalling(tmp);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal);

/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	might_sleep();

	__dma_fence_might_wait();

	dma_fence_enable_sw_signaling(fence);

	trace_dma_fence_wait_start(fence);
	if (fence->ops->wait)
		ret = fence->ops->wait(fence, intr, timeout);
	else
		ret = dma_fence_default_wait(fence, intr, timeout);
	trace_dma_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);

/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't call
 * this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	trace_dma_fence_destroy(fence);

	if (WARN(!list_empty(&fence->cb_list) &&
		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
		 fence->ops->get_driver_name(fence),
		 fence->ops->get_timeline_name(fence),
		 fence->context, fence->seqno)) {
		unsigned long flags;

		/*
		 * Failed to signal before release, likely a refcounting issue.
		 *
		 * This should never happen, but if it does make sure that we
		 * don't leave chains dangling. We set the error flag first
		 * so that the callbacks know this signal is due to an error.
		 */
		spin_lock_irqsave(fence->lock, flags);
		fence->error = -EDEADLK;
		dma_fence_signal_locked(fence);
		spin_unlock_irqrestore(fence->lock, flags);
	}

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

/**
 * dma_fence_free - default release function for &dma_fence.
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
 */
void dma_fence_free(struct dma_fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);

static bool __dma_fence_enable_signaling(struct dma_fence *fence)
{
	bool was_set;

	lockdep_assert_held(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return false;

	if (!was_set && fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			return false;
		}
	}

	return true;
}

/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: the fence to enable
 *
 * This will request for sw signaling to be enabled, to make the fence
 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
 * internally.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	__dma_fence_enable_signaling(fence);
	spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);

/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: the fence to wait on
 * @cb: the callback to register
 * @func: the function to call
 *
 * Add a software callback to the fence. The caller should keep a reference to
 * the fence.
 *
 * @cb will be initialized by dma_fence_add_callback(), no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * If fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Note that the callback can be called from an atomic context or irq context.
 *
 * Returns 0 in case of success, -ENOENT if the fence is already signaled
 * and -EINVAL in case of error.
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
			   dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	if (__dma_fence_enable_signaling(fence)) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else {
		INIT_LIST_HEAD(&cb->node);
		ret = -ENOENT;
	}

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);

/**
 * dma_fence_get_status - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * This wraps dma_fence_get_status_locked() to return the error status
 * condition on a signaled fence. See dma_fence_get_status_locked() for more
 * details.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence has been completed in err.
 */
int dma_fence_get_status(struct dma_fence *fence)
{
	unsigned long flags;
	int status;

	spin_lock_irqsave(fence->lock, flags);
	status = dma_fence_get_status_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);

	return status;
}
EXPORT_SYMBOL(dma_fence_get_status);

/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence: the fence to wait on
 * @cb: the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 *
 * Behaviour is undefined if @cb has not been added to @fence using
 * dma_fence_add_callback() beforehand.
 */
bool dma_fence_remove_callback(struct dma_fence *fence,
			       struct dma_fence_cb *cb)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);

struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}

/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If timeout is zero the value one is
 * returned if the fence is already signaled for consistency with other
 * functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout ? timeout : 1;

	spin_lock_irqsave(fence->lock, flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	if (!timeout) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	       ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);

static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
			    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}

/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: array of fences to wait on
 * @count: number of fences to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: used to store the first signaled fence index, meaningful only on
 *	 positive return
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronously waits for the first fence in the array to be signaled.
 * The caller needs to hold a reference to all fences in the array, otherwise
 * a fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_timeout().
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
			   bool intr, signed long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
					   dma_fence_default_wait_cb)) {
			/* This fence is already signaled */
			if (idx)
				*idx = i;
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_test_signaled_any(fences, count, idx))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);

/**
 * DOC: deadline hints
 *
 * In an ideal world, it would be possible to pipeline a workload sufficiently
 * that a utilization based device frequency governor could arrive at a minimum
 * frequency that meets the requirements of the use-case, in order to minimize
 * power consumption. But in the real world there are many workloads which
 * defy this ideal. For example, but not limited to:
 *
 * * Workloads that ping-pong between device and CPU, with alternating periods
 *   of CPU waiting for device, and device waiting on CPU. This can result in
 *   devfreq and cpufreq seeing idle time in their respective domains and as a
 *   result reduce frequency.
 *
 * * Workloads that interact with a periodic time based deadline, such as
 *   double buffered GPU rendering vs vblank sync'd page flipping. In this
 *   scenario, missing a vblank deadline results in an *increase* in idle time
 *   on the GPU (since it has to wait an additional vblank period), sending a
 *   signal to the GPU's devfreq to reduce frequency, when in fact the opposite
 *   is what is needed.
 *
 * To this end, deadline hint(s) can be set on a &dma_fence via
 * &dma_fence_set_deadline (or indirectly via userspace facing ioctls like
 * &sync_set_deadline). The deadline hint provides a way for the waiting
 * driver, or userspace, to convey an appropriate sense of urgency to the
 * signaling driver.
 *
 * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace
 * facing APIs). The time could either be some point in the future (such as
 * the vblank based deadline for page-flipping, or the start of a compositor's
 * composition cycle), or the current time to indicate an immediate deadline
 * hint (i.e. forward progress cannot be made until this fence is signaled).
 *
 * Multiple deadlines may be set on a given fence, even in parallel. See the
 * documentation for &dma_fence_ops.set_deadline.
 *
 * The deadline hint is just that, a hint. The driver that created the fence
 * may react by increasing frequency, making different scheduling choices, etc.
 * Or doing nothing at all.
 */

/**
 * dma_fence_set_deadline - set desired fence-wait deadline hint
 * @fence: the fence that is to be waited on
 * @deadline: the time by which the waiter hopes for the fence to be
 *	      signaled
 *
 * Give the fence signaler a hint about an upcoming deadline, such as
 * vblank, by which point the waiter would prefer the fence to be
 * signaled by. This is intended to give feedback to the fence signaler
 * to aid in power management decisions, such as boosting GPU frequency
 * if a periodic vblank deadline is approaching but the fence is not
 * yet signaled.
 */
void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
	if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
		fence->ops->set_deadline(fence, deadline);
}
EXPORT_SYMBOL(dma_fence_set_deadline);

/**
 * dma_fence_describe - Dump fence description into seq_file
 * @fence: the fence to describe
 * @seq: the seq_file to put the textual description into
 *
 * Dump a textual description of the fence and its state into the seq_file.
 */
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
	seq_printf(seq, "%s %s seq %llu %ssignalled\n",
		   fence->ops->get_driver_name(fence),
		   fence->ops->get_timeline_name(fence),
		   fence->seqno,
		   dma_fence_is_signaled(fence) ? "" : "un");
}
EXPORT_SYMBOL(dma_fence_describe);

/**
 * dma_fence_init - Initialize a custom fence.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linear increasing sequence number for this context
 *
 * Initializes an allocated fence, the caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
	       spinlock_t *lock, u64 context, u64 seqno)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;
	fence->error = 0;

	trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(dma_fence_init);
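To tie the pieces above together, a minimal, hypothetical sketch (my_name, my_fence_ops and my_fence_demo are invented): allocate a fence on a fresh context, signal it inside a begin/end signalling annotation as the DOC section recommends, and drop the initial reference.

/* Hypothetical minimal ops: only the two mandatory callbacks. */
static const char *my_name(struct dma_fence *fence)
{
	return "my-driver";
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name = my_name,
	.get_timeline_name = my_name,
};

static DEFINE_SPINLOCK(my_fence_lock);

static void my_fence_demo(void)
{
	struct dma_fence *fence;
	bool cookie;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return;

	dma_fence_init(fence, &my_fence_ops, &my_fence_lock,
		       dma_fence_context_alloc(1), 1);

	/* completion path, annotated for lockdep as described above */
	cookie = dma_fence_begin_signalling();
	dma_fence_signal(fence);
	dma_fence_end_signalling(cookie);

	dma_fence_put(fence);	/* drop the reference from kref_init() */
}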
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM skb

#if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SKB_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tracepoint.h>

#undef FN
#define FN(reason)	TRACE_DEFINE_ENUM(SKB_DROP_REASON_##reason);
DEFINE_DROP_REASON(FN, FN)

#undef FN
#undef FNe
#define FN(reason)	{ SKB_DROP_REASON_##reason, #reason },
#define FNe(reason)	{ SKB_DROP_REASON_##reason, #reason }

/*
 * Tracepoint for freeing an sk_buff:
 */
TRACE_EVENT(kfree_skb,

	TP_PROTO(struct sk_buff *skb, void *location,
		 enum skb_drop_reason reason, struct sock *rx_sk),

	TP_ARGS(skb, location, reason, rx_sk),

	TP_STRUCT__entry(
		__field(void *,			skbaddr)
		__field(void *,			location)
		__field(void *,			rx_sk)
		__field(unsigned short,		protocol)
		__field(enum skb_drop_reason,	reason)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->location = location;
		__entry->rx_sk = rx_sk;
		__entry->protocol = ntohs(skb->protocol);
		__entry->reason = reason;
	),

	TP_printk("skbaddr=%p rx_sk=%p protocol=%u location=%pS reason: %s",
		  __entry->skbaddr, __entry->rx_sk, __entry->protocol,
		  __entry->location,
		  __print_symbolic(__entry->reason, DEFINE_DROP_REASON(FN, FNe)))
);

#undef FN
#undef FNe

TRACE_EVENT(consume_skb,

	TP_PROTO(struct sk_buff *skb, void *location),

	TP_ARGS(skb, location),

	TP_STRUCT__entry(
		__field(void *,	skbaddr)
		__field(void *,	location)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->location = location;
	),

	TP_printk("skbaddr=%p location=%pS", __entry->skbaddr, __entry->location)
);

TRACE_EVENT(skb_copy_datagram_iovec,

	TP_PROTO(const struct sk_buff *skb, int len),

	TP_ARGS(skb, len),

	TP_STRUCT__entry(
		__field(const void *,	skbaddr)
		__field(int,		len)
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->len = len;
	),

	TP_printk("skbaddr=%p len=%d", __entry->skbaddr, __entry->len)
);

#endif /* _TRACE_SKB_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
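A hedged sketch of consuming the kfree_skb tracepoint from a module, in the spirit of the in-tree drop monitor; register_trace_kfree_skb()/unregister_trace_kfree_skb() are generated from the TRACE_EVENT above, and the probe's leading void * argument is the tracepoint's private data pointer. The my_* names are invented.

/* Hypothetical probe; the prototype mirrors TP_PROTO plus a data pointer. */
static void my_kfree_skb_probe(void *ignore, struct sk_buff *skb,
			       void *location, enum skb_drop_reason reason,
			       struct sock *rx_sk)
{
	pr_debug("skb %p dropped at %pS, reason %d\n", skb, location, reason);
}

static int __init my_drop_watch_init(void)
{
	return register_trace_kfree_skb(my_kfree_skb_probe, NULL);
}

static void __exit my_drop_watch_exit(void)
{
	unregister_trace_kfree_skb(my_kfree_skb_probe, NULL);
}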
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A security context is a set of security attributes
 * associated with each subject and object controlled
 * by the security policy. Security contexts are
 * externally represented as variable-length strings
 * that can be interpreted by a user or application
 * with an understanding of the security policy.
 * Internally, the security server uses a simple
 * structure. This structure is private to the
 * security server and can be changed without affecting
 * clients of the security server.
 *
 * Author : Stephen Smalley, <stephen.smalley.work@gmail.com>
 */
#ifndef _SS_CONTEXT_H_
#define _SS_CONTEXT_H_

#include "ebitmap.h"
#include "mls_types.h"
#include "security.h"

/*
 * A security context consists of an authenticated user
 * identity, a role, a type and an MLS range.
 */
struct context {
	u32 user;
	u32 role;
	u32 type;
	u32 len;	/* length of string in bytes */
	struct mls_range range;
	char *str;	/* string representation if context cannot be mapped */
};

static inline void mls_context_init(struct context *c)
{
	memset(&c->range, 0, sizeof(c->range));
}

static inline int mls_context_cpy(struct context *dst,
				  const struct context *src)
{
	int rc;

	dst->range.level[0].sens = src->range.level[0].sens;
	rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
	if (rc)
		goto out;

	dst->range.level[1].sens = src->range.level[1].sens;
	rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
	if (rc)
		ebitmap_destroy(&dst->range.level[0].cat);
out:
	return rc;
}

/*
 * Sets both levels in the MLS range of 'dst' to the low level of 'src'.
 */
static inline int mls_context_cpy_low(struct context *dst,
				      const struct context *src)
{
	int rc;

	dst->range.level[0].sens = src->range.level[0].sens;
	rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
	if (rc)
		goto out;

	dst->range.level[1].sens = src->range.level[0].sens;
	rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[0].cat);
	if (rc)
		ebitmap_destroy(&dst->range.level[0].cat);
out:
	return rc;
}

/*
 * Sets both levels in the MLS range of 'dst' to the high level of 'src'.
 */
static inline int mls_context_cpy_high(struct context *dst,
				       const struct context *src)
{
	int rc;

	dst->range.level[0].sens = src->range.level[1].sens;
	rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[1].cat);
	if (rc)
		goto out;

	dst->range.level[1].sens = src->range.level[1].sens;
	rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
	if (rc)
		ebitmap_destroy(&dst->range.level[0].cat);
out:
	return rc;
}

static inline int mls_context_glblub(struct context *dst,
				     const struct context *c1,
				     const struct context *c2)
{
	struct mls_range *dr = &dst->range;
	const struct mls_range *r1 = &c1->range, *r2 = &c2->range;
	int rc = 0;

	if (r1->level[1].sens < r2->level[0].sens ||
	    r2->level[1].sens < r1->level[0].sens)
		/* These ranges have no common sensitivities */
		return -EINVAL;

	/* Take the greatest of the low */
	dr->level[0].sens = max(r1->level[0].sens, r2->level[0].sens);

	/* Take the least of the high */
	dr->level[1].sens = min(r1->level[1].sens, r2->level[1].sens);

	rc = ebitmap_and(&dr->level[0].cat,
			 &r1->level[0].cat, &r2->level[0].cat);
	if (rc)
		goto out;

	rc = ebitmap_and(&dr->level[1].cat,
			 &r1->level[1].cat, &r2->level[1].cat);
	if (rc)
		goto out;

out:
	return rc;
}

static inline bool mls_context_equal(const struct context *c1,
				     const struct context *c2)
{
	return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
		ebitmap_equal(&c1->range.level[0].cat,
			      &c2->range.level[0].cat) &&
		(c1->range.level[1].sens == c2->range.level[1].sens) &&
		ebitmap_equal(&c1->range.level[1].cat,
			      &c2->range.level[1].cat));
}

static inline void mls_context_destroy(struct context *c)
{
	ebitmap_destroy(&c->range.level[0].cat);
	ebitmap_destroy(&c->range.level[1].cat);
	mls_context_init(c);
}

static inline void context_init(struct context *c)
{
	memset(c, 0, sizeof(*c));
}

static inline int context_cpy(struct context *dst, const struct context *src)
{
	int rc;

	dst->user = src->user;
	dst->role = src->role;
	dst->type = src->type;
	if (src->str) {
		dst->str = kstrdup(src->str, GFP_ATOMIC);
		if (!dst->str)
			return -ENOMEM;
		dst->len = src->len;
	} else {
		dst->str = NULL;
		dst->len = 0;
	}
	rc = mls_context_cpy(dst, src);
	if (rc) {
		kfree(dst->str);
		dst->str = NULL;
		dst->len = 0;
		return rc;
	}
	return 0;
}

static inline void context_destroy(struct context *c)
{
	c->user = c->role = c->type = 0;
	kfree(c->str);
	c->str = NULL;
	c->len = 0;
	mls_context_destroy(c);
}

static inline bool context_equal(const struct context *c1,
				 const struct context *c2)
{
	if (c1->len && c2->len)
		return (c1->len == c2->len && !strcmp(c1->str, c2->str));
	if (c1->len || c2->len)
		return false;
	return ((c1->user == c2->user) && (c1->role == c2->role) &&
		(c1->type == c2->type) && mls_context_equal(c1, c2));
}

u32 context_compute_hash(const struct context *c);

#endif /* _SS_CONTEXT_H_ */
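A hedged sketch of the copy/destroy pairing these helpers expect; my_dup_context() is an invented caller. context_cpy() deep-copies both the MLS category bitmaps and any fallback string representation, so every successful copy must eventually be matched by context_destroy().

/* Hypothetical caller showing the init/copy/destroy discipline. */
static int my_dup_context(struct context *dst, const struct context *src)
{
	int rc;

	context_init(dst);		/* start from a zeroed context */
	rc = context_cpy(dst, src);	/* -ENOMEM if kstrdup/ebitmap_cpy fail */
	if (rc)
		return rc;

	/* ... use dst ... */

	context_destroy(dst);		/* frees str and the category bitmaps */
	return 0;
}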
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ASIX AX8817X based USB 2.0 Ethernet Devices
 * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
 * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
 * Copyright (c) 2002-2003 TiVo Inc.
 */

#include "asix.h"

#define PHY_MODE_MARVELL	0x0000
#define MII_MARVELL_LED_CTRL	0x0018
#define MII_MARVELL_STATUS	0x001b
#define MII_MARVELL_CTRL	0x0014

#define MARVELL_LED_MANUAL	0x0019

#define MARVELL_STATUS_HWCFG	0x0004

#define MARVELL_CTRL_TXDELAY	0x0002
#define MARVELL_CTRL_RXDELAY	0x0080

#define PHY_MODE_RTL8211CL	0x000C

#define AX88772A_PHY14H		0x14
#define AX88772A_PHY14H_DEFAULT	0x442C

#define AX88772A_PHY15H		0x15
#define AX88772A_PHY15H_DEFAULT	0x03C8

#define AX88772A_PHY16H		0x16
#define AX88772A_PHY16H_DEFAULT	0x4044

struct ax88172_int_data {
	__le16 res1;
	u8 link;
	__le16 res2;
	u8 status;
	__le16 res3;
} __packed;

static void asix_status(struct usbnet *dev, struct urb *urb)
{
	struct ax88172_int_data *event;
	int link;

	if (urb->actual_length < 8)
		return;

	event = urb->transfer_buffer;
	link = event->link & 0x01;
	if (netif_carrier_ok(dev->net) != link) {
		usbnet_link_change(dev, link, 1);
		netdev_dbg(dev->net, "Link Status is: %d\n", link);
	}
}

static void asix_set_netdev_dev_addr(struct usbnet *dev, u8 *addr)
{
	if (is_valid_ether_addr(addr)) {
		eth_hw_addr_set(dev->net, addr);
	} else {
		netdev_info(dev->net, "invalid hw address, using random\n");
		eth_hw_addr_random(dev->net);
	}
}

/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
static u32 asix_get_phyid(struct usbnet *dev)
{
	int phy_reg;
	u32 phy_id;
	int i;

	/* Poll for the rare case the FW or phy isn't ready yet. */
	for (i = 0; i < 100; i++) {
		phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id,
					 MII_PHYSID1);
		if (phy_reg < 0)
			return 0;
		if (phy_reg != 0 && phy_reg != 0xFFFF)
			break;
		mdelay(1);
	}

	if (phy_reg <= 0 || phy_reg == 0xFFFF)
		return 0;

	phy_id = (phy_reg & 0xffff) << 16;

	phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID2);
	if (phy_reg < 0)
		return 0;

	phy_id |= (phy_reg & 0xffff);

	return phy_id;
}

static u32 asix_get_link(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	return mii_link_ok(&dev->mii);
}

static int asix_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
{
	struct usbnet *dev = netdev_priv(net);

	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
}

/* We need to override some ethtool_ops so we require our own
   structure so we don't interfere with other usbnet devices that
   may be connected at the same time. */
static const struct ethtool_ops ax88172_ethtool_ops = {
	.get_drvinfo		= asix_get_drvinfo,
	.get_link		= asix_get_link,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_wol		= asix_get_wol,
	.set_wol		= asix_set_wol,
	.get_eeprom_len		= asix_get_eeprom_len,
	.get_eeprom		= asix_get_eeprom,
	.set_eeprom		= asix_set_eeprom,
	.nway_reset		= usbnet_nway_reset,
	.get_link_ksettings	= usbnet_get_link_ksettings_mii,
	.set_link_ksettings	= usbnet_set_link_ksettings_mii,
};

static void ax88172_set_multicast(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);
	struct asix_data *data = (struct asix_data *)&dev->data;
	u8 rx_ctl = 0x8c;

	if (net->flags & IFF_PROMISC) {
		rx_ctl |= 0x01;
	} else if (net->flags & IFF_ALLMULTI ||
		   netdev_mc_count(net) > AX_MAX_MCAST) {
		rx_ctl |= 0x02;
	} else if (netdev_mc_empty(net)) {
		/* just broadcast and directed */
	} else {
		/* We use the 20 byte dev->data
		 * for our 8 byte filter buffer
		 * to avoid allocating memory that
		 * is tricky to free later */
		struct netdev_hw_addr *ha;
		u32 crc_bits;

		memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);

		/* Build the multicast hash filter. */
		netdev_for_each_mc_addr(ha, net) {
			crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
			data->multi_filter[crc_bits >> 3] |=
			    1 << (crc_bits & 7);
		}

		asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
				     AX_MCAST_FILTER_SIZE, data->multi_filter);

		rx_ctl |= 0x10;
	}

	asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
}

static int ax88172_link_reset(struct usbnet *dev)
{
	u8 mode;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

	mii_check_media(&dev->mii, 1, 1);
	mii_ethtool_gset(&dev->mii, &ecmd);
	mode = AX88172_MEDIUM_DEFAULT;

	if (ecmd.duplex != DUPLEX_FULL)
		mode |= ~AX88172_MEDIUM_FD;

	netdev_dbg(dev->net, "ax88172_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
		   ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);

	asix_write_medium_mode(dev, mode, 0);

	return 0;
}

static const struct net_device_ops ax88172_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= asix_ioctl,
	.ndo_set_rx_mode	= ax88172_set_multicast,
};

static void asix_phy_reset(struct usbnet *dev, unsigned int reset_bits)
{
	unsigned int timeout = 5000;

	asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, reset_bits);

	/* give phy_id a chance to process reset */
	udelay(500);

	/* See IEEE 802.3 "22.2.4.1.1 Reset": 500ms max */
	while (timeout--) {
		if (asix_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR)
							& BMCR_RESET)
			udelay(100);
		else
			return;
	}

	netdev_err(dev->net, "BMCR_RESET timeout on phy_id %d\n",
		   dev->mii.phy_id);
}

static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
{
	int ret = 0;
	u8 buf[ETH_ALEN] = {0};
	int i;
	unsigned long gpio_bits = dev->driver_info->data;

	usbnet_get_endpoints(dev, intf);

	/* Toggle the GPIOs in a manufacturer/model specific way */
	for (i = 2; i >= 0; i--) {
		ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS,
				     (gpio_bits >> (i * 8)) & 0xff, 0, 0,
				     NULL, 0);
		if (ret < 0)
			goto out;
		msleep(5);
	}

	ret = asix_write_rx_ctl(dev, 0x80, 0);
	if (ret < 0)
		goto out;

	/* Get the MAC address */
	ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID,
			    0, 0, ETH_ALEN, buf, 0);
	if (ret < 0) {
		netdev_dbg(dev->net, "read AX_CMD_READ_NODE_ID failed: %d\n",
			   ret);
		goto out;
	}

	asix_set_netdev_dev_addr(dev, buf);

	/* Initialize MII structure */
	dev->mii.dev = dev->net;
	dev->mii.mdio_read = asix_mdio_read;
	dev->mii.mdio_write = asix_mdio_write;
	dev->mii.phy_id_mask = 0x3f;
	dev->mii.reg_num_mask = 0x1f;
	dev->mii.phy_id = asix_read_phy_addr(dev, true);
	if (dev->mii.phy_id < 0)
		return dev->mii.phy_id;

	dev->net->netdev_ops = &ax88172_netdev_ops;
	dev->net->ethtool_ops = &ax88172_ethtool_ops;
	dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
	dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */

	asix_phy_reset(dev, BMCR_RESET);
	asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
			ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
	mii_nway_restart(&dev->mii);

	return 0;

out:
	return ret;
}

static void ax88772_ethtool_get_strings(struct net_device *netdev, u32 sset,
					u8 *data)
{
	switch (sset) {
	case ETH_SS_TEST:
		net_selftest_get_strings(data);
		break;
	}
}

static int ax88772_ethtool_get_sset_count(struct net_device *ndev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return net_selftest_get_count();
	default:
		return -EOPNOTSUPP;
	}
}

static void ax88772_ethtool_get_pauseparam(struct net_device *ndev,
					   struct ethtool_pauseparam *pause)
{
	struct usbnet *dev = netdev_priv(ndev);
	struct asix_common_private *priv = dev->driver_priv;

	phylink_ethtool_get_pauseparam(priv->phylink, pause);
}

static int ax88772_ethtool_set_pauseparam(struct net_device *ndev,
					  struct ethtool_pauseparam *pause)
{
	struct usbnet *dev = netdev_priv(ndev);
	struct asix_common_private *priv = dev->driver_priv;

	return phylink_ethtool_set_pauseparam(priv->phylink, pause);
}

static const struct ethtool_ops ax88772_ethtool_ops = {
	.get_drvinfo		= asix_get_drvinfo,
	.get_link		= usbnet_get_link,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_wol		= asix_get_wol,
	.set_wol		= asix_set_wol,
	.get_eeprom_len		= asix_get_eeprom_len,
	.get_eeprom		= asix_get_eeprom,
	.set_eeprom		= asix_set_eeprom,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.self_test		= net_selftest,
	.get_strings		= ax88772_ethtool_get_strings,
	.get_sset_count		= ax88772_ethtool_get_sset_count,
	.get_pauseparam		= ax88772_ethtool_get_pauseparam,
	.set_pauseparam		= ax88772_ethtool_set_pauseparam,
};

static int ax88772_reset(struct usbnet *dev)
{
	struct asix_data *data = (struct asix_data *)&dev->data;
	struct asix_common_private *priv = dev->driver_priv;
	int ret;

	/* Rewrite MAC address */
	ether_addr_copy(data->mac_addr, dev->net->dev_addr);
	ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0,
			     ETH_ALEN, data->mac_addr, 0);
	if (ret < 0)
		goto out;

	/* Set RX_CTL to default values with 2k buffer, and enable cactus */
	ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, 0);
	if (ret < 0)
		goto out;

	ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, 0);
	if (ret < 0)
		goto out;

	phylink_start(priv->phylink);

	return 0;

out:
	return ret;
}

static int ax88772_hw_reset(struct usbnet *dev, int in_pm)
{
	struct asix_data *data = (struct asix_data *)&dev->data;
	struct asix_common_private *priv = dev->driver_priv;
	u16 rx_ctl;
	int ret;

	ret = asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_2 |
			      AX_GPIO_GPO2EN, 5, in_pm);
	if (ret < 0)
		goto out;

	ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy,
			     0, 0, NULL, in_pm);
	if (ret < 0) {
		netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
		goto out;
	}

	if (priv->embd_phy) {
		ret = asix_sw_reset(dev, AX_SWRESET_IPPD, in_pm);
		if (ret < 0)
			goto out;

		usleep_range(10000, 11000);

		ret = asix_sw_reset(dev, AX_SWRESET_CLEAR, in_pm);
		if (ret < 0)
			goto out;

		msleep(60);

		ret = asix_sw_reset(dev, AX_SWRESET_IPRL | AX_SWRESET_PRL,
				    in_pm);
		if (ret < 0)
			goto out;
	} else {
		ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL,
				    in_pm);
		if (ret < 0)
			goto out;
	}

	msleep(150);

	if (in_pm && (!asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
					   MII_PHYSID1))) {
		ret = -EIO;
		goto out;
	}

	ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, in_pm);
	if (ret < 0)
		goto out;

	ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, in_pm);
	if (ret < 0)
		goto out;

	ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
			     AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
			     AX88772_IPG2_DEFAULT, 0, NULL, in_pm);
	if (ret < 0) {
		netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
		goto out;
	}

	/* Rewrite MAC address */
	ether_addr_copy(data->mac_addr, dev->net->dev_addr);
	ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0,
			     ETH_ALEN, data->mac_addr, in_pm);
	if (ret < 0)
		goto out;

	/* Set RX_CTL to default values with 2k buffer, and enable cactus */
	ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, in_pm);
	if (ret < 0)
		goto out;

	rx_ctl = asix_read_rx_ctl(dev, in_pm);
	netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
		   rx_ctl);

	rx_ctl = asix_read_medium_status(dev, in_pm);
	netdev_dbg(dev->net,
		   "Medium Status is 0x%04x after all initializations\n",
		   rx_ctl);

	return 0;

out:
	return ret;
}

static int ax88772a_hw_reset(struct usbnet *dev, int in_pm)
{
	struct asix_data *data = (struct asix_data *)&dev->data;
	struct asix_common_private *priv = dev->driver_priv;
	u16 rx_ctl, phy14h, phy15h, phy16h;
	int ret;

	ret = asix_write_gpio(dev, AX_GPIO_RSE, 5, in_pm);
	if (ret < 0)
		goto out;

	ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy |
			     AX_PHYSEL_SSEN, 0, 0, NULL, in_pm);
	if (ret < 0) {
		netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
		goto out;
	}
	usleep_range(10000, 11000);

	ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_IPRL, in_pm);
	if (ret < 0)
		goto out;

	usleep_range(10000, 11000);

	ret = asix_sw_reset(dev, AX_SWRESET_IPRL, in_pm);
	if (ret < 0)
		goto out;

	msleep(160);

	ret = asix_sw_reset(dev, AX_SWRESET_CLEAR, in_pm);
	if (ret < 0)
		goto out;

	ret = asix_sw_reset(dev, AX_SWRESET_IPRL, in_pm);
	if (ret < 0)
		goto out;

	msleep(200);

	if (in_pm && (!asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
					   MII_PHYSID1))) {
		ret = -1;
		goto out;
	}

	if (priv->chipcode == AX_AX88772B_CHIPCODE) {
		ret = asix_write_cmd(dev, AX_QCTCTRL, 0x8000, 0x8001,
				     0, NULL, in_pm);
		if (ret < 0) {
			netdev_dbg(dev->net, "Write BQ setting failed: %d\n",
				   ret);
			goto out;
		}
	} else if (priv->chipcode == AX_AX88772A_CHIPCODE) {
		/* Check if the PHY registers have default settings */
		phy14h = asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
					     AX88772A_PHY14H);
		phy15h = asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
					     AX88772A_PHY15H);
		phy16h = asix_mdio_read_nopm(dev->net, dev->mii.phy_id,
					     AX88772A_PHY16H);
		netdev_dbg(dev->net,
			   "772a_hw_reset: MR20=0x%x MR21=0x%x MR22=0x%x\n",
			   phy14h, phy15h, phy16h);

		/* Restore PHY registers default setting if not */
		if (phy14h != AX88772A_PHY14H_DEFAULT)
			asix_mdio_write_nopm(dev->net, dev->mii.phy_id,
					     AX88772A_PHY14H,
					     AX88772A_PHY14H_DEFAULT);
		if (phy15h != AX88772A_PHY15H_DEFAULT)
			asix_mdio_write_nopm(dev->net, dev->mii.phy_id,
					     AX88772A_PHY15H,
					     AX88772A_PHY15H_DEFAULT);
		if (phy16h != AX88772A_PHY16H_DEFAULT)
			asix_mdio_write_nopm(dev->net, dev->mii.phy_id,
					     AX88772A_PHY16H,
					     AX88772A_PHY16H_DEFAULT);
	}

	ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
			     AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
			     AX88772_IPG2_DEFAULT, 0, NULL, in_pm);
	if (ret < 0) {
		netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
		goto out;
	}

	/* Rewrite MAC address */
	memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
	ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
			     data->mac_addr, in_pm);
	if (ret < 0)
		goto out;

	/* Set RX_CTL to default values with 2k buffer, and enable cactus */
	ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, in_pm);
	if (ret < 0)
		goto out;

	ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT, in_pm);
	if (ret < 0)
		return ret;

	/* Set RX_CTL to default values with 2k buffer, and enable cactus */
	ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, in_pm);
	if (ret < 0)
		goto out;

	rx_ctl = asix_read_rx_ctl(dev, in_pm);
	netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
		   rx_ctl);

	rx_ctl = asix_read_medium_status(dev, in_pm);
	netdev_dbg(dev->net,
		   "Medium Status is 0x%04x after all initializations\n",
		   rx_ctl);

	return 0;

out:
	return ret;
}

static const struct net_device_ops ax88772_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_set_mac_address	= asix_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= asix_set_multicast,
};

static void ax88772_suspend(struct usbnet *dev)
{
	struct asix_common_private *priv = dev->driver_priv;
	u16 medium;

	if (netif_running(dev->net)) {
		rtnl_lock();
		phylink_suspend(priv->phylink, false);
		rtnl_unlock();
	}

	/* Stop MAC operation */
	medium = asix_read_medium_status(dev, 1);
	medium &= ~AX_MEDIUM_RE;
	asix_write_medium_mode(dev, medium, 1);

	netdev_dbg(dev->net, "ax88772_suspend: medium=0x%04x\n",
		   asix_read_medium_status(dev, 1));
}

static int asix_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usbnet *dev = usb_get_intfdata(intf);
	struct asix_common_private *priv = dev->driver_priv;

	if (priv && priv->suspend)
		priv->suspend(dev);

	return usbnet_suspend(intf, message);
}

static void ax88772_resume(struct usbnet *dev)
{
	struct asix_common_private *priv = dev->driver_priv;
	int i;

	for (i = 0; i < 3; i++)
		if (!priv->reset(dev, 1))
			break;

	if (netif_running(dev->net)) {
		rtnl_lock();
		phylink_resume(priv->phylink);
		rtnl_unlock();
	}
}

static int asix_resume(struct usb_interface *intf)
{
	struct usbnet *dev = usb_get_intfdata(intf);
	struct asix_common_private *priv = dev->driver_priv;

	if (priv && priv->resume)
		priv->resume(dev);

	return usbnet_resume(intf);
}

static int ax88772_init_mdio(struct usbnet *dev)
{
	struct asix_common_private *priv = dev->driver_priv;
	int ret;

	priv->mdio = mdiobus_alloc();
	if (!priv->mdio)
		return -ENOMEM;

	priv->mdio->priv = dev;
	priv->mdio->read = &asix_mdio_bus_read;
	priv->mdio->write = &asix_mdio_bus_write;
	priv->mdio->name = "Asix MDIO Bus";
	/* mii bus name is usb-<usb bus number>-<usb device number> */
	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	ret = mdiobus_register(priv->mdio);
	if (ret) {
		netdev_err(dev->net, "Could not register MDIO bus (err %d)\n",
			   ret);
		mdiobus_free(priv->mdio);
		priv->mdio = NULL;
	}

	return ret;
}

static void ax88772_mdio_unregister(struct asix_common_private *priv)
{
	mdiobus_unregister(priv->mdio);
	mdiobus_free(priv->mdio);
}

static int ax88772_init_phy(struct usbnet *dev)
{
	struct asix_common_private *priv = dev->driver_priv;
	int ret;

	priv->phydev = mdiobus_get_phy(priv->mdio, priv->phy_addr);
	if (!priv->phydev) {
		netdev_err(dev->net, "Could not find PHY\n");
		return -ENODEV;
	}

	ret = phylink_connect_phy(priv->phylink, priv->phydev);
	if (ret) {
		netdev_err(dev->net, "Could not connect PHY\n");
		return ret;
	}

	phy_suspend(priv->phydev);
	priv->phydev->mac_managed_pm = true;

	phy_attached_info(priv->phydev);

	if (priv->embd_phy)
		return 0;

	/* In case main PHY is not the embedded PHY and MAC is RMII clock
	 * provider, we need to suspend embedded PHY by keeping PLL enabled
	 * (AX_SWRESET_IPPD == 0).
	 */
	priv->phydev_int = mdiobus_get_phy(priv->mdio, AX_EMBD_PHY_ADDR);
	if (!priv->phydev_int) {
		rtnl_lock();
		phylink_disconnect_phy(priv->phylink);
		rtnl_unlock();
		netdev_err(dev->net, "Could not find internal PHY\n");
		return -ENODEV;
	}

	priv->phydev_int->mac_managed_pm = true;
	phy_suspend(priv->phydev_int);

	return 0;
}

static void ax88772_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* Nothing to do */
}

static void ax88772_mac_link_down(struct phylink_config *config,
				  unsigned int mode, phy_interface_t interface)
{
	struct usbnet *dev = netdev_priv(to_net_dev(config->dev));

	asix_write_medium_mode(dev, 0, 0);
	usbnet_link_change(dev, false, false);
}

static void ax88772_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct usbnet *dev = netdev_priv(to_net_dev(config->dev));
	u16 m = AX_MEDIUM_AC | AX_MEDIUM_RE;

	m |= duplex ? AX_MEDIUM_FD : 0;

	switch (speed) {
	case SPEED_100:
		m |= AX_MEDIUM_PS;
		break;
	case SPEED_10:
		break;
	default:
		return;
	}

	if (tx_pause)
		m |= AX_MEDIUM_TFC;

	if (rx_pause)
		m |= AX_MEDIUM_RFC;

	asix_write_medium_mode(dev, m, 0);
	usbnet_link_change(dev, true, false);
}

static const struct phylink_mac_ops ax88772_phylink_mac_ops = {
	.mac_config = ax88772_mac_config,
	.mac_link_down = ax88772_mac_link_down,
	.mac_link_up = ax88772_mac_link_up,
};

static int ax88772_phylink_setup(struct usbnet *dev)
{
	struct asix_common_private *priv = dev->driver_priv;
	phy_interface_t phy_if_mode;
	struct phylink *phylink;

	priv->phylink_config.dev = &dev->net->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE |
		MAC_ASYM_PAUSE | MAC_10 | MAC_100;

	__set_bit(PHY_INTERFACE_MODE_INTERNAL,
		  priv->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_RMII,
		  priv->phylink_config.supported_interfaces);

	if (priv->embd_phy)
		phy_if_mode = PHY_INTERFACE_MODE_INTERNAL;
	else
		phy_if_mode = PHY_INTERFACE_MODE_RMII;

	phylink = phylink_create(&priv->phylink_config, dev->net->dev.fwnode,
				 phy_if_mode, &ax88772_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}

static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
	struct asix_common_private *priv;
	u8 buf[ETH_ALEN] = {0};
	int ret, i;

	priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev->driver_priv = priv;

	usbnet_get_endpoints(dev, intf);

	/* Maybe the boot loader passed the MAC address via device tree */
	if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) {
		netif_dbg(dev, ifup, dev->net,
			  "MAC address read from device tree");
	} else {
		/* Try getting the MAC address from EEPROM */
		if (dev->driver_info->data & FLAG_EEPROM_MAC) {
			for (i = 0; i < (ETH_ALEN >> 1); i++) {
				ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM,
						    0x04 + i, 0, 2,
						    buf + i * 2, 0);
				if (ret < 0)
					break;
			}
		} else {
			ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
					    0, 0, ETH_ALEN, buf, 0);
		}

		if (ret < 0) {
			netdev_dbg(dev->net,
				   "Failed to read MAC address: %d\n", ret);
			return ret;
		}
	}

	asix_set_netdev_dev_addr(dev, buf);

	dev->net->netdev_ops = &ax88772_netdev_ops;
	dev->net->ethtool_ops = &ax88772_ethtool_ops;
	dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
	dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */

	ret = asix_read_phy_addr(dev, true);
	if (ret < 0)
		return ret;

	priv->phy_addr = ret;
	priv->embd_phy = ((priv->phy_addr & 0x1f) == AX_EMBD_PHY_ADDR);

	ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1,
			    &priv->chipcode, 0);
	if (ret < 0) {
		netdev_dbg(dev->net, "Failed to read STATMNGSTS_REG: %d\n",
			   ret);
		return ret;
	}

	priv->chipcode &= AX_CHIPCODE_MASK;

	priv->resume = ax88772_resume;
	priv->suspend = ax88772_suspend;
	if (priv->chipcode == AX_AX88772_CHIPCODE)
		priv->reset = ax88772_hw_reset;
	else
		priv->reset = ax88772a_hw_reset;

	ret = priv->reset(dev, 0);
	if (ret < 0) {
		netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
		return ret;
	}

	/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
	if (dev->driver_info->flags & FLAG_FRAMING_AX) {
		/* hard_mtu is still the default - the device does not support
		   jumbo eth frames */
		dev->rx_urb_size = 2048;
	}

	priv->presvd_phy_bmcr = 0;
	priv->presvd_phy_advertise = 0;

	ret = ax88772_init_mdio(dev);
	if (ret)
		goto mdio_err;

	ret = ax88772_phylink_setup(dev);
	if (ret)
		goto phylink_err;

	ret = ax88772_init_phy(dev);
	if (ret)
		goto initphy_err;

	return 0;

initphy_err:
	phylink_destroy(priv->phylink);

phylink_err:
	ax88772_mdio_unregister(priv);

mdio_err:
	return ret;
}

static int ax88772_stop(struct usbnet *dev)
{
	struct asix_common_private *priv = dev->driver_priv;

	phylink_stop(priv->phylink);

	return 0;
}

static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
{
	struct asix_common_private *priv = dev->driver_priv;

	rtnl_lock();
	phylink_disconnect_phy(priv->phylink);
	rtnl_unlock();

	phylink_destroy(priv->phylink);

	ax88772_mdio_unregister(priv);

	asix_rx_fixup_common_free(dev->driver_priv);
}

static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf)
{
	asix_rx_fixup_common_free(dev->driver_priv);
	kfree(dev->driver_priv);
}

static const struct ethtool_ops ax88178_ethtool_ops = {
	.get_drvinfo		= asix_get_drvinfo,
	.get_link		= asix_get_link,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_wol		= asix_get_wol,
	.set_wol		= asix_set_wol,
	.get_eeprom_len		= asix_get_eeprom_len,
	.get_eeprom		= asix_get_eeprom,
	.set_eeprom		= asix_set_eeprom,
	.nway_reset		= usbnet_nway_reset,
	.get_link_ksettings	= usbnet_get_link_ksettings_mii,
	.set_link_ksettings	= usbnet_set_link_ksettings_mii,
};

static int marvell_phy_init(struct usbnet *dev)
{
	struct asix_data *data = (struct asix_data *)&dev->data;
	u16 reg;

	netdev_dbg(dev->net, "marvell_phy_init()\n");

	reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_MARVELL_STATUS);
	netdev_dbg(dev->net, "MII_MARVELL_STATUS = 0x%04x\n", reg);

	asix_mdio_write(dev->net, dev->mii.phy_id, MII_MARVELL_CTRL,
			MARVELL_CTRL_RXDELAY | MARVELL_CTRL_TXDELAY);

	if (data->ledmode) {
		reg = asix_mdio_read(dev->net, dev->mii.phy_id,
				     MII_MARVELL_LED_CTRL);
		netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (1) = 0x%04x\n",
			   reg);

		reg &= 0xf8ff;
		reg |= (1 + 0x0100);
		asix_mdio_write(dev->net, dev->mii.phy_id,
				MII_MARVELL_LED_CTRL, reg);

		reg = asix_mdio_read(dev->net, dev->mii.phy_id,
				     MII_MARVELL_LED_CTRL);
		netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (2) = 0x%04x\n",
			   reg);
	}

	return 0;
}

static int rtl8211cl_phy_init(struct usbnet *dev)
{
	struct asix_data *data = (struct asix_data *)&dev->data;

	netdev_dbg(dev->net, "rtl8211cl_phy_init()\n");

	asix_mdio_write(dev->net, dev->mii.phy_id, 0x1f, 0x0005);
	asix_mdio_write(dev->net, dev->mii.phy_id, 0x0c, 0);
	asix_mdio_write(dev->net, dev->mii.phy_id, 0x01,
			asix_mdio_read(dev->net, dev->mii.phy_id, 0x01) |
			0x0080);
	asix_mdio_write(dev->net, dev->mii.phy_id, 0x1f, 0);

	if (data->ledmode == 12) {
		asix_mdio_write(dev->net, dev->mii.phy_id, 0x1f, 0x0002);
		asix_mdio_write(dev->net, dev->mii.phy_id, 0x1a, 0x00cb);
		asix_mdio_write(dev->net, dev->mii.phy_id, 0x1f, 0);
	}

	return 0;
}

static int marvell_led_status(struct usbnet *dev, u16 speed)
{
	u16 reg = asix_mdio_read(dev->net, dev->mii.phy_id,
				 MARVELL_LED_MANUAL);

	netdev_dbg(dev->net, "marvell_led_status() read 0x%04x\n", reg);

	/* Clear out the center LED bits - 0x03F0 */
	reg &= 0xfc0f;

	switch (speed) {
	case SPEED_1000:
		reg |= 0x03e0;
		break;
	case SPEED_100:
		reg |= 0x03b0;
		break;
	default:
		reg |= 0x02f0;
	}

	netdev_dbg(dev->net, "marvell_led_status() writing 0x%04x\n", reg);
	asix_mdio_write(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL, reg);

	return 0;
}

static int ax88178_reset(struct usbnet *dev)
{
	struct asix_data *data = (struct asix_data *)&dev->data;
	int ret;
	__le16 eeprom;
	u8 status;
	int gpio0 = 0;
	u32 phyid;

	ret = asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status, 0);
	if (ret < 0) {
		netdev_dbg(dev->net, "Failed to read GPIOS: %d\n", ret);
		return ret;
	}

	netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status);

	asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL, 0);
	ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom, 0);
	if (ret < 0) {
		netdev_dbg(dev->net, "Failed to read EEPROM: %d\n", ret);
		return ret;
	}

	asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL, 0);

	netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom);

	if (eeprom == cpu_to_le16(0xffff)) {
		data->phymode = PHY_MODE_MARVELL;
		data->ledmode = 0;
		gpio0 = 1;
	} else {
		data->phymode = le16_to_cpu(eeprom) & 0x7F;
		data->ledmode = le16_to_cpu(eeprom) >> 8;
		gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
	}
	netdev_dbg(dev->net, "GPIO0: %d, PhyMode: %d\n", gpio0, data->phymode);

	/* Power up external GigaPHY through AX88178 GPIO pin */
	asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN,
			40, 0);
	if ((le16_to_cpu(eeprom) >> 8) != 1) {
		asix_write_gpio(dev, 0x003c, 30, 0);
		asix_write_gpio(dev, 0x001c, 300, 0);
		asix_write_gpio(dev, 0x003c, 30, 0);
	} else {
		netdev_dbg(dev->net, "gpio phymode == 1 path\n");
		asix_write_gpio(dev, AX_GPIO_GPO1EN, 30, 0);
		asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30, 0);
	}

	/* Read PHYID register *AFTER* powering up PHY */
	phyid = asix_get_phyid(dev);
	netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);

	/* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */
	asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL, 0);
	asix_sw_reset(dev, 0, 0);
	msleep(150);

	asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD, 0);
	msleep(150);

	asix_write_rx_ctl(dev, 0, 0);

	if (data->phymode == PHY_MODE_MARVELL) {
		marvell_phy_init(dev);
		msleep(60);
	} else if (data->phymode == PHY_MODE_RTL8211CL)
		rtl8211cl_phy_init(dev);

	asix_phy_reset(dev, BMCR_RESET | BMCR_ANENABLE);
	asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
			ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
	asix_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000,
			ADVERTISE_1000FULL);

	asix_write_medium_mode(dev, AX88178_MEDIUM_DEFAULT, 0);
	mii_nway_restart(&dev->mii);

	/* Rewrite MAC address */
	memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
	ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
			     data->mac_addr, 0);
	if (ret < 0)
		return ret;

	ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, 0);
	if (ret < 0)
		return ret;

	return 0;
}

static int ax88178_link_reset(struct usbnet *dev)
{
	u16 mode;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct asix_data *data = (struct asix_data *)&dev->data;
	u32 speed;

	netdev_dbg(dev->net, "ax88178_link_reset()\n");

	mii_check_media(&dev->mii, 1, 1);
	mii_ethtool_gset(&dev->mii, &ecmd);
	mode = AX88178_MEDIUM_DEFAULT;
	speed = ethtool_cmd_speed(&ecmd);

	if (speed == SPEED_1000)
		mode |= AX_MEDIUM_GM;
	else if (speed == SPEED_100)
		mode |= AX_MEDIUM_PS;
	else
		mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM);

	mode |= AX_MEDIUM_ENCK;

	if (ecmd.duplex == DUPLEX_FULL)
		mode |= AX_MEDIUM_FD;
	else
		mode &= ~AX_MEDIUM_FD;

	netdev_dbg(dev->net, "ax88178_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
		   speed, ecmd.duplex, mode);

	asix_write_medium_mode(dev, mode, 0);

	if (data->phymode == PHY_MODE_MARVELL && data->ledmode)
		marvell_led_status(dev, speed);

	return 0;
}

static void ax88178_set_mfb(struct usbnet *dev)
{
	u16 mfb = AX_RX_CTL_MFB_16384;
	u16 rxctl;
	u16 medium;
	int old_rx_urb_size = dev->rx_urb_size;

	if (dev->hard_mtu < 2048) {
		dev->rx_urb_size = 2048;
		mfb = AX_RX_CTL_MFB_2048;
	} else if (dev->hard_mtu < 4096) {
		dev->rx_urb_size = 4096;
		mfb = AX_RX_CTL_MFB_4096;
	} else if (dev->hard_mtu < 8192) {
		dev->rx_urb_size = 8192;
		mfb = AX_RX_CTL_MFB_8192;
	} else if (dev->hard_mtu < 16384) {
		dev->rx_urb_size = 16384;
		mfb = AX_RX_CTL_MFB_16384;
	}

	rxctl = asix_read_rx_ctl(dev, 0);
	asix_write_rx_ctl(dev, (rxctl & ~AX_RX_CTL_MFB_16384) | mfb, 0);

	medium = asix_read_medium_status(dev, 0);
	if (dev->net->mtu > 1500)
		medium |= AX_MEDIUM_JFE;
	else
		medium &= ~AX_MEDIUM_JFE;
	asix_write_medium_mode(dev, medium, 0);

	if (dev->rx_urb_size > old_rx_urb_size)
		usbnet_unlink_rx_urbs(dev);
}

static int ax88178_change_mtu(struct net_device *net, int new_mtu)
{
	struct usbnet *dev = netdev_priv(net);
	int ll_mtu = new_mtu + net->hard_header_len + 4;

	netdev_dbg(dev->net, "ax88178_change_mtu() new_mtu=%d\n", new_mtu);

	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	WRITE_ONCE(net->mtu, new_mtu);
	dev->hard_mtu = net->mtu + net->hard_header_len;
	ax88178_set_mfb(dev);

	/* max qlen depend on hard_mtu and rx_urb_size */
	usbnet_update_max_qlen(dev);

	return 0;
}

static const struct net_device_ops ax88178_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_set_mac_address	= asix_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= asix_set_multicast,
	.ndo_eth_ioctl		= asix_ioctl,
	.ndo_change_mtu		= ax88178_change_mtu,
};

static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
{
	int ret;
	u8 buf[ETH_ALEN] = {0};

	usbnet_get_endpoints(dev, intf);

	/* Get the MAC address */
	ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
	if (ret < 0) {
		netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
		return ret;
	}

	asix_set_netdev_dev_addr(dev, buf);

	/* Initialize MII structure */
	dev->mii.dev = dev->net;
	dev->mii.mdio_read = asix_mdio_read;
	dev->mii.mdio_write = asix_mdio_write;
	dev->mii.phy_id_mask = 0x1f;
	dev->mii.reg_num_mask = 0xff;
	dev->mii.supports_gmii = 1;
	dev->mii.phy_id = asix_read_phy_addr(dev, true);
	if (dev->mii.phy_id < 0)
		return dev->mii.phy_id;

	dev->net->netdev_ops = &ax88178_netdev_ops;
	dev->net->ethtool_ops = &ax88178_ethtool_ops;
	dev->net->max_mtu = 16384 - (dev->net->hard_header_len + 4);

	/* Blink LEDS so users know driver saw dongle */
	asix_sw_reset(dev, 0, 0);
	msleep(150);

	asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD, 0);
	msleep(150);

	/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
	if (dev->driver_info->flags & FLAG_FRAMING_AX) {
		/* hard_mtu is still the default - the device does not support
		   jumbo eth frames */
		dev->rx_urb_size = 2048;
	}

	dev->driver_priv = kzalloc(sizeof(struct asix_common_private),
				   GFP_KERNEL);
	if (!dev->driver_priv)
		return -ENOMEM;

	return 0;
}

static const struct driver_info ax8817x_info = {
	.description = "ASIX AX8817x USB 2.0 Ethernet",
	.bind = ax88172_bind,
	.status = asix_status,
	.link_reset = ax88172_link_reset,
	.reset = ax88172_link_reset,
	.flags = FLAG_ETHER | FLAG_LINK_INTR,
	.data = 0x00130103,
};

static const struct driver_info dlink_dub_e100_info = {
	.description = "DLink DUB-E100 USB Ethernet",
	.bind = ax88172_bind,
	.status = asix_status,
	.link_reset = ax88172_link_reset,
	.reset = ax88172_link_reset,
	.flags = FLAG_ETHER | FLAG_LINK_INTR,
	.data = 0x009f9d9f,
};

static const struct driver_info netgear_fa120_info = {
	.description = "Netgear FA-120 USB Ethernet",
	.bind = ax88172_bind,
	.status = asix_status,
	.link_reset = ax88172_link_reset,
	.reset = ax88172_link_reset,
	.flags = FLAG_ETHER | FLAG_LINK_INTR,
	.data = 0x00130103,
};

static const struct driver_info hawking_uf200_info = {
	.description = "Hawking UF200 USB Ethernet",
	.bind = ax88172_bind,
	.status = asix_status,
	.link_reset = ax88172_link_reset,
	.reset = ax88172_link_reset,
	.flags = FLAG_ETHER | FLAG_LINK_INTR,
	.data = 0x001f1d1f,
};

static const struct driver_info ax88772_info = {
	.description = "ASIX AX88772 USB 2.0 Ethernet",
	.bind = ax88772_bind,
	.unbind = ax88772_unbind,
	.status = asix_status,
	.reset = ax88772_reset,
	.stop = ax88772_stop,
	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
		 FLAG_MULTI_PACKET,
	.rx_fixup = asix_rx_fixup_common,
	.tx_fixup = asix_tx_fixup,
};

static const struct driver_info ax88772b_info = {
	.description = "ASIX AX88772B USB 2.0 Ethernet",
	.bind = ax88772_bind,
	.unbind = ax88772_unbind,
	.status = asix_status,
	.reset = ax88772_reset,
	.stop = ax88772_stop,
	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
		 FLAG_MULTI_PACKET,
	.rx_fixup = asix_rx_fixup_common,
	.tx_fixup = asix_tx_fixup,
	.data = FLAG_EEPROM_MAC,
};

static const struct driver_info lxausb_t1l_info = {
	.description = "Linux Automation GmbH USB 10Base-T1L",
	.bind = ax88772_bind,
	.unbind = ax88772_unbind,
	.status = asix_status,
	.reset = ax88772_reset,
	.stop = ax88772_stop,
	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
		 FLAG_MULTI_PACKET,
	.rx_fixup = asix_rx_fixup_common,
	.tx_fixup = asix_tx_fixup,
	.data = FLAG_EEPROM_MAC,
};

static const struct driver_info ax88178_info = {
	.description = "ASIX AX88178 USB 2.0 Ethernet",
	.bind = ax88178_bind,
	.unbind = ax88178_unbind,
	.status = asix_status,
	.link_reset = ax88178_link_reset,
	.reset = ax88178_reset,
	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
		 FLAG_MULTI_PACKET,
	.rx_fixup = asix_rx_fixup_common,
	.tx_fixup = asix_tx_fixup,
};

/*
 * USBLINK 20F9 "USB 2.0 LAN" USB ethernet adapter, typically found in
 * no-name packaging. USB device strings are:
 *
 *   1: Manufacturer: USBLINK
 *   2: Product: HG20F9 USB2.0
 *   3: Serial: 000003
 *
 * Appears to be compatible with Asix 88772B.
 */
static const struct driver_info hg20f9_info = {
	.description = "HG20F9 USB 2.0 Ethernet",
	.bind = ax88772_bind,
	.unbind = ax88772_unbind,
	.status = asix_status,
	.reset = ax88772_reset,
	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
		 FLAG_MULTI_PACKET,
	.rx_fixup = asix_rx_fixup_common,
	.tx_fixup = asix_tx_fixup,
	.data = FLAG_EEPROM_MAC,
};

static const struct driver_info lyconsys_fibergecko100_info = {
	.description = "LyconSys FiberGecko 100 USB 2.0 to SFP Adapter",
	.bind = ax88178_bind,
	.status = asix_status,
	.link_reset = ax88178_link_reset,
	.reset = ax88178_link_reset,
	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
		 FLAG_MULTI_PACKET,
	.rx_fixup = asix_rx_fixup_common,
	.tx_fixup = asix_tx_fixup,
	.data = 0x20061201,
};

static const struct usb_device_id products[] = {
{
	// Linksys USB200M
	USB_DEVICE(0x077b, 0x2226),
	.driver_info = (unsigned long) &ax8817x_info,
}, {
	// Netgear FA120
	USB_DEVICE(0x0846, 0x1040),
	.driver_info = (unsigned long) &netgear_fa120_info,
}, {
	// DLink DUB-E100
	USB_DEVICE(0x2001, 0x1a00),
	.driver_info = (unsigned long) &dlink_dub_e100_info,
}, {
	// Intellinet, ST Lab USB Ethernet
	USB_DEVICE(0x0b95, 0x1720),
	.driver_info = (unsigned long) &ax8817x_info,
}, {
	// Hawking UF200, TrendNet TU2-ET100
	USB_DEVICE(0x07b8, 0x420a),
	.driver_info = (unsigned long) &hawking_uf200_info,
}, {
	// Billionton Systems, USB2AR
	USB_DEVICE(0x08dd, 0x90ff),
	.driver_info = (unsigned long) &ax8817x_info,
}, {
	// Billionton Systems, GUSB2AM-1G-B
	USB_DEVICE(0x08dd, 0x0114),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// ATEN UC210T
	USB_DEVICE(0x0557, 0x2009),
	.driver_info = (unsigned long) &ax8817x_info,
}, {
	// Buffalo LUA-U2-KTX
	USB_DEVICE(0x0411, 0x003d),
	.driver_info = (unsigned long) &ax8817x_info,
}, {
	// Buffalo LUA-U2-GT 10/100/1000
	USB_DEVICE(0x0411, 0x006e),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter"
	USB_DEVICE(0x6189, 0x182d),
	.driver_info = (unsigned long) &ax8817x_info,
}, {
	// Sitecom LN-031 "USB 2.0 10/100/1000 Ethernet adapter"
	USB_DEVICE(0x0df6, 0x0056),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
	USB_DEVICE(0x0df6, 0x061c),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// corega FEther USB2-TX
	USB_DEVICE(0x07aa, 0x0017),
	.driver_info = (unsigned long) &ax8817x_info,
}, {
	// Surecom EP-1427X-2
	USB_DEVICE(0x1189, 0x0893),
	.driver_info = (unsigned long) &ax8817x_info,
}, {
	// goodway corp usb gwusb2e
	USB_DEVICE(0x1631, 0x6200),
	.driver_info = (unsigned long) &ax8817x_info,
}, {
	// JVC MP-PRX1 Port Replicator
	USB_DEVICE(0x04f1, 0x3008),
	.driver_info = (unsigned long) &ax8817x_info,
}, {
	// Lenovo U2L100P 10/100
	USB_DEVICE(0x17ef, 0x7203),
	.driver_info = (unsigned long) &ax88772b_info,
}, {
	// ASIX AX88772B 10/100
	USB_DEVICE(0x0b95, 0x772b),
	.driver_info = (unsigned long) &ax88772b_info,
}, {
	// ASIX AX88772 10/100
	USB_DEVICE(0x0b95, 0x7720),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// ASIX AX88178 10/100/1000
	USB_DEVICE(0x0b95, 0x1780),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// Logitec LAN-GTJ/U2A
	USB_DEVICE(0x0789, 0x0160),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// Linksys USB200M Rev 2
	USB_DEVICE(0x13b1, 0x0018),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// 0Q0 cable ethernet
	USB_DEVICE(0x1557, 0x7720),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// DLink DUB-E100 H/W Ver B1
	USB_DEVICE(0x07d1, 0x3c05),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// DLink DUB-E100 H/W Ver B1 Alternate
	USB_DEVICE(0x2001, 0x3c05),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// DLink DUB-E100 H/W Ver C1
	USB_DEVICE(0x2001, 0x1a02),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// Linksys USB1000
	USB_DEVICE(0x1737, 0x0039),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// IO-DATA ETG-US2
	USB_DEVICE(0x04bb, 0x0930),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// Belkin F5D5055
	USB_DEVICE(0x050d, 0x5055),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// Apple USB Ethernet Adapter
	USB_DEVICE(0x05ac, 0x1402),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// Cables-to-Go USB Ethernet Adapter
	USB_DEVICE(0x0b95, 0x772a),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// ABOCOM for pci
	USB_DEVICE(0x14ea, 0xab11),
	.driver_info = (unsigned long) &ax88178_info,
}, {
	// ASIX 88772a
	USB_DEVICE(0x0db0, 0xa877),
	.driver_info = (unsigned long) &ax88772_info,
}, {
	// Asus USB Ethernet Adapter
	USB_DEVICE(0x0b95, 0x7e2b),
	.driver_info = (unsigned long) &ax88772b_info,
}, {
	/* ASIX 88172a demo board */
	USB_DEVICE(0x0b95, 0x172a),
	.driver_info = (unsigned long) &ax88172a_info,
}, {
	/*
	 * USBLINK HG20F9 "USB 2.0 LAN"
	 * Appears to have gazumped Linksys's manufacturer ID but
	 * doesn't (yet) conflict with any known Linksys product.
	 */
	USB_DEVICE(0x066b, 0x20f9),
	.driver_info = (unsigned long) &hg20f9_info,
}, {
	// Linux Automation GmbH USB 10Base-T1L
	USB_DEVICE(0x33f7, 0x0004),
	.driver_info = (unsigned long) &lxausb_t1l_info,
}, {
	/* LyconSys FiberGecko 100 */
	USB_DEVICE(0x1d2a, 0x0801),
	.driver_info = (unsigned long) &lyconsys_fibergecko100_info,
},
	{ },		// END
};
MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver asix_driver = {
	.name =		DRIVER_NAME,
	.id_table =	products,
	.probe =	usbnet_probe,
	.suspend =	asix_suspend,
	.resume =	asix_resume,
	.reset_resume =	asix_resume,
	.disconnect =	usbnet_disconnect,
	.supports_autosuspend = 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(asix_driver);

MODULE_AUTHOR("David Hollis");
MODULE_VERSION(DRIVER_VERSION);
MODULE_DESCRIPTION("ASIX AX8817X based USB 2.0 Ethernet Devices");
MODULE_LICENSE("GPL");
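The multicast path in ax88172_set_multicast() above hashes each multicast MAC with the kernel's big-endian CRC-32 and uses the top six CRC bits to pick one of the 64 bits in the 8-byte hardware filter. The following is a standalone userspace sketch of that index computation, not kernel code: ether_crc_be() is a local reimplementation of the bitwise algorithm behind the kernel's ether_crc(), and the example MAC is just the IPv4 all-hosts multicast group mapped to Ethernet.

/* Userspace illustration of the AX88172 multicast hash-filter indexing. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Bitwise big-endian Ethernet CRC-32 (polynomial 0x04c11db7),
 * matching the computation the kernel's ether_crc() performs. */
static uint32_t ether_crc_be(size_t len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;	/* same seed as the kernel helper */

	while (len--) {
		uint8_t byte = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, byte >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (byte & 1)) ? 0x04c11db7 : 0);
	}
	return crc;
}

int main(void)
{
	/* IPv4 all-hosts multicast group 224.0.0.1 mapped to a MAC */
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint8_t filter[8] = { 0 };	/* mirrors AX_MCAST_FILTER_SIZE */
	uint32_t crc_bits = ether_crc_be(6, mac) >> 26; /* top 6 CRC bits */

	/* Same byte/bit selection as the driver's filter update */
	filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
	printf("hash=%u -> filter byte %u, bit %u\n",
	       (unsigned)crc_bits, (unsigned)(crc_bits >> 3),
	       (unsigned)(crc_bits & 7));
	return 0;
}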
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Video capture interface for Linux version 2
 *
 * A generic video device interface for the LINUX operating system
 * using a set of device structures/vectors for low level operations.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
 *		Mauro Carvalho Chehab <mchehab@kernel.org> (version 2)
 *
 * Fixes:	20000516  Claudio Matsuoka <claudio@conectiva.com>
 *		- Added procfs support
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>

#define VIDEO_NUM_DEVICES	256
#define VIDEO_NAME		"video4linux"

#define dprintk(fmt, arg...) do {					\
		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
		       __func__, ##arg);				\
} while (0)

/*
 * sysfs stuff
 */

static ssize_t index_show(struct device *cd,
			  struct device_attribute *attr, char *buf)
{
	struct video_device *vdev = to_video_device(cd);

	return sprintf(buf, "%i\n", vdev->index);
}
static DEVICE_ATTR_RO(index);

static ssize_t dev_debug_show(struct device *cd,
			      struct device_attribute *attr, char *buf)
{
	struct video_device *vdev = to_video_device(cd);

	return sprintf(buf, "%i\n", vdev->dev_debug);
}

static ssize_t dev_debug_store(struct device *cd,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct video_device *vdev = to_video_device(cd);
	int res = 0;
	u16 value;

	res = kstrtou16(buf, 0, &value);
	if (res)
		return res;

	vdev->dev_debug = value;
	return len;
}
static DEVICE_ATTR_RW(dev_debug);

static ssize_t name_show(struct device *cd,
			 struct device_attribute *attr, char *buf)
{
	struct video_device *vdev = to_video_device(cd);

	return sprintf(buf, "%.*s\n", (int)sizeof(vdev->name), vdev->name);
}
static DEVICE_ATTR_RO(name);

static struct attribute *video_device_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_dev_debug.attr,
	&dev_attr_index.attr,
	NULL,
};
ATTRIBUTE_GROUPS(video_device);

static struct dentry *v4l2_debugfs_root_dir;

/*
 *	Active devices
 */
static struct video_device *video_devices[VIDEO_NUM_DEVICES];
static DEFINE_MUTEX(videodev_lock);
static DECLARE_BITMAP(devnode_nums[VFL_TYPE_MAX], VIDEO_NUM_DEVICES);

/* Device node utility functions */

/* Note: these utility functions all assume that vfl_type is in the range
   [0, VFL_TYPE_MAX-1]. */

#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
/* Return the bitmap corresponding to vfl_type. */
static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type)
{
	/* Any types not assigned to fixed minor ranges must be mapped to
	   one single bitmap for the purposes of finding a free node number
	   since all those unassigned types use the same minor range. */
	int idx = (vfl_type > VFL_TYPE_RADIO) ? VFL_TYPE_MAX - 1 : vfl_type;

	return devnode_nums[idx];
}
#else
/* Return the bitmap corresponding to vfl_type. */
static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type)
{
	return devnode_nums[vfl_type];
}
#endif

/* Mark device node number vdev->num as used */
static inline void devnode_set(struct video_device *vdev)
{
	set_bit(vdev->num, devnode_bits(vdev->vfl_type));
}

/* Mark device node number vdev->num as unused */
static inline void devnode_clear(struct video_device *vdev)
{
	clear_bit(vdev->num, devnode_bits(vdev->vfl_type));
}

/* Try to find a free device node number in the range [from, to> */
static inline int devnode_find(struct video_device *vdev, int from, int to)
{
	return find_next_zero_bit(devnode_bits(vdev->vfl_type), to, from);
}

struct video_device *video_device_alloc(void)
{
	return kzalloc(sizeof(struct video_device), GFP_KERNEL);
}
EXPORT_SYMBOL(video_device_alloc);

void video_device_release(struct video_device *vdev)
{
	kfree(vdev);
}
EXPORT_SYMBOL(video_device_release);

void video_device_release_empty(struct video_device *vdev)
{
	/* Do nothing */
	/* Only valid when the video_device struct is a static. */
}
EXPORT_SYMBOL(video_device_release_empty);

static inline void video_get(struct video_device *vdev)
{
	get_device(&vdev->dev);
}

static inline void video_put(struct video_device *vdev)
{
	put_device(&vdev->dev);
}

/* Called when the last user of the video device exits. */
static void v4l2_device_release(struct device *cd)
{
	struct video_device *vdev = to_video_device(cd);
	struct v4l2_device *v4l2_dev = vdev->v4l2_dev;

	mutex_lock(&videodev_lock);
	if (WARN_ON(video_devices[vdev->minor] != vdev)) {
		/* should not happen */
		mutex_unlock(&videodev_lock);
		return;
	}

	/* Free up this device for reuse */
	video_devices[vdev->minor] = NULL;

	/* Delete the cdev on this minor as well */
	cdev_del(vdev->cdev);
	/* Just in case some driver tries to access this from
	   the release() callback. */
	vdev->cdev = NULL;

	/* Mark device node number as free */
	devnode_clear(vdev);

	mutex_unlock(&videodev_lock);

#if defined(CONFIG_MEDIA_CONTROLLER)
	if (v4l2_dev->mdev && vdev->vfl_dir != VFL_DIR_M2M) {
		/* Remove interfaces and interface links */
		media_devnode_remove(vdev->intf_devnode);
		if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN)
			media_device_unregister_entity(&vdev->entity);
	}
#endif

	/* Do not call v4l2_device_put if there is no release callback set.
	 * Drivers that have no v4l2_device release callback might free the
	 * v4l2_dev instance in the video_device release callback below, so we
	 * must perform this check here.
	 *
	 * TODO: In the long run all drivers that use v4l2_device should use
	 * the v4l2_device release callback. This check will then be
	 * unnecessary.
	 */
	if (v4l2_dev->release == NULL)
		v4l2_dev = NULL;

	/* Release video_device and perform other
	   cleanups as needed. */
	vdev->release(vdev);

	/* Decrease v4l2_device refcount */
	if (v4l2_dev)
		v4l2_device_put(v4l2_dev);
}

static const struct class video_class = {
	.name = VIDEO_NAME,
	.dev_groups = video_device_groups,
};

struct video_device *video_devdata(struct file *file)
{
	return video_devices[iminor(file_inode(file))];
}
EXPORT_SYMBOL(video_devdata);

/* Priority handling */

static inline bool prio_is_valid(enum v4l2_priority prio)
{
	return prio == V4L2_PRIORITY_BACKGROUND ||
	       prio == V4L2_PRIORITY_INTERACTIVE ||
	       prio == V4L2_PRIORITY_RECORD;
}

void v4l2_prio_init(struct v4l2_prio_state *global)
{
	memset(global, 0, sizeof(*global));
}
EXPORT_SYMBOL(v4l2_prio_init);

int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local,
		     enum v4l2_priority new)
{
	if (!prio_is_valid(new))
		return -EINVAL;
	if (*local == new)
		return 0;

	atomic_inc(&global->prios[new]);
	if (prio_is_valid(*local))
		atomic_dec(&global->prios[*local]);
	*local = new;
	return 0;
}
EXPORT_SYMBOL(v4l2_prio_change);

void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local)
{
	v4l2_prio_change(global, local, V4L2_PRIORITY_DEFAULT);
}
EXPORT_SYMBOL(v4l2_prio_open);

void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local)
{
	if (prio_is_valid(local))
		atomic_dec(&global->prios[local]);
}
EXPORT_SYMBOL(v4l2_prio_close);

enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global)
{
	if (atomic_read(&global->prios[V4L2_PRIORITY_RECORD]) > 0)
		return V4L2_PRIORITY_RECORD;
	if (atomic_read(&global->prios[V4L2_PRIORITY_INTERACTIVE]) > 0)
		return V4L2_PRIORITY_INTERACTIVE;
	if (atomic_read(&global->prios[V4L2_PRIORITY_BACKGROUND]) > 0)
		return V4L2_PRIORITY_BACKGROUND;
	return V4L2_PRIORITY_UNSET;
}
EXPORT_SYMBOL(v4l2_prio_max);

int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local)
{
	return (local < v4l2_prio_max(global)) ? -EBUSY : 0;
}
EXPORT_SYMBOL(v4l2_prio_check);

static ssize_t v4l2_read(struct file *filp, char __user *buf,
			 size_t sz, loff_t *off)
{
	struct video_device *vdev = video_devdata(filp);
	int ret = -ENODEV;

	if (!vdev->fops->read)
		return -EINVAL;
	if (video_is_registered(vdev))
		ret = vdev->fops->read(filp, buf, sz, off);
	if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) &&
	    (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING))
		dprintk("%s: read: %zd (%d)\n",
			video_device_node_name(vdev), sz, ret);
	return ret;
}

static ssize_t v4l2_write(struct file *filp, const char __user *buf,
			  size_t sz, loff_t *off)
{
	struct video_device *vdev = video_devdata(filp);
	int ret = -ENODEV;

	if (!vdev->fops->write)
		return -EINVAL;
	if (video_is_registered(vdev))
		ret = vdev->fops->write(filp, buf, sz, off);
	if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) &&
	    (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING))
		dprintk("%s: write: %zd (%d)\n",
			video_device_node_name(vdev), sz, ret);
	return ret;
}

static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll)
{
	struct video_device *vdev = video_devdata(filp);
	__poll_t res = EPOLLERR | EPOLLHUP | EPOLLPRI;

	if (video_is_registered(vdev)) {
		if (!vdev->fops->poll)
			res = DEFAULT_POLLMASK;
		else
			res = vdev->fops->poll(filp, poll);
	}
	if (vdev->dev_debug & V4L2_DEV_DEBUG_POLL)
		dprintk("%s: poll: %08x %08x\n",
			video_device_node_name(vdev), res,
			poll_requested_events(poll));
	return res;
}

static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct video_device *vdev = video_devdata(filp);
	int ret = -ENODEV;

	if (vdev->fops->unlocked_ioctl) {
		if (video_is_registered(vdev))
			ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
	} else
		ret = -ENOTTY;

	return ret;
}

#ifdef CONFIG_MMU
#define v4l2_get_unmapped_area NULL
#else
static unsigned long v4l2_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct video_device *vdev = video_devdata(filp);
	int ret;

	if (!vdev->fops->get_unmapped_area)
		return -ENOSYS;
	if (!video_is_registered(vdev))
		return -ENODEV;
	ret = vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
	if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
		dprintk("%s: get_unmapped_area (%d)\n",
			video_device_node_name(vdev), ret);
	return ret;
}
#endif

static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
{
	struct video_device *vdev = video_devdata(filp);
	int ret = -ENODEV;

	if (!vdev->fops->mmap)
		return -ENODEV;
	if (video_is_registered(vdev))
		ret = vdev->fops->mmap(filp, vm);
	if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
		dprintk("%s: mmap (%d)\n",
			video_device_node_name(vdev), ret);
	return ret;
}

/* Override for the open function */
static int v4l2_open(struct inode *inode, struct file *filp)
{
	struct video_device *vdev;
	int ret = 0;

	/* Check if the video device is available */
	mutex_lock(&videodev_lock);
	vdev = video_devdata(filp);
	/* return ENODEV if the video device has already been removed.
*/ if (vdev == NULL || !video_is_registered(vdev)) { mutex_unlock(&videodev_lock); return -ENODEV; } /* and increase the device refcount */ video_get(vdev); mutex_unlock(&videodev_lock); if (vdev->fops->open) { if (video_is_registered(vdev)) ret = vdev->fops->open(filp); else ret = -ENODEV; } if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) dprintk("%s: open (%d)\n", video_device_node_name(vdev), ret); /* decrease the refcount in case of an error */ if (ret) video_put(vdev); return ret; } /* Override for the release function */ static int v4l2_release(struct inode *inode, struct file *filp) { struct video_device *vdev = video_devdata(filp); int ret = 0; /* * We need to serialize the release() with queueing new requests. * The release() may trigger the cancellation of a streaming * operation, and that should not be mixed with queueing a new * request at the same time. */ if (vdev->fops->release) { if (v4l2_device_supports_requests(vdev->v4l2_dev)) { mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex); ret = vdev->fops->release(filp); mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex); } else { ret = vdev->fops->release(filp); } } if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP) dprintk("%s: release\n", video_device_node_name(vdev)); /* decrease the refcount unconditionally since the release() return value is ignored. */ video_put(vdev); return ret; } static const struct file_operations v4l2_fops = { .owner = THIS_MODULE, .read = v4l2_read, .write = v4l2_write, .open = v4l2_open, .get_unmapped_area = v4l2_get_unmapped_area, .mmap = v4l2_mmap, .unlocked_ioctl = v4l2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = v4l2_compat_ioctl32, #endif .release = v4l2_release, .poll = v4l2_poll, }; /** * get_index - assign stream index number based on v4l2_dev * @vdev: video_device to assign index number to, vdev->v4l2_dev should be assigned * * Note that when this is called the new device has not yet been registered * in the video_device array, but it was able to obtain a minor number. * * This means that we can always obtain a free stream index number since * the worst case scenario is that there are VIDEO_NUM_DEVICES - 1 slots in * use of the video_device array. * * Returns a free index number. */ static int get_index(struct video_device *vdev) { /* This can be static since this function is called with the global videodev_lock held. */ static DECLARE_BITMAP(used, VIDEO_NUM_DEVICES); int i; bitmap_zero(used, VIDEO_NUM_DEVICES); for (i = 0; i < VIDEO_NUM_DEVICES; i++) { if (video_devices[i] != NULL && video_devices[i]->v4l2_dev == vdev->v4l2_dev) { __set_bit(video_devices[i]->index, used); } } return find_first_zero_bit(used, VIDEO_NUM_DEVICES); } #define SET_VALID_IOCTL(ops, cmd, op) \ do { if ((ops)->op) __set_bit(_IOC_NR(cmd), valid_ioctls); } while (0) /* This determines which ioctls are actually implemented in the driver. It's a one-time thing which simplifies video_ioctl2 as it can just do a bit test. Note that drivers can override this by setting bits to 1 in vdev->valid_ioctls. If an ioctl is marked as 1 when this function is called, then that ioctl will actually be marked as unimplemented. 
It does that by first setting up the local valid_ioctls bitmap, and at the end do a: vdev->valid_ioctls = valid_ioctls & ~(vdev->valid_ioctls) */ static void determine_valid_ioctls(struct video_device *vdev) { const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE; const u32 meta_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_META_OUTPUT; DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE); const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops; bool is_vid = vdev->vfl_type == VFL_TYPE_VIDEO && (vdev->device_caps & vid_caps); bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI; bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO; bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR; bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH; bool is_meta = vdev->vfl_type == VFL_TYPE_VIDEO && (vdev->device_caps & meta_caps); bool is_rx = vdev->vfl_dir != VFL_DIR_TX; bool is_tx = vdev->vfl_dir != VFL_DIR_RX; bool is_io_mc = vdev->device_caps & V4L2_CAP_IO_MC; bool has_streaming = vdev->device_caps & V4L2_CAP_STREAMING; bool is_edid = vdev->device_caps & V4L2_CAP_EDID; bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE); /* vfl_type and vfl_dir independent ioctls */ SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap); __set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls); __set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls); /* Note: the control handler can also be passed through the filehandle, and that can't be tested here. If the bit for these control ioctls is set, then the ioctl is valid. But if it is 0, then it can still be valid if the filehandle passed the control handler. */ if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl) __set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls); if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl) __set_bit(_IOC_NR(VIDIOC_QUERY_EXT_CTRL), valid_ioctls); if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls) __set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls); if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls) __set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls); if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls) __set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls); if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls) __set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls); if (vdev->ctrl_handler || ops->vidioc_try_ext_ctrls) __set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls); if (vdev->ctrl_handler || ops->vidioc_querymenu) __set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls); if (!is_tch) { SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency); SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency); } SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status); #ifdef CONFIG_VIDEO_ADV_DEBUG __set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls); __set_bit(_IOC_NR(VIDIOC_DBG_G_REGISTER), valid_ioctls); __set_bit(_IOC_NR(VIDIOC_DBG_S_REGISTER), valid_ioctls); #endif /* yes, really vidioc_subscribe_event */ SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event); SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event); SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event); if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator) __set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls); if (is_vid) { /* video specific ioctls */ if ((is_rx && (ops->vidioc_enum_fmt_vid_cap || ops->vidioc_enum_fmt_vid_overlay)) || (is_tx && ops->vidioc_enum_fmt_vid_out)) __set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls); if ((is_rx && 
(ops->vidioc_g_fmt_vid_cap || ops->vidioc_g_fmt_vid_cap_mplane || ops->vidioc_g_fmt_vid_overlay)) || (is_tx && (ops->vidioc_g_fmt_vid_out || ops->vidioc_g_fmt_vid_out_mplane || ops->vidioc_g_fmt_vid_out_overlay))) __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls); if ((is_rx && (ops->vidioc_s_fmt_vid_cap || ops->vidioc_s_fmt_vid_cap_mplane || ops->vidioc_s_fmt_vid_overlay)) || (is_tx && (ops->vidioc_s_fmt_vid_out || ops->vidioc_s_fmt_vid_out_mplane || ops->vidioc_s_fmt_vid_out_overlay))) __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls); if ((is_rx && (ops->vidioc_try_fmt_vid_cap || ops->vidioc_try_fmt_vid_cap_mplane || ops->vidioc_try_fmt_vid_overlay)) || (is_tx && (ops->vidioc_try_fmt_vid_out || ops->vidioc_try_fmt_vid_out_mplane || ops->vidioc_try_fmt_vid_out_overlay))) __set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls); SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay); SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf); SET_VALID_IOCTL(ops, VIDIOC_S_FBUF, vidioc_s_fbuf); SET_VALID_IOCTL(ops, VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp); SET_VALID_IOCTL(ops, VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp); SET_VALID_IOCTL(ops, VIDIOC_G_ENC_INDEX, vidioc_g_enc_index); SET_VALID_IOCTL(ops, VIDIOC_ENCODER_CMD, vidioc_encoder_cmd); SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd); SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd); SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd); SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes); SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals); if (ops->vidioc_g_selection && !test_bit(_IOC_NR(VIDIOC_G_SELECTION), vdev->valid_ioctls)) { __set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls); __set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls); } if (ops->vidioc_s_selection && !test_bit(_IOC_NR(VIDIOC_S_SELECTION), vdev->valid_ioctls)) __set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls); SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection); SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection); } if (is_meta && is_rx) { /* metadata capture specific ioctls */ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_cap); SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_cap); SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_cap); SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_cap); } else if (is_meta && is_tx) { /* metadata output specific ioctls */ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_out); SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_out); SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_out); SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_out); } if (is_vbi) { /* vbi specific ioctls */ if ((is_rx && (ops->vidioc_g_fmt_vbi_cap || ops->vidioc_g_fmt_sliced_vbi_cap)) || (is_tx && (ops->vidioc_g_fmt_vbi_out || ops->vidioc_g_fmt_sliced_vbi_out))) __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls); if ((is_rx && (ops->vidioc_s_fmt_vbi_cap || ops->vidioc_s_fmt_sliced_vbi_cap)) || (is_tx && (ops->vidioc_s_fmt_vbi_out || ops->vidioc_s_fmt_sliced_vbi_out))) __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls); if ((is_rx && (ops->vidioc_try_fmt_vbi_cap || ops->vidioc_try_fmt_sliced_vbi_cap)) || (is_tx && (ops->vidioc_try_fmt_vbi_out || ops->vidioc_try_fmt_sliced_vbi_out))) __set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls); SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap); } else if (is_tch) { /* touch specific ioctls */ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_vid_cap); SET_VALID_IOCTL(ops, VIDIOC_G_FMT, 
vidioc_g_fmt_vid_cap); SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_vid_cap); SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_vid_cap); SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes); SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals); SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input); SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input); SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input); SET_VALID_IOCTL(ops, VIDIOC_G_PARM, vidioc_g_parm); SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm); } else if (is_sdr && is_rx) { /* SDR receiver specific ioctls */ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_cap); SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_cap); SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_cap); SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_cap); } else if (is_sdr && is_tx) { /* SDR transmitter specific ioctls */ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_out); SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_out); SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_out); SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_out); } if (has_streaming) { /* ioctls valid for streaming I/O */ SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs); SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf); SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf); SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf); SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf); SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs); SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf); SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon); SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff); /* VIDIOC_CREATE_BUFS support is mandatory to enable VIDIOC_REMOVE_BUFS */ if (ops->vidioc_create_bufs) SET_VALID_IOCTL(ops, VIDIOC_REMOVE_BUFS, vidioc_remove_bufs); } if (is_vid || is_vbi || is_meta) { /* ioctls valid for video, vbi and metadata */ if (ops->vidioc_s_std) __set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls); SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std); SET_VALID_IOCTL(ops, VIDIOC_G_STD, vidioc_g_std); if (is_rx) { SET_VALID_IOCTL(ops, VIDIOC_QUERYSTD, vidioc_querystd); if (is_io_mc) { __set_bit(_IOC_NR(VIDIOC_ENUMINPUT), valid_ioctls); __set_bit(_IOC_NR(VIDIOC_G_INPUT), valid_ioctls); __set_bit(_IOC_NR(VIDIOC_S_INPUT), valid_ioctls); } else { SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input); SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input); SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input); } SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDIO, vidioc_enumaudio); SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio); SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio); SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings); SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid); } if (is_tx) { if (is_io_mc) { __set_bit(_IOC_NR(VIDIOC_ENUMOUTPUT), valid_ioctls); __set_bit(_IOC_NR(VIDIOC_G_OUTPUT), valid_ioctls); __set_bit(_IOC_NR(VIDIOC_S_OUTPUT), valid_ioctls); } else { SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output); SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output); SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output); } SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDOUT, vidioc_enumaudout); SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout); SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout); } if (ops->vidioc_g_parm || ops->vidioc_g_std) __set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls); SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm); 
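	/* The DV timings ioctls below are still inside the
	 * (is_vid || is_vbi || is_meta) branch; VIDIOC_G_EDID is
	 * additionally enabled further down when V4L2_CAP_EDID is set. */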
SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings); SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings); SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings); SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap); SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid); } if (is_tx && (is_radio || is_sdr)) { /* radio transmitter only ioctls */ SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator); SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator); } if (is_rx && !is_tch) { /* receiver only ioctls */ SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner); SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner); SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek); } if (is_edid) { SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid); if (is_tx) { SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output); SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output); SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output); } if (is_rx) { SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input); SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input); SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input); SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid); } } bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls, BASE_VIDIOC_PRIVATE); } static int video_register_media_controller(struct video_device *vdev) { #if defined(CONFIG_MEDIA_CONTROLLER) u32 intf_type; int ret; /* Memory-to-memory devices are more complex and use * their own function to register its mc entities. */ if (!vdev->v4l2_dev->mdev || vdev->vfl_dir == VFL_DIR_M2M) return 0; vdev->entity.obj_type = MEDIA_ENTITY_TYPE_VIDEO_DEVICE; vdev->entity.function = MEDIA_ENT_F_UNKNOWN; switch (vdev->vfl_type) { case VFL_TYPE_VIDEO: intf_type = MEDIA_INTF_T_V4L_VIDEO; vdev->entity.function = MEDIA_ENT_F_IO_V4L; break; case VFL_TYPE_VBI: intf_type = MEDIA_INTF_T_V4L_VBI; vdev->entity.function = MEDIA_ENT_F_IO_VBI; break; case VFL_TYPE_SDR: intf_type = MEDIA_INTF_T_V4L_SWRADIO; vdev->entity.function = MEDIA_ENT_F_IO_SWRADIO; break; case VFL_TYPE_TOUCH: intf_type = MEDIA_INTF_T_V4L_TOUCH; vdev->entity.function = MEDIA_ENT_F_IO_V4L; break; case VFL_TYPE_RADIO: intf_type = MEDIA_INTF_T_V4L_RADIO; /* * Radio doesn't have an entity at the V4L2 side to represent * radio input or output. Instead, the audio input/output goes * via either physical wires or ALSA. */ break; case VFL_TYPE_SUBDEV: intf_type = MEDIA_INTF_T_V4L_SUBDEV; /* Entity will be created via v4l2_device_register_subdev() */ break; default: return 0; } if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) { vdev->entity.name = vdev->name; /* Needed just for backward compatibility with legacy MC API */ vdev->entity.info.dev.major = VIDEO_MAJOR; vdev->entity.info.dev.minor = vdev->minor; ret = media_device_register_entity(vdev->v4l2_dev->mdev, &vdev->entity); if (ret < 0) { pr_warn("%s: media_device_register_entity failed\n", __func__); return ret; } } vdev->intf_devnode = media_devnode_create(vdev->v4l2_dev->mdev, intf_type, 0, VIDEO_MAJOR, vdev->minor); if (!vdev->intf_devnode) { media_device_unregister_entity(&vdev->entity); return -ENOMEM; } if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) { struct media_link *link; link = media_create_intf_link(&vdev->entity, &vdev->intf_devnode->intf, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (!link) { media_devnode_remove(vdev->intf_devnode); media_device_unregister_entity(&vdev->entity); return -ENOMEM; } } /* FIXME: how to create the other interface links? 
*/ #endif return 0; } int __video_register_device(struct video_device *vdev, enum vfl_devnode_type type, int nr, int warn_if_nr_in_use, struct module *owner) { int i = 0; int ret; int minor_offset = 0; int minor_cnt = VIDEO_NUM_DEVICES; const char *name_base; /* A minor value of -1 marks this video device as never having been registered */ vdev->minor = -1; /* the release callback MUST be present */ if (WARN_ON(!vdev->release)) return -EINVAL; /* the v4l2_dev pointer MUST be present */ if (WARN_ON(!vdev->v4l2_dev)) return -EINVAL; /* the device_caps field MUST be set for all but subdevs */ if (WARN_ON(type != VFL_TYPE_SUBDEV && !vdev->device_caps)) return -EINVAL; /* v4l2_fh support */ spin_lock_init(&vdev->fh_lock); INIT_LIST_HEAD(&vdev->fh_list); /* Part 1: check device type */ switch (type) { case VFL_TYPE_VIDEO: name_base = "video"; break; case VFL_TYPE_VBI: name_base = "vbi"; break; case VFL_TYPE_RADIO: name_base = "radio"; break; case VFL_TYPE_SUBDEV: name_base = "v4l-subdev"; break; case VFL_TYPE_SDR: /* Use device name 'swradio' because 'sdr' was already taken. */ name_base = "swradio"; break; case VFL_TYPE_TOUCH: name_base = "v4l-touch"; break; default: pr_err("%s called with unknown type: %d\n", __func__, type); return -EINVAL; } vdev->vfl_type = type; vdev->cdev = NULL; if (vdev->dev_parent == NULL) vdev->dev_parent = vdev->v4l2_dev->dev; if (vdev->ctrl_handler == NULL) vdev->ctrl_handler = vdev->v4l2_dev->ctrl_handler; /* If the prio state pointer is NULL, then use the v4l2_device prio state. */ if (vdev->prio == NULL) vdev->prio = &vdev->v4l2_dev->prio; /* Part 2: find a free minor, device node number and device index. */ #ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES /* Keep the ranges for the first four types for historical * reasons. * Newer devices (not yet in place) should use the range * of 128-191 and just pick the first free minor there * (new style). */ switch (type) { case VFL_TYPE_VIDEO: minor_offset = 0; minor_cnt = 64; break; case VFL_TYPE_RADIO: minor_offset = 64; minor_cnt = 64; break; case VFL_TYPE_VBI: minor_offset = 224; minor_cnt = 32; break; default: minor_offset = 128; minor_cnt = 64; break; } #endif /* Pick a device node number */ mutex_lock(&videodev_lock); nr = devnode_find(vdev, nr == -1 ? 0 : nr, minor_cnt); if (nr == minor_cnt) nr = devnode_find(vdev, 0, minor_cnt); if (nr == minor_cnt) { pr_err("could not get a free device node number\n"); mutex_unlock(&videodev_lock); return -ENFILE; } #ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES /* 1-on-1 mapping of device node number to minor number */ i = nr; #else /* The device node number and minor numbers are independent, so we just find the first free minor number. 
*/ for (i = 0; i < VIDEO_NUM_DEVICES; i++) if (video_devices[i] == NULL) break; if (i == VIDEO_NUM_DEVICES) { mutex_unlock(&videodev_lock); pr_err("could not get a free minor\n"); return -ENFILE; } #endif vdev->minor = i + minor_offset; vdev->num = nr; /* Should not happen since we thought this minor was free */ if (WARN_ON(video_devices[vdev->minor])) { mutex_unlock(&videodev_lock); pr_err("video_device not empty!\n"); return -ENFILE; } devnode_set(vdev); vdev->index = get_index(vdev); video_devices[vdev->minor] = vdev; mutex_unlock(&videodev_lock); if (vdev->ioctl_ops) determine_valid_ioctls(vdev); /* Part 3: Initialize the character device */ vdev->cdev = cdev_alloc(); if (vdev->cdev == NULL) { ret = -ENOMEM; goto cleanup; } vdev->cdev->ops = &v4l2_fops; vdev->cdev->owner = owner; ret = cdev_add(vdev->cdev, MKDEV(VIDEO_MAJOR, vdev->minor), 1); if (ret < 0) { pr_err("%s: cdev_add failed\n", __func__); kfree(vdev->cdev); vdev->cdev = NULL; goto cleanup; } /* Part 4: register the device with sysfs */ vdev->dev.class = &video_class; vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor); vdev->dev.parent = vdev->dev_parent; dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num); mutex_lock(&videodev_lock); ret = device_register(&vdev->dev); if (ret < 0) { mutex_unlock(&videodev_lock); pr_err("%s: device_register failed\n", __func__); goto cleanup; } /* Register the release callback that will be called when the last reference to the device goes away. */ vdev->dev.release = v4l2_device_release; if (nr != -1 && nr != vdev->num && warn_if_nr_in_use) pr_warn("%s: requested %s%d, got %s\n", __func__, name_base, nr, video_device_node_name(vdev)); /* Increase v4l2_device refcount */ v4l2_device_get(vdev->v4l2_dev); /* Part 5: Register the entity. */ ret = video_register_media_controller(vdev); /* Part 6: Activate this minor. The char device can now be used. */ set_bit(V4L2_FL_REGISTERED, &vdev->flags); mutex_unlock(&videodev_lock); return 0; cleanup: mutex_lock(&videodev_lock); if (vdev->cdev) cdev_del(vdev->cdev); video_devices[vdev->minor] = NULL; devnode_clear(vdev); mutex_unlock(&videodev_lock); /* Mark this video device as never having been registered. */ vdev->minor = -1; return ret; } EXPORT_SYMBOL(__video_register_device); /** * video_unregister_device - unregister a video4linux device * @vdev: the device to unregister * * This unregisters the passed device. Future open calls will * be met with errors. */ void video_unregister_device(struct video_device *vdev) { /* Check if vdev was ever registered at all */ if (!vdev || !video_is_registered(vdev)) return; mutex_lock(&videodev_lock); /* This must be in a critical section to prevent a race with v4l2_open. * Once this bit has been cleared video_get may never be called again. 
*/ clear_bit(V4L2_FL_REGISTERED, &vdev->flags); mutex_unlock(&videodev_lock); if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags)) v4l2_event_wake_all(vdev); device_unregister(&vdev->dev); } EXPORT_SYMBOL(video_unregister_device); #ifdef CONFIG_DEBUG_FS struct dentry *v4l2_debugfs_root(void) { if (!v4l2_debugfs_root_dir) v4l2_debugfs_root_dir = debugfs_create_dir("v4l2", NULL); return v4l2_debugfs_root_dir; } EXPORT_SYMBOL_GPL(v4l2_debugfs_root); #endif #if defined(CONFIG_MEDIA_CONTROLLER) __must_check int video_device_pipeline_start(struct video_device *vdev, struct media_pipeline *pipe) { struct media_entity *entity = &vdev->entity; if (entity->num_pads != 1) return -ENODEV; return media_pipeline_start(&entity->pads[0], pipe); } EXPORT_SYMBOL_GPL(video_device_pipeline_start); __must_check int __video_device_pipeline_start(struct video_device *vdev, struct media_pipeline *pipe) { struct media_entity *entity = &vdev->entity; if (entity->num_pads != 1) return -ENODEV; return __media_pipeline_start(&entity->pads[0], pipe); } EXPORT_SYMBOL_GPL(__video_device_pipeline_start); void video_device_pipeline_stop(struct video_device *vdev) { struct media_entity *entity = &vdev->entity; if (WARN_ON(entity->num_pads != 1)) return; return media_pipeline_stop(&entity->pads[0]); } EXPORT_SYMBOL_GPL(video_device_pipeline_stop); void __video_device_pipeline_stop(struct video_device *vdev) { struct media_entity *entity = &vdev->entity; if (WARN_ON(entity->num_pads != 1)) return; return __media_pipeline_stop(&entity->pads[0]); } EXPORT_SYMBOL_GPL(__video_device_pipeline_stop); __must_check int video_device_pipeline_alloc_start(struct video_device *vdev) { struct media_entity *entity = &vdev->entity; if (entity->num_pads != 1) return -ENODEV; return media_pipeline_alloc_start(&entity->pads[0]); } EXPORT_SYMBOL_GPL(video_device_pipeline_alloc_start); struct media_pipeline *video_device_pipeline(struct video_device *vdev) { struct media_entity *entity = &vdev->entity; if (WARN_ON(entity->num_pads != 1)) return NULL; return media_pad_pipeline(&entity->pads[0]); } EXPORT_SYMBOL_GPL(video_device_pipeline); #endif /* CONFIG_MEDIA_CONTROLLER */ /* * Initialise video for linux */ static int __init videodev_init(void) { dev_t dev = MKDEV(VIDEO_MAJOR, 0); int ret; pr_info("Linux video capture interface: v2.00\n"); ret = register_chrdev_region(dev, VIDEO_NUM_DEVICES, VIDEO_NAME); if (ret < 0) { pr_warn("videodev: unable to get major %d\n", VIDEO_MAJOR); return ret; } ret = class_register(&video_class); if (ret < 0) { unregister_chrdev_region(dev, VIDEO_NUM_DEVICES); pr_warn("video_dev: class_register failed\n"); return -EIO; } return 0; } static void __exit videodev_exit(void) { dev_t dev = MKDEV(VIDEO_MAJOR, 0); class_unregister(&video_class); unregister_chrdev_region(dev, VIDEO_NUM_DEVICES); debugfs_remove_recursive(v4l2_debugfs_root_dir); v4l2_debugfs_root_dir = NULL; } subsys_initcall(videodev_init); module_exit(videodev_exit) MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@kernel.org>, Bill Dirks, Justin Schoeman, Gerd Knorr"); MODULE_DESCRIPTION("Video4Linux2 core driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR);
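The registration API above reduces to a small amount of per-driver boilerplate. Below is a minimal usage sketch, not part of this file: mydrv_register, my_fops and my_ioctl_ops are hypothetical names, the capability flags are illustrative, and error handling is abbreviated.

/*
 * Minimal usage sketch for the API above; mydrv_register, my_fops and
 * my_ioctl_ops are hypothetical.
 */
static int mydrv_register(struct v4l2_device *v4l2_dev)
{
	struct video_device *vdev;
	int ret;

	vdev = video_device_alloc();		/* kzalloc'ed, see above */
	if (!vdev)
		return -ENOMEM;

	strscpy(vdev->name, "mydrv", sizeof(vdev->name));
	vdev->v4l2_dev = v4l2_dev;		/* required, WARN_ON otherwise */
	vdev->release = video_device_release;	/* required, WARN_ON otherwise */
	vdev->fops = &my_fops;			/* v4l2_file_operations */
	vdev->ioctl_ops = &my_ioctl_ops;	/* feeds determine_valid_ioctls() */
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;

	/* nr == -1 asks for the first free "videoN" node number */
	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret)
		video_device_release(vdev);
	return ret;
}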
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM capability

#if !defined(_TRACE_CAPABILITY_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_CAPABILITY_H

#include <linux/cred.h>
#include <linux/tracepoint.h>
#include <linux/user_namespace.h>

/**
 * cap_capable - called after it's determined if a task has a particular
 * effective capability
 *
 * @cred: The credentials used
 * @target_ns: The user namespace of the resource being accessed
 * @capable_ns: The user namespace in which the credential provides the
 *              capability to access the targeted resource.
 *              This will be NULL if ret is not 0.
 * @cap: The capability to check for
 * @ret: The return value of the check: 0 if it does, -ve if it does not
 *
 * Allows to trace calls to cap_capable in commoncap.c
 */
TRACE_EVENT(cap_capable,

	TP_PROTO(const struct cred *cred, struct user_namespace *target_ns,
		 const struct user_namespace *capable_ns, int cap, int ret),

	TP_ARGS(cred, target_ns, capable_ns, cap, ret),

	TP_STRUCT__entry(
		__field(const struct cred *, cred)
		__field(struct user_namespace *, target_ns)
		__field(const struct user_namespace *, capable_ns)
		__field(int, cap)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->cred = cred;
		__entry->target_ns = target_ns;
		__entry->capable_ns = ret == 0 ? capable_ns : NULL;
		__entry->cap = cap;
		__entry->ret = ret;
	),

	TP_printk("cred %p, target_ns %p, capable_ns %p, cap %d, ret %d",
		  __entry->cred, __entry->target_ns, __entry->capable_ns,
		  __entry->cap, __entry->ret)
);

#endif /* _TRACE_CAPABILITY_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
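As context for the TRACE_EVENT() above: the macro expands into a trace_cap_capable() static inline whose signature is the TP_PROTO, and exactly one translation unit must define CREATE_TRACE_POINTS before including the header. A call site would look roughly like the sketch below; the real instrumented walk lives in cap_capable() in security/commoncap.c, and my_cap_check with its elided body is a hypothetical stand-in.

#include <trace/events/capability.h>

/* Hypothetical caller shape; the real logic is in commoncap.c. */
static int my_cap_check(const struct cred *cred,
			struct user_namespace *target_ns, int cap)
{
	struct user_namespace *capable_ns = NULL;
	int ret = -EPERM;

	/* ... walk the namespace hierarchy, setting capable_ns and ret ... */

	/* fires only when the tracepoint is enabled; the NULLing of
	 * capable_ns for ret != 0 happens in TP_fast_assign above */
	trace_cap_capable(cred, target_ns, capable_ns, cap, ret);
	return ret;
}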
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MNT_IDMAPPING_H
#define _LINUX_MNT_IDMAPPING_H

#include <linux/types.h>
#include <linux/uidgid.h>

struct mnt_idmap;
struct user_namespace;

extern struct mnt_idmap nop_mnt_idmap;
extern struct mnt_idmap invalid_mnt_idmap;
extern struct user_namespace init_user_ns;

typedef struct {
	uid_t val;
} vfsuid_t;

typedef struct {
	gid_t val;
} vfsgid_t;

static_assert(sizeof(vfsuid_t) == sizeof(kuid_t));
static_assert(sizeof(vfsgid_t) == sizeof(kgid_t));
static_assert(offsetof(vfsuid_t, val) == offsetof(kuid_t, val));
static_assert(offsetof(vfsgid_t, val) == offsetof(kgid_t, val));

static inline bool is_valid_mnt_idmap(const struct mnt_idmap *idmap)
{
	return idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap;
}

#ifdef CONFIG_MULTIUSER
static inline uid_t __vfsuid_val(vfsuid_t uid)
{
	return uid.val;
}

static inline gid_t __vfsgid_val(vfsgid_t gid)
{
	return gid.val;
}
#else
static inline uid_t __vfsuid_val(vfsuid_t uid)
{
	return 0;
}

static inline gid_t __vfsgid_val(vfsgid_t gid)
{
	return 0;
}
#endif

static inline bool vfsuid_valid(vfsuid_t uid)
{
	return __vfsuid_val(uid) != (uid_t)-1;
}

static inline bool vfsgid_valid(vfsgid_t gid)
{
	return __vfsgid_val(gid) != (gid_t)-1;
}

static inline bool vfsuid_eq(vfsuid_t left, vfsuid_t right)
{
	return vfsuid_valid(left) && __vfsuid_val(left) == __vfsuid_val(right);
}

static inline bool vfsgid_eq(vfsgid_t left, vfsgid_t right)
{
	return vfsgid_valid(left) && __vfsgid_val(left) == __vfsgid_val(right);
}

/**
 * vfsuid_eq_kuid - check whether kuid and vfsuid have the same value
 * @vfsuid: the vfsuid to compare
 * @kuid: the kuid to compare
 *
 * Check whether @vfsuid and @kuid have the same values.
 *
 * Return: true if @vfsuid and @kuid have the same value, false if not.
 * Comparison between two invalid uids returns false.
 */
static inline bool vfsuid_eq_kuid(vfsuid_t vfsuid, kuid_t kuid)
{
	return vfsuid_valid(vfsuid) && __vfsuid_val(vfsuid) == __kuid_val(kuid);
}

/**
 * vfsgid_eq_kgid - check whether kgid and vfsgid have the same value
 * @vfsgid: the vfsgid to compare
 * @kgid: the kgid to compare
 *
 * Check whether @vfsgid and @kgid have the same values.
 *
 * Return: true if @vfsgid and @kgid have the same value, false if not.
 * Comparison between two invalid gids returns false.
 */
static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid)
{
	return vfsgid_valid(vfsgid) && __vfsgid_val(vfsgid) == __kgid_val(kgid);
}

/*
 * vfs{g,u}ids are created from k{g,u}ids.
 * We don't allow them to be created from regular {u,g}id.
 */
#define VFSUIDT_INIT(val) (vfsuid_t){ __kuid_val(val) }
#define VFSGIDT_INIT(val) (vfsgid_t){ __kgid_val(val) }

#define INVALID_VFSUID VFSUIDT_INIT(INVALID_UID)
#define INVALID_VFSGID VFSGIDT_INIT(INVALID_GID)

/*
 * Allow a vfs{g,u}id to be used as a k{g,u}id where we want to compare
 * whether the mapped value is identical to value of a k{g,u}id.
 */
#define AS_KUIDT(val) (kuid_t){ __vfsuid_val(val) }
#define AS_KGIDT(val) (kgid_t){ __vfsgid_val(val) }

int vfsgid_in_group_p(vfsgid_t vfsgid);

struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap);
void mnt_idmap_put(struct mnt_idmap *idmap);

vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
		     struct user_namespace *fs_userns, kuid_t kuid);

vfsgid_t make_vfsgid(struct mnt_idmap *idmap,
		     struct user_namespace *fs_userns, kgid_t kgid);

kuid_t from_vfsuid(struct mnt_idmap *idmap,
		   struct user_namespace *fs_userns, vfsuid_t vfsuid);

kgid_t from_vfsgid(struct mnt_idmap *idmap,
		   struct user_namespace *fs_userns, vfsgid_t vfsgid);

/**
 * vfsuid_has_fsmapping - check whether a vfsuid maps into the filesystem
 * @idmap: the mount's idmapping
 * @fs_userns: the filesystem's idmapping
 * @vfsuid: vfsuid to be mapped
 *
 * Check whether @vfsuid has a mapping in the filesystem idmapping. Use this
 * function to check whether the filesystem idmapping has a mapping for
 * @vfsuid.
 *
 * Return: true if @vfsuid has a mapping in the filesystem, false if not.
 */
static inline bool vfsuid_has_fsmapping(struct mnt_idmap *idmap,
					struct user_namespace *fs_userns,
					vfsuid_t vfsuid)
{
	return uid_valid(from_vfsuid(idmap, fs_userns, vfsuid));
}

static inline bool vfsuid_has_mapping(struct user_namespace *userns,
				      vfsuid_t vfsuid)
{
	return from_kuid(userns, AS_KUIDT(vfsuid)) != (uid_t)-1;
}

/**
 * vfsuid_into_kuid - convert vfsuid into kuid
 * @vfsuid: the vfsuid to convert
 *
 * This can be used when a vfsuid is committed as a kuid.
 *
 * Return: a kuid with the value of @vfsuid
 */
static inline kuid_t vfsuid_into_kuid(vfsuid_t vfsuid)
{
	return AS_KUIDT(vfsuid);
}

/**
 * vfsgid_has_fsmapping - check whether a vfsgid maps into the filesystem
 * @idmap: the mount's idmapping
 * @fs_userns: the filesystem's idmapping
 * @vfsgid: vfsgid to be mapped
 *
 * Check whether @vfsgid has a mapping in the filesystem idmapping. Use this
 * function to check whether the filesystem idmapping has a mapping for
 * @vfsgid.
 *
 * Return: true if @vfsgid has a mapping in the filesystem, false if not.
 */
static inline bool vfsgid_has_fsmapping(struct mnt_idmap *idmap,
					struct user_namespace *fs_userns,
					vfsgid_t vfsgid)
{
	return gid_valid(from_vfsgid(idmap, fs_userns, vfsgid));
}

static inline bool vfsgid_has_mapping(struct user_namespace *userns,
				      vfsgid_t vfsgid)
{
	return from_kgid(userns, AS_KGIDT(vfsgid)) != (gid_t)-1;
}

/**
 * vfsgid_into_kgid - convert vfsgid into kgid
 * @vfsgid: the vfsgid to convert
 *
 * This can be used when a vfsgid is committed as a kgid.
 *
 * Return: a kgid with the value of @vfsgid
 */
static inline kgid_t vfsgid_into_kgid(vfsgid_t vfsgid)
{
	return AS_KGIDT(vfsgid);
}

/**
 * mapped_fsuid - return caller's fsuid mapped according to an idmapping
 * @idmap: the mount's idmapping
 * @fs_userns: the filesystem's idmapping
 *
 * Use this helper to initialize a new vfs or filesystem object based on
 * the caller's fsuid. A common example is initializing the i_uid field of
 * a newly allocated inode triggered by a creation event such as mkdir or
 * O_CREAT. Other examples include the allocation of quotas for a specific
 * user.
 *
 * Return: the caller's current fsuid mapped up according to @idmap.
 */
static inline kuid_t mapped_fsuid(struct mnt_idmap *idmap,
				  struct user_namespace *fs_userns)
{
	return from_vfsuid(idmap, fs_userns, VFSUIDT_INIT(current_fsuid()));
}

/**
 * mapped_fsgid - return caller's fsgid mapped according to an idmapping
 * @idmap: the mount's idmapping
 * @fs_userns: the filesystem's idmapping
 *
 * Use this helper to initialize a new vfs or filesystem object based on
 * the caller's fsgid. A common example is initializing the i_gid field of
 * a newly allocated inode triggered by a creation event such as mkdir or
 * O_CREAT. Other examples include the allocation of quotas for a specific
 * user.
 *
 * Return: the caller's current fsgid mapped up according to @idmap.
 */
static inline kgid_t mapped_fsgid(struct mnt_idmap *idmap,
				  struct user_namespace *fs_userns)
{
	return from_vfsgid(idmap, fs_userns, VFSGIDT_INIT(current_fsgid()));
}

#endif /* _LINUX_MNT_IDMAPPING_H */
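A usage sketch tying these helpers together: when a create-type operation runs on an idmapped mount, the caller's fsuid/fsgid are mapped through the mount idmapping before being committed to the new inode. my_init_inode_owner is a hypothetical name, and the pattern loosely mirrors what inode_fsuid_set()/inode_fsgid_set() in <linux/fs.h> do.

#include <linux/fs.h>
#include <linux/mnt_idmapping.h>

/* Hypothetical helper; fs_userns would be inode->i_sb->s_user_ns in practice. */
static void my_init_inode_owner(struct inode *inode, struct mnt_idmap *idmap,
				struct user_namespace *fs_userns)
{
	/* mapped_fsuid()/mapped_fsgid() return k{u,g}ids, which is
	 * exactly what i_uid/i_gid store */
	inode->i_uid = mapped_fsuid(idmap, fs_userns);
	inode->i_gid = mapped_fsgid(idmap, fs_userns);
}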
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IOPRIO_H
#define IOPRIO_H

#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/iocontext.h>

#include <uapi/linux/ioprio.h>

/*
 * Default IO priority.
 */
#define IOPRIO_DEFAULT	IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0)

/*
 * Check that a priority value has a valid class.
 */
static inline bool ioprio_valid(unsigned short ioprio)
{
	unsigned short class = IOPRIO_PRIO_CLASS(ioprio);

	return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE;
}

/*
 * if process has set io priority explicitly, use that. if not, convert
 * the cpu scheduler nice value to an io priority
 */
static inline int task_nice_ioprio(struct task_struct *task)
{
	return (task_nice(task) + 20) / 5;
}

/*
 * This is for the case where the task hasn't asked for a specific IO class.
 * Check for idle and rt task process, and return appropriate IO class.
 */
static inline int task_nice_ioclass(struct task_struct *task)
{
	if (task->policy == SCHED_IDLE)
		return IOPRIO_CLASS_IDLE;
	else if (rt_or_dl_task_policy(task))
		return IOPRIO_CLASS_RT;
	else
		return IOPRIO_CLASS_BE;
}

#ifdef CONFIG_BLOCK
/*
 * If the task has set an I/O priority, use that. Otherwise, return
 * the default I/O priority.
 *
 * Expected to be called for current task or with task_lock() held to keep
 * io_context stable.
 */
static inline int __get_task_ioprio(struct task_struct *p)
{
	struct io_context *ioc = p->io_context;
	int prio;

	if (!ioc)
		return IOPRIO_DEFAULT;

	if (p != current)
		lockdep_assert_held(&p->alloc_lock);

	prio = ioc->ioprio;
	if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
		prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
					 task_nice_ioprio(p));
	return prio;
}
#else
static inline int __get_task_ioprio(struct task_struct *p)
{
	return IOPRIO_DEFAULT;
}
#endif /* CONFIG_BLOCK */

static inline int get_current_ioprio(void)
{
	return __get_task_ioprio(current);
}

extern int set_task_ioprio(struct task_struct *task, int ioprio);

#ifdef CONFIG_BLOCK
extern int ioprio_check_cap(int ioprio);
#else
static inline int ioprio_check_cap(int ioprio)
{
	return -ENOTBLK;
}
#endif /* CONFIG_BLOCK */
#endif
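To make the nice-to-I/O-priority conversion above concrete: task_nice() spans [-20, 19], so (nice + 20) / 5 lands in [0, 7], the eight best-effort levels. The helper below is purely illustrative and not part of the header.

/*
 * Illustrative only (hypothetical name): compose a best-effort ioprio
 * from a nice value the same way task_nice_ioprio() does.
 *
 *   nice -20 -> ( 0)/5 = 0   highest BE level
 *   nice   0 -> (20)/5 = 4   default
 *   nice  19 -> (39)/5 = 7   lowest BE level
 */
static inline int nice_to_be_ioprio(int nice)
{
	return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, (nice + 20) / 5);
}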
/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "debug.h"
#include "hif-ops.h"

#define HTC_PACKET_CONTAINER_ALLOCATION	32
#define HTC_CONTROL_BUFFER_SIZE		(HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)

static int ath6kl_htc_pipe_tx(struct htc_target *handle,
			      struct htc_packet *packet);

static void ath6kl_htc_pipe_cleanup(struct htc_target *handle);

/* htc pipe tx path */
static inline void restore_tx_packet(struct htc_packet *packet)
{
	if (packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF) {
		skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
		packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF;
	}
}

static void do_send_completion(struct htc_endpoint *ep,
			       struct list_head *queue_to_indicate)
{
	struct htc_packet *packet;

	if (list_empty(queue_to_indicate)) {
		/* nothing to indicate */
		return;
	}

	if (ep->ep_cb.tx_comp_multi != NULL) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
			   __func__, ep->eid,
			   get_queue_depth(queue_to_indicate));
		/*
		 * a multiple send complete handler is being used,
		 * pass the queue to the handler
		 */
		ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);
		/*
		 * all packets are now owned by the callback,
		 * reset queue to be safe
		 */
		INIT_LIST_HEAD(queue_to_indicate);
	} else {
		/* using legacy EpTxComplete */
		do {
			packet = list_first_entry(queue_to_indicate,
						  struct htc_packet, list);
			list_del(&packet->list);
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "%s: calling ep %d send complete callback on packet 0x%p\n",
				   __func__, ep->eid, packet);
			ep->ep_cb.tx_complete(ep->target, packet);
		} while (!list_empty(queue_to_indicate));
	}
}

static void send_packet_completion(struct htc_target *target,
				   struct htc_packet *packet)
{
	struct htc_endpoint *ep = &target->endpoint[packet->endpoint];
	struct list_head container;

	restore_tx_packet(packet);
	INIT_LIST_HEAD(&container);
	list_add_tail(&packet->list, &container);

	/* do completion */
	do_send_completion(ep, &container);
}

static void get_htc_packet_credit_based(struct htc_target *target,
					struct htc_endpoint *ep,
					struct list_head *queue)
{
	int credits_required;
	int remainder;
	u8 send_flags;
	struct htc_packet *packet;
	unsigned int transfer_len;

	/* NOTE : the TX lock is held when this function is called */

	/* loop until we can grab as many packets out of the queue as we can */
	while (true) {
		send_flags = 0;
		if (list_empty(&ep->txq))
			break;

		/* get packet at head, but don't remove
it */ packet = list_first_entry(&ep->txq, struct htc_packet, list); ath6kl_dbg(ATH6KL_DBG_HTC, "%s: got head packet:0x%p , queue depth: %d\n", __func__, packet, get_queue_depth(&ep->txq)); transfer_len = packet->act_len + HTC_HDR_LENGTH; if (transfer_len <= target->tgt_cred_sz) { credits_required = 1; } else { /* figure out how many credits this message requires */ credits_required = transfer_len / target->tgt_cred_sz; remainder = transfer_len % target->tgt_cred_sz; if (remainder) credits_required++; } ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n", __func__, credits_required, ep->cred_dist.credits); if (ep->eid == ENDPOINT_0) { /* * endpoint 0 is special, it always has a credit and * does not require credit based flow control */ credits_required = 0; } else { if (ep->cred_dist.credits < credits_required) break; ep->cred_dist.credits -= credits_required; ep->ep_st.cred_cosumd += credits_required; /* check if we need credits back from the target */ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) { /* tell the target we need credits ASAP! */ send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE; ep->ep_st.cred_low_indicate += 1; ath6kl_dbg(ATH6KL_DBG_HTC, "%s: host needs credits\n", __func__); } } /* now we can fully dequeue */ packet = list_first_entry(&ep->txq, struct htc_packet, list); list_del(&packet->list); /* save the number of credits this packet consumed */ packet->info.tx.cred_used = credits_required; /* save send flags */ packet->info.tx.flags = send_flags; packet->info.tx.seqno = ep->seqno; ep->seqno++; /* queue this packet into the caller's queue */ list_add_tail(&packet->list, queue); } } static void get_htc_packet(struct htc_target *target, struct htc_endpoint *ep, struct list_head *queue, int resources) { struct htc_packet *packet; /* NOTE : the TX lock is held when this function is called */ /* loop until we can grab as many packets out of the queue as we can */ while (resources) { if (list_empty(&ep->txq)) break; packet = list_first_entry(&ep->txq, struct htc_packet, list); list_del(&packet->list); ath6kl_dbg(ATH6KL_DBG_HTC, "%s: got packet:0x%p , new queue depth: %d\n", __func__, packet, get_queue_depth(&ep->txq)); packet->info.tx.seqno = ep->seqno; packet->info.tx.flags = 0; packet->info.tx.cred_used = 0; ep->seqno++; /* queue this packet into the caller's queue */ list_add_tail(&packet->list, queue); resources--; } } static int htc_issue_packets(struct htc_target *target, struct htc_endpoint *ep, struct list_head *pkt_queue) { int status = 0; u16 payload_len; struct sk_buff *skb; struct htc_frame_hdr *htc_hdr; struct htc_packet *packet; ath6kl_dbg(ATH6KL_DBG_HTC, "%s: queue: 0x%p, pkts %d\n", __func__, pkt_queue, get_queue_depth(pkt_queue)); while (!list_empty(pkt_queue)) { packet = list_first_entry(pkt_queue, struct htc_packet, list); list_del(&packet->list); skb = packet->skb; if (!skb) { WARN_ON_ONCE(1); status = -EINVAL; break; } payload_len = packet->act_len; /* setup HTC frame header */ htc_hdr = skb_push(skb, sizeof(*htc_hdr)); if (!htc_hdr) { WARN_ON_ONCE(1); status = -EINVAL; break; } packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF; put_unaligned_le16(payload_len, &htc_hdr->payld_len); htc_hdr->flags = packet->info.tx.flags; htc_hdr->eid = (u8) packet->endpoint; htc_hdr->ctrl[0] = 0; htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno; spin_lock_bh(&target->tx_lock); /* store in look up queue to match completions */ list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue); ep->ep_st.tx_issued += 1; spin_unlock_bh(&target->tx_lock); status = 
ath6kl_hif_pipe_send(target->dev->ar, ep->pipe.pipeid_ul, NULL, skb); if (status != 0) { if (status != -ENOMEM) { /* TODO: if more than 1 endpoint maps to the * same PipeID, it is possible to run out of * resources in the HIF layer. * Don't emit the error */ ath6kl_dbg(ATH6KL_DBG_HTC, "%s: failed status:%d\n", __func__, status); } spin_lock_bh(&target->tx_lock); list_del(&packet->list); /* reclaim credits */ ep->cred_dist.credits += packet->info.tx.cred_used; spin_unlock_bh(&target->tx_lock); /* put it back into the caller's queue */ list_add(&packet->list, pkt_queue); break; } } if (status != 0) { while (!list_empty(pkt_queue)) { if (status != -ENOMEM) { ath6kl_dbg(ATH6KL_DBG_HTC, "%s: failed pkt:0x%p status:%d\n", __func__, packet, status); } packet = list_first_entry(pkt_queue, struct htc_packet, list); list_del(&packet->list); packet->status = status; send_packet_completion(target, packet); } } return status; } static enum htc_send_queue_result htc_try_send(struct htc_target *target, struct htc_endpoint *ep, struct list_head *txq) { struct list_head send_queue; /* temp queue to hold packets */ struct htc_packet *packet, *tmp_pkt; struct ath6kl *ar = target->dev->ar; enum htc_send_full_action action; int tx_resources, overflow, txqueue_depth, i, good_pkts; u8 pipeid; ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n", __func__, txq, (txq == NULL) ? 0 : get_queue_depth(txq)); /* init the local send queue */ INIT_LIST_HEAD(&send_queue); /* * a NULL txq means the caller didn't provide a queue and just * wants us to check the queues and send */ if (txq != NULL) { if (list_empty(txq)) { /* empty queue */ return HTC_SEND_QUEUE_DROP; } spin_lock_bh(&target->tx_lock); txqueue_depth = get_queue_depth(&ep->txq); spin_unlock_bh(&target->tx_lock); if (txqueue_depth >= ep->max_txq_depth) { /* we've already overflowed */ overflow = get_queue_depth(txq); } else { /* get how much we will overflow by */ overflow = txqueue_depth; overflow += get_queue_depth(txq); /* get how much we will overflow the TX queue by */ overflow -= ep->max_txq_depth; } /* if overflow is negative or zero, we are okay */ if (overflow > 0) { ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n", __func__, ep->eid, overflow, txqueue_depth, ep->max_txq_depth); } if ((overflow <= 0) || (ep->ep_cb.tx_full == NULL)) { /* * all packets will fit or caller did not provide send * full indication handler -- just move all of them * to the local send_queue object */ list_splice_tail_init(txq, &send_queue); } else { good_pkts = get_queue_depth(txq) - overflow; if (good_pkts < 0) { WARN_ON_ONCE(1); return HTC_SEND_QUEUE_DROP; } /* we have overflowed, and a callback is provided */ /* dequeue all non-overflow packets to the sendqueue */ for (i = 0; i < good_pkts; i++) { /* pop off caller's queue */ packet = list_first_entry(txq, struct htc_packet, list); /* move to local queue */ list_move_tail(&packet->list, &send_queue); } /* * the caller's queue now has all the packets that won't fit; * walk through the caller's queue and indicate each to * the send full handler */ list_for_each_entry_safe(packet, tmp_pkt, txq, list) { ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Indicate overflowed TX pkts: %p\n", __func__, packet); action = ep->ep_cb.tx_full(ep->target, packet); if (action == HTC_SEND_FULL_DROP) { /* callback wants the packet dropped */ ep->ep_st.tx_dropped += 1; /* leave this one in the caller's queue * for cleanup */ } else { /* callback wants to keep this packet, * move from caller's queue to the send * queue */ list_move_tail(&packet->list, &send_queue); } } if (list_empty(&send_queue)) { /* no packets made it in, caller will clean up */ return HTC_SEND_QUEUE_DROP; } } } if (!ep->pipe.tx_credit_flow_enabled) { tx_resources = ath6kl_hif_pipe_get_free_queue_number(ar, ep->pipe.pipeid_ul); } else { tx_resources = 0; } spin_lock_bh(&target->tx_lock); if (!list_empty(&send_queue)) { /* transfer packets to tail */ list_splice_tail_init(&send_queue, &ep->txq); if (!list_empty(&send_queue)) { WARN_ON_ONCE(1); spin_unlock_bh(&target->tx_lock); return HTC_SEND_QUEUE_DROP; } INIT_LIST_HEAD(&send_queue); } /* increment tx processing count on entry */ ep->tx_proc_cnt++; if (ep->tx_proc_cnt > 1) { /* * Another thread or task is draining the TX queues on this * endpoint; that thread will reset the tx processing count * when the queue is drained. */ ep->tx_proc_cnt--; spin_unlock_bh(&target->tx_lock); return HTC_SEND_QUEUE_OK; } /***** beyond this point only 1 thread may enter ******/ /* * Now drain the endpoint TX queue for transmission as long as we have * enough transmit resources. */ while (true) { if (get_queue_depth(&ep->txq) == 0) break; if (ep->pipe.tx_credit_flow_enabled) { /* * The credit based mechanism provides flow control * based on target transmit resource availability; we * assume that the HIF layer will always have bus * resources greater than target transmit resources. */ get_htc_packet_credit_based(target, ep, &send_queue); } else { /* * Get all packets for this endpoint that we can * for this pass. */ get_htc_packet(target, ep, &send_queue, tx_resources); } if (get_queue_depth(&send_queue) == 0) { /* * Didn't get packets because we are out of resources * or the TX queue was drained. */ break; } spin_unlock_bh(&target->tx_lock); /* send what we can */ htc_issue_packets(target, ep, &send_queue); if (!ep->pipe.tx_credit_flow_enabled) { pipeid = ep->pipe.pipeid_ul; tx_resources = ath6kl_hif_pipe_get_free_queue_number(ar, pipeid); } spin_lock_bh(&target->tx_lock); } /* done with this endpoint, we can clear the count */ ep->tx_proc_cnt = 0; spin_unlock_bh(&target->tx_lock); return HTC_SEND_QUEUE_OK; } /* htc control packet manipulation */ static void destroy_htc_txctrl_packet(struct htc_packet *packet) { struct sk_buff *skb; skb = packet->skb; dev_kfree_skb(skb); kfree(packet); } static struct htc_packet *build_htc_txctrl_packet(void) { struct htc_packet *packet = NULL; struct sk_buff *skb; packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL); if (packet == NULL) return NULL; skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL); if (skb == NULL) { kfree(packet); return NULL; } packet->skb = skb; return packet; } static void htc_free_txctrl_packet(struct htc_target *target, struct htc_packet *packet) { destroy_htc_txctrl_packet(packet); } static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target) { return build_htc_txctrl_packet(); } static void htc_txctrl_complete(struct htc_target *target, struct htc_packet *packet) { htc_free_txctrl_packet(target, packet); } #define MAX_MESSAGE_SIZE 1536 static int htc_setup_target_buffer_assignments(struct htc_target *target) { int status, credits, credit_per_maxmsg, i; struct htc_pipe_txcredit_alloc *entry; unsigned int hif_usbaudioclass = 0; credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz; if (MAX_MESSAGE_SIZE % target->tgt_cred_sz) credit_per_maxmsg++; /* TODO, this should be configured by the caller!
*/ credits = target->tgt_creds; entry = &target->pipe.txcredit_alloc[0]; status = -ENOMEM; /* FIXME: hif_usbaudioclass is always zero */ if (hif_usbaudioclass) { ath6kl_dbg(ATH6KL_DBG_HTC, "%s: For USB Audio Class- Total:%d\n", __func__, credits); entry++; entry++; /* Setup VO Service To have Max Credits */ entry->service_id = WMI_DATA_VO_SVC; entry->credit_alloc = (credits - 6); if (entry->credit_alloc == 0) entry->credit_alloc++; credits -= (int) entry->credit_alloc; if (credits <= 0) return status; entry++; entry->service_id = WMI_CONTROL_SVC; entry->credit_alloc = credit_per_maxmsg; credits -= (int) entry->credit_alloc; if (credits <= 0) return status; /* leftovers go to best effort */ entry++; entry++; entry->service_id = WMI_DATA_BE_SVC; entry->credit_alloc = (u8) credits; status = 0; } else { entry++; entry->service_id = WMI_DATA_VI_SVC; entry->credit_alloc = credits / 4; if (entry->credit_alloc == 0) entry->credit_alloc++; credits -= (int) entry->credit_alloc; if (credits <= 0) return status; entry++; entry->service_id = WMI_DATA_VO_SVC; entry->credit_alloc = credits / 4; if (entry->credit_alloc == 0) entry->credit_alloc++; credits -= (int) entry->credit_alloc; if (credits <= 0) return status; entry++; entry->service_id = WMI_CONTROL_SVC; entry->credit_alloc = credit_per_maxmsg; credits -= (int) entry->credit_alloc; if (credits <= 0) return status; entry++; entry->service_id = WMI_DATA_BK_SVC; entry->credit_alloc = credit_per_maxmsg; credits -= (int) entry->credit_alloc; if (credits <= 0) return status; /* leftovers go to best effort */ entry++; entry->service_id = WMI_DATA_BE_SVC; entry->credit_alloc = (u8) credits; status = 0; } if (status == 0) { for (i = 0; i < ENDPOINT_MAX; i++) { if (target->pipe.txcredit_alloc[i].service_id != 0) { ath6kl_dbg(ATH6KL_DBG_HTC, "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n", i, target->pipe.txcredit_alloc[i].service_id, target->pipe.txcredit_alloc[i].credit_alloc); } } } return status; } /* process credit reports and call distribution function */ static void htc_process_credit_report(struct htc_target *target, struct htc_credit_report *rpt, int num_entries, enum htc_endpoint_id from_ep) { int total_credits = 0, i; struct htc_endpoint *ep; /* lock out TX while we update credits */ spin_lock_bh(&target->tx_lock); for (i = 0; i < num_entries; i++, rpt++) { if (rpt->eid >= ENDPOINT_MAX) { WARN_ON_ONCE(1); spin_unlock_bh(&target->tx_lock); return; } ep = &target->endpoint[rpt->eid]; ep->cred_dist.credits += rpt->credits; if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) { spin_unlock_bh(&target->tx_lock); htc_try_send(target, ep, NULL); spin_lock_bh(&target->tx_lock); } total_credits += rpt->credits; } ath6kl_dbg(ATH6KL_DBG_HTC, "Report indicated %d credits to distribute\n", total_credits); spin_unlock_bh(&target->tx_lock); } /* flush endpoint TX queue */ static void htc_flush_tx_endpoint(struct htc_target *target, struct htc_endpoint *ep, u16 tag) { struct htc_packet *packet; spin_lock_bh(&target->tx_lock); while (get_queue_depth(&ep->txq)) { packet = list_first_entry(&ep->txq, struct htc_packet, list); list_del(&packet->list); packet->status = 0; send_packet_completion(target, packet); } spin_unlock_bh(&target->tx_lock); } /* * In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC; * since upper layers expect struct htc_packet containers, we use the completed * skb and look up its corresponding HTC packet buffer from a lookup list. * This is extra overhead that can be fixed by re-aligning HIF interfaces with * HTC. */ static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target, struct htc_endpoint *ep, struct sk_buff *skb) { struct htc_packet *packet, *tmp_pkt, *found_packet = NULL; spin_lock_bh(&target->tx_lock); /* * iterate from the front of the tx lookup queue; this lookup should be * fast since lower layers complete in order, so the completed packet * should generally be at the head of the list */ list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue, list) { /* check for removal */ if (skb == packet->skb) { /* found it */ list_del(&packet->list); found_packet = packet; break; } } spin_unlock_bh(&target->tx_lock); return found_packet; } static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb) { struct htc_target *target = ar->htc_target; struct htc_frame_hdr *htc_hdr; struct htc_endpoint *ep; struct htc_packet *packet; u8 ep_id, *netdata; netdata = skb->data; htc_hdr = (struct htc_frame_hdr *) netdata; ep_id = htc_hdr->eid; ep = &target->endpoint[ep_id]; packet = htc_lookup_tx_packet(target, ep, skb); if (packet == NULL) { /* may have already been flushed and freed */ ath6kl_err("HTC TX lookup failed!\n"); } else { /* will be giving this buffer back to upper layers */ packet->status = 0; send_packet_completion(target, packet); } skb = NULL; if (!ep->pipe.tx_credit_flow_enabled) { /* * note: when using TX credit flow, the re-checking of queues * happens when credits flow back from the target. in the * non-TX credit case, we recheck after the packet completes */ htc_try_send(target, ep, NULL); } return 0; } static int htc_send_packets_multiple(struct htc_target *target, struct list_head *pkt_queue) { struct htc_endpoint *ep; struct htc_packet *packet, *tmp_pkt; if (list_empty(pkt_queue)) return -EINVAL; /* get first packet to find out which ep the packets will go into */ packet = list_first_entry(pkt_queue, struct htc_packet, list); if (packet->endpoint >= ENDPOINT_MAX) { WARN_ON_ONCE(1); return -EINVAL; } ep = &target->endpoint[packet->endpoint]; htc_try_send(target, ep, pkt_queue); /* do completion on any packets that couldn't get in */ if (!list_empty(pkt_queue)) { list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) { packet->status = -ENOMEM; } do_send_completion(ep, pkt_queue); } return 0; } /* htc pipe rx path */ static struct htc_packet *alloc_htc_packet_container(struct htc_target *target) { struct htc_packet *packet; spin_lock_bh(&target->rx_lock); if (target->pipe.htc_packet_pool == NULL) { spin_unlock_bh(&target->rx_lock); return NULL; } packet = target->pipe.htc_packet_pool; target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next; spin_unlock_bh(&target->rx_lock); packet->list.next = NULL; return packet; } static void free_htc_packet_container(struct htc_target *target, struct htc_packet *packet) { struct list_head *lh; spin_lock_bh(&target->rx_lock); if (target->pipe.htc_packet_pool == NULL) { target->pipe.htc_packet_pool = packet; packet->list.next = NULL; } else { lh = (struct list_head *) target->pipe.htc_packet_pool; packet->list.next = lh; target->pipe.htc_packet_pool = packet; } spin_unlock_bh(&target->rx_lock); } static int htc_process_trailer(struct htc_target *target, u8 *buffer, int len, enum htc_endpoint_id from_ep) { struct htc_credit_report *report; struct htc_record_hdr *record; u8 *record_buf; int status = 0; while (len > 0) { if (len < sizeof(struct htc_record_hdr)) { status = -EINVAL; break; } /* these are byte aligned structs */ record = (struct htc_record_hdr *) buffer; len -= sizeof(struct
htc_record_hdr); buffer += sizeof(struct htc_record_hdr); if (record->len > len) { /* no room left in buffer for record */ ath6kl_dbg(ATH6KL_DBG_HTC, "invalid length: %d (id:%d) buffer has: %d bytes left\n", record->len, record->rec_id, len); status = -EINVAL; break; } /* start of record follows the header */ record_buf = buffer; switch (record->rec_id) { case HTC_RECORD_CREDITS: if (record->len < sizeof(struct htc_credit_report)) { WARN_ON_ONCE(1); return -EINVAL; } report = (struct htc_credit_report *) record_buf; htc_process_credit_report(target, report, record->len / sizeof(*report), from_ep); break; default: ath6kl_dbg(ATH6KL_DBG_HTC, "unhandled record: id:%d length:%d\n", record->rec_id, record->len); break; } /* advance buffer past this record for next time around */ buffer += record->len; len -= record->len; } return status; } static void do_recv_completion(struct htc_endpoint *ep, struct list_head *queue_to_indicate) { struct htc_packet *packet; if (list_empty(queue_to_indicate)) { /* nothing to indicate */ return; } /* using legacy EpRecv */ while (!list_empty(queue_to_indicate)) { packet = list_first_entry(queue_to_indicate, struct htc_packet, list); list_del(&packet->list); ep->ep_cb.rx(ep->target, packet); } return; } static void recv_packet_completion(struct htc_target *target, struct htc_endpoint *ep, struct htc_packet *packet) { struct list_head container; INIT_LIST_HEAD(&container); list_add_tail(&packet->list, &container); /* do completion */ do_recv_completion(ep, &container); } static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipeid) { struct htc_target *target = ar->htc_target; u8 *netdata, *trailer, hdr_info; struct htc_frame_hdr *htc_hdr; u32 netlen, trailerlen = 0; struct htc_packet *packet; struct htc_endpoint *ep; u16 payload_len; int status = 0; /* * ar->htc_target can be NULL due to a race condition that can occur * during driver initialization (we do 'ath6kl_hif_power_on' before * initializing 'ar->htc_target' via 'ath6kl_htc_create'). * 'ath6kl_hif_power_on' assigns 'ath6kl_recv_complete' as the * usb_complete_t/callback function for 'usb_fill_bulk_urb', so * ar->htc_target may still be NULL when this path runs via * ath6kl_recv_complete -> ath6kl_usb_io_comp_work. */ if (!target) { ath6kl_dbg(ATH6KL_DBG_HTC, "Target not yet initialized\n"); status = -EINVAL; goto free_skb; } netdata = skb->data; netlen = skb->len; htc_hdr = (struct htc_frame_hdr *) netdata; if (htc_hdr->eid >= ENDPOINT_MAX) { ath6kl_dbg(ATH6KL_DBG_HTC, "HTC Rx: invalid EndpointID=%d\n", htc_hdr->eid); status = -EINVAL; goto free_skb; } ep = &target->endpoint[htc_hdr->eid]; payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len)); if (netlen < (payload_len + HTC_HDR_LENGTH)) { ath6kl_dbg(ATH6KL_DBG_HTC, "HTC Rx: insufficient length, got:%d expected =%zu\n", netlen, payload_len + HTC_HDR_LENGTH); status = -EINVAL; goto free_skb; } /* get flags to check for trailer */ hdr_info = htc_hdr->flags; if (hdr_info & HTC_FLG_RX_TRAILER) { /* extract the trailer length */ hdr_info = htc_hdr->ctrl[0]; if ((hdr_info < sizeof(struct htc_record_hdr)) || (hdr_info > payload_len)) { ath6kl_dbg(ATH6KL_DBG_HTC, "invalid header: payloadlen should be %d, CB[0]: %d\n", payload_len, hdr_info); status = -EINVAL; goto free_skb; } trailerlen = hdr_info; /* process trailer after hdr/apps payload */ trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH + payload_len - hdr_info; status = htc_process_trailer(target, trailer, hdr_info, htc_hdr->eid); if (status != 0) goto free_skb; } if (((int) payload_len - (int) trailerlen) <= 0) { /* zero length packet with trailer, just drop these */ goto free_skb; } if (htc_hdr->eid == ENDPOINT_0) { /* handle HTC control message */ if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) { /* * fatal: target should not send unsolicited * messages on endpoint 0 */ ath6kl_dbg(ATH6KL_DBG_HTC, "HTC ignores Rx Ctrl after setup complete\n"); status = -EINVAL; goto free_skb; } /* remove HTC header */ skb_pull(skb, HTC_HDR_LENGTH); netdata = skb->data; netlen = skb->len; spin_lock_bh(&target->rx_lock); target->pipe.ctrl_response_valid = true; target->pipe.ctrl_response_len = min_t(int, netlen, HTC_MAX_CTRL_MSG_LEN); memcpy(target->pipe.ctrl_response_buf, netdata, target->pipe.ctrl_response_len); spin_unlock_bh(&target->rx_lock); dev_kfree_skb(skb); skb = NULL; goto free_skb; } /* * TODO: the message based HIF architecture allocates net bufs * for recv packets since it bridges that HIF to upper layers, * which expect HTC packets, so we form the packets here */ packet = alloc_htc_packet_container(target); if (packet == NULL) { status = -ENOMEM; goto free_skb; } packet->status = 0; packet->endpoint = htc_hdr->eid; packet->pkt_cntxt = skb; /* TODO: for backwards compatibility */ packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH; packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen; /* * TODO: this is a hack because the driver layer will set the * actual len of the skb again which will just double the len */ skb_trim(skb, 0); recv_packet_completion(target, ep, packet); /* recover the packet container */ free_htc_packet_container(target, packet); skb = NULL; free_skb: dev_kfree_skb(skb); return status; } static void htc_flush_rx_queue(struct htc_target *target, struct htc_endpoint *ep) { struct list_head container; struct htc_packet *packet; spin_lock_bh(&target->rx_lock); while (1) { if (list_empty(&ep->rx_bufq)) break; packet = list_first_entry(&ep->rx_bufq, struct htc_packet, list); list_del(&packet->list); spin_unlock_bh(&target->rx_lock); packet->status = -ECANCELED; packet->act_len = 0; ath6kl_dbg(ATH6KL_DBG_HTC, "Flushing RX packet:0x%p, length:%d, ep:%d\n", packet, packet->buf_len, packet->endpoint); INIT_LIST_HEAD(&container); list_add_tail(&packet->list, &container); /* give the packet
back */ do_recv_completion(ep, &container); spin_lock_bh(&target->rx_lock); } spin_unlock_bh(&target->rx_lock); } /* polling routine to wait for a control packet to be received */ static int htc_wait_recv_ctrl_message(struct htc_target *target) { int count = HTC_TARGET_RESPONSE_POLL_COUNT; while (count > 0) { spin_lock_bh(&target->rx_lock); if (target->pipe.ctrl_response_valid) { target->pipe.ctrl_response_valid = false; spin_unlock_bh(&target->rx_lock); break; } spin_unlock_bh(&target->rx_lock); count--; msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT); } if (count <= 0) { ath6kl_warn("htc pipe control receive timeout!\n"); return -ETIMEDOUT; } return 0; } static void htc_rxctrl_complete(struct htc_target *context, struct htc_packet *packet) { struct sk_buff *skb = packet->skb; if (packet->endpoint == ENDPOINT_0 && packet->status == -ECANCELED && skb != NULL) dev_kfree_skb(skb); } /* htc pipe initialization */ static void reset_endpoint_states(struct htc_target *target) { struct htc_endpoint *ep; int i; for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { ep = &target->endpoint[i]; ep->svc_id = 0; ep->len_max = 0; ep->max_txq_depth = 0; ep->eid = i; INIT_LIST_HEAD(&ep->txq); INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue); INIT_LIST_HEAD(&ep->rx_bufq); ep->target = target; ep->pipe.tx_credit_flow_enabled = true; } } /* start HTC, this is called after all services are connected */ static int htc_config_target_hif_pipe(struct htc_target *target) { return 0; } /* htc service functions */ static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id) { u8 allocation = 0; int i; for (i = 0; i < ENDPOINT_MAX; i++) { if (target->pipe.txcredit_alloc[i].service_id == service_id) allocation = target->pipe.txcredit_alloc[i].credit_alloc; } if (allocation == 0) { ath6kl_dbg(ATH6KL_DBG_HTC, "HTC Service TX : 0x%2.2X : allocation is zero!\n", service_id); } return allocation; } static int ath6kl_htc_pipe_conn_service(struct htc_target *target, struct htc_service_connect_req *conn_req, struct htc_service_connect_resp *conn_resp) { struct ath6kl *ar = target->dev->ar; struct htc_packet *packet = NULL; struct htc_conn_service_resp *resp_msg; struct htc_conn_service_msg *conn_msg; enum htc_endpoint_id assigned_epid = ENDPOINT_MAX; bool disable_credit_flowctrl = false; unsigned int max_msg_size = 0; struct htc_endpoint *ep; int length, status = 0; struct sk_buff *skb; u8 tx_alloc; u16 flags; if (conn_req->svc_id == 0) { WARN_ON_ONCE(1); status = -EINVAL; goto free_packet; } if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) { /* special case for pseudo control service */ assigned_epid = ENDPOINT_0; max_msg_size = HTC_MAX_CTRL_MSG_LEN; tx_alloc = 0; } else { tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id); if (tx_alloc == 0) { status = -ENOMEM; goto free_packet; } /* allocate a packet to send to the target */ packet = htc_alloc_txctrl_packet(target); if (packet == NULL) { WARN_ON_ONCE(1); status = -ENOMEM; goto free_packet; } skb = packet->skb; length = sizeof(struct htc_conn_service_msg); /* assemble connect service message */ conn_msg = skb_put(skb, length); if (conn_msg == NULL) { WARN_ON_ONCE(1); status = -EINVAL; goto free_packet; } memset(conn_msg, 0, sizeof(struct htc_conn_service_msg)); conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID); conn_msg->svc_id = cpu_to_le16(conn_req->svc_id); conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags & ~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK); /* tell target desired recv alloc for this ep */ flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT; 
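/*
 * Note: the credit allocation rides in the RECV_ALLOC bit-field of
 * conn_flags; that field was masked off above, so the OR below only
 * fills that field (assuming tx_alloc fits within the field width).
 */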
conn_msg->conn_flags |= cpu_to_le16(flags); if (conn_req->conn_flags & HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) { disable_credit_flowctrl = true; } set_htc_pkt_info(packet, NULL, (u8 *) conn_msg, length, ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); status = ath6kl_htc_pipe_tx(target, packet); /* we don't own it anymore */ packet = NULL; if (status != 0) goto free_packet; /* wait for response */ status = htc_wait_recv_ctrl_message(target); if (status != 0) goto free_packet; /* we controlled the buffer creation so it has to be * properly aligned */ resp_msg = (struct htc_conn_service_resp *) target->pipe.ctrl_response_buf; if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) || (target->pipe.ctrl_response_len < sizeof(*resp_msg))) { /* this message is not valid */ WARN_ON_ONCE(1); status = -EINVAL; goto free_packet; } ath6kl_dbg(ATH6KL_DBG_TRC, "%s: service 0x%X conn resp: status: %d ep: %d\n", __func__, resp_msg->svc_id, resp_msg->status, resp_msg->eid); conn_resp->resp_code = resp_msg->status; /* check response status */ if (resp_msg->status != HTC_SERVICE_SUCCESS) { ath6kl_dbg(ATH6KL_DBG_HTC, "Target failed service 0x%X connect request (status:%d)\n", resp_msg->svc_id, resp_msg->status); status = -EINVAL; goto free_packet; } assigned_epid = (enum htc_endpoint_id) resp_msg->eid; max_msg_size = le16_to_cpu(resp_msg->max_msg_sz); } /* the rest are parameter checks so set the error status */ status = -EINVAL; if (assigned_epid >= ENDPOINT_MAX) { WARN_ON_ONCE(1); goto free_packet; } if (max_msg_size == 0) { WARN_ON_ONCE(1); goto free_packet; } ep = &target->endpoint[assigned_epid]; ep->eid = assigned_epid; if (ep->svc_id != 0) { /* endpoint already in use! */ WARN_ON_ONCE(1); goto free_packet; } /* return assigned endpoint to caller */ conn_resp->endpoint = assigned_epid; conn_resp->len_max = max_msg_size; /* setup the endpoint */ ep->svc_id = conn_req->svc_id; /* this marks ep in use */ ep->max_txq_depth = conn_req->max_txq_depth; ep->len_max = max_msg_size; ep->cred_dist.credits = tx_alloc; ep->cred_dist.cred_sz = target->tgt_cred_sz; ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz; if (max_msg_size % target->tgt_cred_sz) ep->cred_dist.cred_per_msg++; /* copy all the callbacks */ ep->ep_cb = conn_req->ep_cb; /* initialize tx_drop_packet_threshold */ ep->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM; status = ath6kl_hif_pipe_map_service(ar, ep->svc_id, &ep->pipe.pipeid_ul, &ep->pipe.pipeid_dl); if (status != 0) goto free_packet; ath6kl_dbg(ATH6KL_DBG_HTC, "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n", ep->svc_id, ep->pipe.pipeid_ul, ep->pipe.pipeid_dl, ep->eid); if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) { ep->pipe.tx_credit_flow_enabled = false; ath6kl_dbg(ATH6KL_DBG_HTC, "SVC: 0x%4.4X ep:%d TX flow control off\n", ep->svc_id, assigned_epid); } free_packet: if (packet != NULL) htc_free_txctrl_packet(target, packet); return status; } /* htc export functions */ static void *ath6kl_htc_pipe_create(struct ath6kl *ar) { int status = 0; struct htc_endpoint *ep = NULL; struct htc_target *target = NULL; struct htc_packet *packet; int i; target = kzalloc(sizeof(struct htc_target), GFP_KERNEL); if (target == NULL) { ath6kl_err("htc create unable to allocate memory\n"); status = -ENOMEM; goto fail_htc_create; } spin_lock_init(&target->htc_lock); spin_lock_init(&target->rx_lock); spin_lock_init(&target->tx_lock); reset_endpoint_states(target); for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) { packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL); if 
(packet != NULL) free_htc_packet_container(target, packet); } target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL); if (!target->dev) { ath6kl_err("unable to allocate memory\n"); status = -ENOMEM; goto fail_htc_create; } target->dev->ar = ar; target->dev->htc_cnxt = target; /* Get HIF default pipe for HTC message exchange */ ep = &target->endpoint[ENDPOINT_0]; ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul, &ep->pipe.pipeid_dl); return target; fail_htc_create: if (status != 0) { if (target != NULL) ath6kl_htc_pipe_cleanup(target); target = NULL; } return target; } /* cleanup the HTC instance */ static void ath6kl_htc_pipe_cleanup(struct htc_target *target) { struct htc_packet *packet; while (true) { packet = alloc_htc_packet_container(target); if (packet == NULL) break; kfree(packet); } kfree(target->dev); /* kfree our instance */ kfree(target); } static int ath6kl_htc_pipe_start(struct htc_target *target) { struct sk_buff *skb; struct htc_setup_comp_ext_msg *setup; struct htc_packet *packet; htc_config_target_hif_pipe(target); /* allocate a buffer to send */ packet = htc_alloc_txctrl_packet(target); if (packet == NULL) { WARN_ON_ONCE(1); return -ENOMEM; } skb = packet->skb; /* assemble setup complete message */ setup = skb_put(skb, sizeof(*setup)); memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg)); setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID); ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n"); set_htc_pkt_info(packet, NULL, (u8 *) setup, sizeof(struct htc_setup_comp_ext_msg), ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE; return ath6kl_htc_pipe_tx(target, packet); } static void ath6kl_htc_pipe_stop(struct htc_target *target) { int i; struct htc_endpoint *ep; /* cleanup endpoints */ for (i = 0; i < ENDPOINT_MAX; i++) { ep = &target->endpoint[i]; htc_flush_rx_queue(target, ep); htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL); } reset_endpoint_states(target); target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE; } static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target, enum htc_endpoint_id endpoint) { int num; spin_lock_bh(&target->rx_lock); num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq)); spin_unlock_bh(&target->rx_lock); return num; } static int ath6kl_htc_pipe_tx(struct htc_target *target, struct htc_packet *packet) { struct list_head queue; ath6kl_dbg(ATH6KL_DBG_HTC, "%s: endPointId: %d, buffer: 0x%p, length: %d\n", __func__, packet->endpoint, packet->buf, packet->act_len); INIT_LIST_HEAD(&queue); list_add_tail(&packet->list, &queue); return htc_send_packets_multiple(target, &queue); } static int ath6kl_htc_pipe_wait_target(struct htc_target *target) { struct htc_ready_ext_msg *ready_msg; struct htc_service_connect_req connect; struct htc_service_connect_resp resp; int status = 0; status = htc_wait_recv_ctrl_message(target); if (status != 0) return status; if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) { ath6kl_warn("invalid htc pipe ready msg len: %d\n", target->pipe.ctrl_response_len); return -ECOMM; } ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf; if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) { ath6kl_warn("invalid htc pipe ready msg: 0x%x\n", ready_msg->ver2_0_info.msg_id); return -ECOMM; } ath6kl_dbg(ATH6KL_DBG_HTC, "Target Ready! 
: transmit resources : %d size:%d\n", ready_msg->ver2_0_info.cred_cnt, ready_msg->ver2_0_info.cred_sz); target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt); target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz); if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0)) return -ECOMM; htc_setup_target_buffer_assignments(target); /* setup our pseudo HTC control endpoint connection */ memset(&connect, 0, sizeof(connect)); memset(&resp, 0, sizeof(resp)); connect.ep_cb.tx_complete = htc_txctrl_complete; connect.ep_cb.rx = htc_rxctrl_complete; connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS; connect.svc_id = HTC_CTRL_RSVD_SVC; /* connect fake service */ status = ath6kl_htc_pipe_conn_service(target, &connect, &resp); return status; } static void ath6kl_htc_pipe_flush_txep(struct htc_target *target, enum htc_endpoint_id endpoint, u16 tag) { struct htc_endpoint *ep = &target->endpoint[endpoint]; if (ep->svc_id == 0) { WARN_ON_ONCE(1); /* not in use.. */ return; } htc_flush_tx_endpoint(target, ep, tag); } static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target, struct list_head *pkt_queue) { struct htc_packet *packet, *tmp_pkt, *first; struct htc_endpoint *ep; int status = 0; if (list_empty(pkt_queue)) return -EINVAL; first = list_first_entry(pkt_queue, struct htc_packet, list); if (first->endpoint >= ENDPOINT_MAX) { WARN_ON_ONCE(1); return -EINVAL; } ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n", __func__, first->endpoint, get_queue_depth(pkt_queue), first->buf_len); ep = &target->endpoint[first->endpoint]; spin_lock_bh(&target->rx_lock); /* store receive packets */ list_splice_tail_init(pkt_queue, &ep->rx_bufq); spin_unlock_bh(&target->rx_lock); if (status != 0) { /* walk through queue and mark each one canceled */ list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) { packet->status = -ECANCELED; } do_recv_completion(ep, pkt_queue); } return status; } static void ath6kl_htc_pipe_activity_changed(struct htc_target *target, enum htc_endpoint_id ep, bool active) { /* TODO */ } static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target) { struct htc_endpoint *endpoint; struct htc_packet *packet, *tmp_pkt; int i; for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { endpoint = &target->endpoint[i]; spin_lock_bh(&target->rx_lock); list_for_each_entry_safe(packet, tmp_pkt, &endpoint->rx_bufq, list) { list_del(&packet->list); spin_unlock_bh(&target->rx_lock); ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx flush pkt 0x%p len %d ep %d\n", packet, packet->buf_len, packet->endpoint); dev_kfree_skb(packet->pkt_cntxt); spin_lock_bh(&target->rx_lock); } spin_unlock_bh(&target->rx_lock); } } static int ath6kl_htc_pipe_credit_setup(struct htc_target *target, struct ath6kl_htc_credit_info *info) { return 0; } static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = { .create = ath6kl_htc_pipe_create, .wait_target = ath6kl_htc_pipe_wait_target, .start = ath6kl_htc_pipe_start, .conn_service = ath6kl_htc_pipe_conn_service, .tx = ath6kl_htc_pipe_tx, .stop = ath6kl_htc_pipe_stop, .cleanup = ath6kl_htc_pipe_cleanup, .flush_txep = ath6kl_htc_pipe_flush_txep, .flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf, .activity_changed = ath6kl_htc_pipe_activity_changed, .get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num, .add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple, .credit_setup = ath6kl_htc_pipe_credit_setup, .tx_complete = ath6kl_htc_pipe_tx_complete, .rx_complete = ath6kl_htc_pipe_rx_complete, }; void ath6kl_htc_pipe_attach(struct ath6kl *ar) { ar->htc_ops = 
&ath6kl_htc_pipe_ops; }
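/*
 * Usage sketch (hypothetical caller, not part of this file): a HIF probe
 * path would be expected to select the pipe-based HTC implementation
 * before creating the HTC target, roughly:
 *
 *	ath6kl_htc_pipe_attach(ar);
 *	ar->htc_target = ar->htc_ops->create(ar);
 */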
// SPDX-License-Identifier: GPL-2.0-or-later /**************************************************************** * * kaweth.c - driver for KL5KUSB101 based USB->Ethernet * * (c) 2000 Interlan Communications * (c) 2000 Stephane Alnet * (C) 2001 Brad Hards * (C) 2002 Oliver Neukum * * Original author: The Zapman <zapman@interlan.net> * Inspired by, and much credit goes to Michael Rothwell * <rothwell@interlan.net> for the test equipment, help, and patience * Based off of (and with thanks to) Petko Manolov's pegasus.c driver. * Also many thanks to Joel Silverman and Ed Surprenant at Kawasaki * for providing the firmware and driver resources. * ****************************************************************/ /* TODO: * Develop test procedures for USB net interfaces * Run test procedures * Fix bugs from previous two steps * Snoop other OSs for any tricks we're not doing * Reduce arbitrary timeouts * Smart multicast support * Temporary MAC change support * Tunable SOFs parameter - ioctl()?
* Ethernet stats collection * Code formatting improvements */ #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/usb.h> #include <linux/types.h> #include <linux/ethtool.h> #include <linux/dma-mapping.h> #include <linux/wait.h> #include <linux/firmware.h> #include <linux/uaccess.h> #include <asm/byteorder.h> #undef DEBUG #define KAWETH_MTU 1514 #define KAWETH_BUF_SIZE 1664 #define KAWETH_TX_TIMEOUT (5 * HZ) #define KAWETH_SCRATCH_SIZE 32 #define KAWETH_FIRMWARE_BUF_SIZE 4096 #define KAWETH_CONTROL_TIMEOUT (30000) #define KAWETH_STATUS_BROKEN 0x0000001 #define KAWETH_STATUS_CLOSING 0x0000002 #define KAWETH_STATUS_SUSPENDING 0x0000004 #define KAWETH_STATUS_BLOCKED (KAWETH_STATUS_CLOSING | KAWETH_STATUS_SUSPENDING) #define KAWETH_PACKET_FILTER_PROMISCUOUS 0x01 #define KAWETH_PACKET_FILTER_ALL_MULTICAST 0x02 #define KAWETH_PACKET_FILTER_DIRECTED 0x04 #define KAWETH_PACKET_FILTER_BROADCAST 0x08 #define KAWETH_PACKET_FILTER_MULTICAST 0x10 /* Table 7 */ #define KAWETH_COMMAND_GET_ETHERNET_DESC 0x00 #define KAWETH_COMMAND_MULTICAST_FILTERS 0x01 #define KAWETH_COMMAND_SET_PACKET_FILTER 0x02 #define KAWETH_COMMAND_STATISTICS 0x03 #define KAWETH_COMMAND_SET_TEMP_MAC 0x06 #define KAWETH_COMMAND_GET_TEMP_MAC 0x07 #define KAWETH_COMMAND_SET_URB_SIZE 0x08 #define KAWETH_COMMAND_SET_SOFS_WAIT 0x09 #define KAWETH_COMMAND_SCAN 0xFF #define KAWETH_SOFS_TO_WAIT 0x05 #define INTBUFFERSIZE 4 #define STATE_OFFSET 0 #define STATE_MASK 0x40 #define STATE_SHIFT 5 #define IS_BLOCKED(s) (s & KAWETH_STATUS_BLOCKED) MODULE_AUTHOR("Michael Zappe <zapman@interlan.net>, Stephane Alnet <stephane@u-picardie.fr>, Brad Hards <bhards@bigpond.net.au> and Oliver Neukum <oliver@neukum.org>"); MODULE_DESCRIPTION("KL5USB101 USB Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("kaweth/new_code.bin"); MODULE_FIRMWARE("kaweth/new_code_fix.bin"); MODULE_FIRMWARE("kaweth/trigger_code.bin"); MODULE_FIRMWARE("kaweth/trigger_code_fix.bin"); static const char driver_name[] = "kaweth"; static int kaweth_probe( struct usb_interface *intf, const struct usb_device_id *id /* from id_table */ ); static void kaweth_disconnect(struct usb_interface *intf); static int kaweth_suspend(struct usb_interface *intf, pm_message_t message); static int kaweth_resume(struct usb_interface *intf); /**************************************************************** * usb_device_id ****************************************************************/ static const struct usb_device_id usb_klsi_table[] = { { USB_DEVICE(0x03e8, 0x0008) }, /* AOX Endpoints USB Ethernet */ { USB_DEVICE(0x04bb, 0x0901) }, /* I-O DATA USB-ET/T */ { USB_DEVICE(0x0506, 0x03e8) }, /* 3Com 3C19250 */ { USB_DEVICE(0x0506, 0x11f8) }, /* 3Com 3C460 */ { USB_DEVICE(0x0557, 0x2002) }, /* ATEN USB Ethernet */ { USB_DEVICE(0x0557, 0x4000) }, /* D-Link DSB-650C */ { USB_DEVICE(0x0565, 0x0002) }, /* Peracom Enet */ { USB_DEVICE(0x0565, 0x0003) }, /* Optus@Home UEP1045A */ { USB_DEVICE(0x0565, 0x0005) }, /* Peracom Enet2 */ { USB_DEVICE(0x05e9, 0x0008) }, /* KLSI KL5KUSB101B */ { USB_DEVICE(0x05e9, 0x0009) }, /* KLSI KL5KUSB101B (Board change) */ { USB_DEVICE(0x066b, 0x2202) }, /* Linksys USB10T */ { USB_DEVICE(0x06e1, 0x0008) }, /* ADS USB-10BT */ { USB_DEVICE(0x06e1, 0x0009) }, /* ADS USB-10BT */ { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */ { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. 
*/ { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */ { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */ { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */ { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */ { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */ { USB_DEVICE(0x085a, 0x0009) }, /* PortGear Ethernet Adapter */ { USB_DEVICE(0x087d, 0x5704) }, /* Jaton USB Ethernet Device Adapter */ { USB_DEVICE(0x0951, 0x0008) }, /* Kingston Technology USB Ethernet Adapter */ { USB_DEVICE(0x095a, 0x3003) }, /* Portsmith Express Ethernet Adapter */ { USB_DEVICE(0x10bd, 0x1427) }, /* ASANTE USB To Ethernet Adapter */ { USB_DEVICE(0x1342, 0x0204) }, /* Mobility USB-Ethernet Adapter */ { USB_DEVICE(0x13d2, 0x0400) }, /* Shark Pocket Adapter */ { USB_DEVICE(0x1485, 0x0001) }, /* Silicom U2E */ { USB_DEVICE(0x1485, 0x0002) }, /* Psion Dacom Gold Port Ethernet */ { USB_DEVICE(0x1645, 0x0005) }, /* Entrega E45 */ { USB_DEVICE(0x1645, 0x0008) }, /* Entrega USB Ethernet Adapter */ { USB_DEVICE(0x1645, 0x8005) }, /* PortGear Ethernet Adapter */ { USB_DEVICE(0x1668, 0x0323) }, /* Actiontec USB Ethernet */ { USB_DEVICE(0x2001, 0x4000) }, /* D-link DSB-650C */ {} /* Null terminator */ }; MODULE_DEVICE_TABLE (usb, usb_klsi_table); /**************************************************************** * kaweth_driver ****************************************************************/ static struct usb_driver kaweth_driver = { .name = driver_name, .probe = kaweth_probe, .disconnect = kaweth_disconnect, .suspend = kaweth_suspend, .resume = kaweth_resume, .id_table = usb_klsi_table, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; typedef __u8 eth_addr_t[6]; /**************************************************************** * usb_eth_dev ****************************************************************/ struct usb_eth_dev { char *name; __u16 vendor; __u16 device; void *pdata; }; /**************************************************************** * kaweth_ethernet_configuration * Refer Table 8 ****************************************************************/ struct kaweth_ethernet_configuration { __u8 size; __u8 reserved1; __u8 reserved2; eth_addr_t hw_addr; __u32 statistics_mask; __le16 segment_size; __u16 max_multicast_filters; __u8 reserved3; } __packed; /**************************************************************** * kaweth_device ****************************************************************/ struct kaweth_device { spinlock_t device_lock; __u32 status; int end; int suspend_lowmem_rx; int suspend_lowmem_ctrl; int linkstate; int opened; struct delayed_work lowmem_work; struct usb_device *dev; struct usb_interface *intf; struct net_device *net; wait_queue_head_t term_wait; struct urb *rx_urb; struct urb *tx_urb; struct urb *irq_urb; dma_addr_t intbufferhandle; __u8 *intbuffer; dma_addr_t rxbufferhandle; __u8 *rx_buf; struct sk_buff *tx_skb; __u8 *firmware_buf; __u8 scratch[KAWETH_SCRATCH_SIZE]; __u16 packet_filter_bitmap; struct kaweth_ethernet_configuration configuration; }; /**************************************************************** * kaweth_read_configuration ****************************************************************/ static int kaweth_read_configuration(struct kaweth_device *kaweth) { return usb_control_msg(kaweth->dev, usb_rcvctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_GET_ETHERNET_DESC, USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, 0, 0, &kaweth->configuration, sizeof(kaweth->configuration), KAWETH_CONTROL_TIMEOUT); } 
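/*
 * The kaweth_set_* helpers below follow the same pattern as
 * kaweth_read_configuration() above: a synchronous vendor-specific
 * control transfer on endpoint 0, bounded by KAWETH_CONTROL_TIMEOUT and
 * differing only in the command byte (Table 7) and the wValue payload.
 */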
/**************************************************************** * kaweth_set_urb_size ****************************************************************/ static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size) { netdev_dbg(kaweth->net, "Setting URB size to %d\n", (unsigned)urb_size); return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_URB_SIZE, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, urb_size, 0, &kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_set_sofs_wait ****************************************************************/ static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait) { netdev_dbg(kaweth->net, "Set SOFS wait to %d\n", (unsigned)sofs_wait); return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_SOFS_WAIT, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, sofs_wait, 0, &kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_set_receive_filter ****************************************************************/ static int kaweth_set_receive_filter(struct kaweth_device *kaweth, __u16 receive_filter) { netdev_dbg(kaweth->net, "Set receive filter to %d\n", (unsigned)receive_filter); return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_PACKET_FILTER, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, receive_filter, 0, &kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_download_firmware ****************************************************************/ static int kaweth_download_firmware(struct kaweth_device *kaweth, const char *fwname, __u8 interrupt, __u8 type) { const struct firmware *fw; int data_len; int ret; ret = request_firmware(&fw, fwname, &kaweth->dev->dev); if (ret) { dev_err(&kaweth->intf->dev, "Firmware request failed\n"); return ret; } if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) { dev_err(&kaweth->intf->dev, "Firmware too big: %zu\n", fw->size); release_firmware(fw); return -ENOSPC; } data_len = fw->size; memcpy(kaweth->firmware_buf, fw->data, fw->size); release_firmware(fw); kaweth->firmware_buf[2] = (data_len & 0xFF) - 7; kaweth->firmware_buf[3] = data_len >> 8; kaweth->firmware_buf[4] = type; kaweth->firmware_buf[5] = interrupt; netdev_dbg(kaweth->net, "High: %i, Low:%i\n", kaweth->firmware_buf[3], kaweth->firmware_buf[2]); netdev_dbg(kaweth->net, "Downloading firmware at %p to kaweth device at %p\n", kaweth->firmware_buf, kaweth); netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len); return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SCAN, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, 0, 0, kaweth->firmware_buf, data_len, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_trigger_firmware ****************************************************************/ static int kaweth_trigger_firmware(struct kaweth_device *kaweth, __u8 interrupt) { kaweth->firmware_buf[0] = 0xB6; kaweth->firmware_buf[1] = 0xC3; kaweth->firmware_buf[2] = 0x01; kaweth->firmware_buf[3] = 0x00; kaweth->firmware_buf[4] = 0x06; kaweth->firmware_buf[5] = interrupt; kaweth->firmware_buf[6] = 0x00; kaweth->firmware_buf[7] = 0x00; return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SCAN, USB_TYPE_VENDOR | USB_DIR_OUT | 
USB_RECIP_DEVICE, 0, 0, (void *)kaweth->firmware_buf, 8, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_reset ****************************************************************/ static int kaweth_reset(struct kaweth_device *kaweth) { int result; result = usb_reset_configuration(kaweth->dev); mdelay(10); netdev_dbg(kaweth->net, "kaweth_reset() returns %d.\n", result); return result; } static void kaweth_usb_receive(struct urb *); static int kaweth_resubmit_rx_urb(struct kaweth_device *, gfp_t); /**************************************************************** int_callback *****************************************************************/ static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, gfp_t mf) { int status; status = usb_submit_urb (kaweth->irq_urb, mf); if (unlikely(status == -ENOMEM)) { kaweth->suspend_lowmem_ctrl = 1; schedule_delayed_work(&kaweth->lowmem_work, HZ/4); } else { kaweth->suspend_lowmem_ctrl = 0; } if (status) dev_err(&kaweth->intf->dev, "can't resubmit intr, %s-%s, status %d\n", kaweth->dev->bus->bus_name, kaweth->dev->devpath, status); } static void int_callback(struct urb *u) { struct kaweth_device *kaweth = u->context; int act_state; int status = u->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: /* error */ goto resubmit; } /* we check the link state to report changes */ if (kaweth->linkstate != (act_state = ( kaweth->intbuffer[STATE_OFFSET] | STATE_MASK) >> STATE_SHIFT)) { if (act_state) netif_carrier_on(kaweth->net); else netif_carrier_off(kaweth->net); kaweth->linkstate = act_state; } resubmit: kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC); } static void kaweth_resubmit_tl(struct work_struct *work) { struct kaweth_device *kaweth = container_of(work, struct kaweth_device, lowmem_work.work); if (IS_BLOCKED(kaweth->status)) return; if (kaweth->suspend_lowmem_rx) kaweth_resubmit_rx_urb(kaweth, GFP_NOIO); if (kaweth->suspend_lowmem_ctrl) kaweth_resubmit_int_urb(kaweth, GFP_NOIO); } /**************************************************************** * kaweth_resubmit_rx_urb ****************************************************************/ static int kaweth_resubmit_rx_urb(struct kaweth_device *kaweth, gfp_t mem_flags) { int result; usb_fill_bulk_urb(kaweth->rx_urb, kaweth->dev, usb_rcvbulkpipe(kaweth->dev, 1), kaweth->rx_buf, KAWETH_BUF_SIZE, kaweth_usb_receive, kaweth); kaweth->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; kaweth->rx_urb->transfer_dma = kaweth->rxbufferhandle; if((result = usb_submit_urb(kaweth->rx_urb, mem_flags))) { if (result == -ENOMEM) { kaweth->suspend_lowmem_rx = 1; schedule_delayed_work(&kaweth->lowmem_work, HZ/4); } dev_err(&kaweth->intf->dev, "resubmitting rx_urb %d failed\n", result); } else { kaweth->suspend_lowmem_rx = 0; } return result; } static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth, bool may_sleep); /**************************************************************** * kaweth_usb_receive ****************************************************************/ static void kaweth_usb_receive(struct urb *urb) { struct device *dev = &urb->dev->dev; struct kaweth_device *kaweth = urb->context; struct net_device *net = kaweth->net; int status = urb->status; unsigned long flags; int count = urb->actual_length; int count2 = urb->transfer_buffer_length; __u16 pkt_len = le16_to_cpup((__le16 *)kaweth->rx_buf); struct sk_buff *skb; if 
(unlikely(status == -EPIPE)) { net->stats.rx_errors++; kaweth->end = 1; wake_up(&kaweth->term_wait); dev_dbg(dev, "Status was -EPIPE.\n"); return; } if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) { /* we are killed - set a flag and wake the disconnect handler */ kaweth->end = 1; wake_up(&kaweth->term_wait); dev_dbg(dev, "Status was -ECONNRESET or -ESHUTDOWN.\n"); return; } if (unlikely(status == -EPROTO || status == -ETIME || status == -EILSEQ)) { net->stats.rx_errors++; dev_dbg(dev, "Status was -EPROTO, -ETIME, or -EILSEQ.\n"); return; } if (unlikely(status == -EOVERFLOW)) { net->stats.rx_errors++; dev_dbg(dev, "Status was -EOVERFLOW.\n"); } spin_lock_irqsave(&kaweth->device_lock, flags); if (IS_BLOCKED(kaweth->status)) { spin_unlock_irqrestore(&kaweth->device_lock, flags); return; } spin_unlock_irqrestore(&kaweth->device_lock, flags); if(status && status != -EREMOTEIO && count != 1) { dev_err(&kaweth->intf->dev, "%s RX status: %d count: %d packet_len: %d\n", net->name, status, count, (int)pkt_len); kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); return; } if(kaweth->net && (count > 2)) { if(pkt_len > (count - 2)) { dev_err(&kaweth->intf->dev, "Packet length too long for USB frame (pkt_len: %x, count: %x)\n", pkt_len, count); dev_err(&kaweth->intf->dev, "Packet len & 2047: %x\n", pkt_len & 2047); dev_err(&kaweth->intf->dev, "Count 2: %x\n", count2); kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); return; } if(!(skb = dev_alloc_skb(pkt_len+2))) { kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); return; } skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, net); netif_rx(skb); net->stats.rx_packets++; net->stats.rx_bytes += pkt_len; } kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); } /**************************************************************** * kaweth_open ****************************************************************/ static int kaweth_open(struct net_device *net) { struct kaweth_device *kaweth = netdev_priv(net); int res; res = usb_autopm_get_interface(kaweth->intf); if (res) { dev_err(&kaweth->intf->dev, "Interface cannot be resumed.\n"); return -EIO; } res = kaweth_resubmit_rx_urb(kaweth, GFP_KERNEL); if (res) goto err_out; usb_fill_int_urb( kaweth->irq_urb, kaweth->dev, usb_rcvintpipe(kaweth->dev, 3), kaweth->intbuffer, INTBUFFERSIZE, int_callback, kaweth, 250); /* overriding the descriptor */ kaweth->irq_urb->transfer_dma = kaweth->intbufferhandle; kaweth->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; res = usb_submit_urb(kaweth->irq_urb, GFP_KERNEL); if (res) { usb_kill_urb(kaweth->rx_urb); goto err_out; } kaweth->opened = 1; netif_start_queue(net); kaweth_async_set_rx_mode(kaweth, true); return 0; err_out: usb_autopm_put_interface(kaweth->intf); return -EIO; } /**************************************************************** * kaweth_kill_urbs ****************************************************************/ static void kaweth_kill_urbs(struct kaweth_device *kaweth) { usb_kill_urb(kaweth->irq_urb); usb_kill_urb(kaweth->rx_urb); usb_kill_urb(kaweth->tx_urb); cancel_delayed_work_sync(&kaweth->lowmem_work); /* a scheduled work may have resubmitted, we hit them again */ usb_kill_urb(kaweth->irq_urb); usb_kill_urb(kaweth->rx_urb); } /**************************************************************** * kaweth_close ****************************************************************/ static int kaweth_close(struct net_device *net) { struct kaweth_device *kaweth 
/****************************************************************
 *     kaweth_open
 ****************************************************************/
static int kaweth_open(struct net_device *net)
{
	struct kaweth_device *kaweth = netdev_priv(net);
	int res;

	res = usb_autopm_get_interface(kaweth->intf);
	if (res) {
		dev_err(&kaweth->intf->dev, "Interface cannot be resumed.\n");
		return -EIO;
	}
	res = kaweth_resubmit_rx_urb(kaweth, GFP_KERNEL);
	if (res)
		goto err_out;

	usb_fill_int_urb(
		kaweth->irq_urb,
		kaweth->dev,
		usb_rcvintpipe(kaweth->dev, 3),
		kaweth->intbuffer,
		INTBUFFERSIZE,
		int_callback,
		kaweth,
		250); /* overriding the descriptor */
	kaweth->irq_urb->transfer_dma = kaweth->intbufferhandle;
	kaweth->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	res = usb_submit_urb(kaweth->irq_urb, GFP_KERNEL);
	if (res) {
		usb_kill_urb(kaweth->rx_urb);
		goto err_out;
	}
	kaweth->opened = 1;

	netif_start_queue(net);

	kaweth_async_set_rx_mode(kaweth, true);
	return 0;

err_out:
	usb_autopm_put_interface(kaweth->intf);
	return -EIO;
}

/****************************************************************
 *     kaweth_kill_urbs
 ****************************************************************/
static void kaweth_kill_urbs(struct kaweth_device *kaweth)
{
	usb_kill_urb(kaweth->irq_urb);
	usb_kill_urb(kaweth->rx_urb);
	usb_kill_urb(kaweth->tx_urb);

	cancel_delayed_work_sync(&kaweth->lowmem_work);

	/* a scheduled work item may have resubmitted an URB, so kill these again */
	usb_kill_urb(kaweth->irq_urb);
	usb_kill_urb(kaweth->rx_urb);
}

/****************************************************************
 *     kaweth_close
 ****************************************************************/
static int kaweth_close(struct net_device *net)
{
	struct kaweth_device *kaweth = netdev_priv(net);

	netif_stop_queue(net);
	kaweth->opened = 0;

	kaweth->status |= KAWETH_STATUS_CLOSING;

	kaweth_kill_urbs(kaweth);

	kaweth->status &= ~KAWETH_STATUS_CLOSING;

	usb_autopm_put_interface(kaweth->intf);

	return 0;
}

static u32 kaweth_get_link(struct net_device *dev)
{
	struct kaweth_device *kaweth = netdev_priv(dev);

	return kaweth->linkstate;
}

static const struct ethtool_ops ops = {
	.get_link	= kaweth_get_link
};

/****************************************************************
 *     kaweth_usb_transmit_complete
 ****************************************************************/
static void kaweth_usb_transmit_complete(struct urb *urb)
{
	struct kaweth_device *kaweth = urb->context;
	struct sk_buff *skb = kaweth->tx_skb;
	int status = urb->status;

	if (unlikely(status != 0))
		if (status != -ENOENT)
			dev_dbg(&urb->dev->dev, "%s: TX status %d.\n",
				kaweth->net->name, status);

	netif_wake_queue(kaweth->net);
	dev_kfree_skb_irq(skb);
}

/****************************************************************
 *     kaweth_start_xmit
 ****************************************************************/
static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
				     struct net_device *net)
{
	struct kaweth_device *kaweth = netdev_priv(net);
	__le16 *private_header;
	int res;

	spin_lock_irq(&kaweth->device_lock);

	kaweth_async_set_rx_mode(kaweth, false);
	netif_stop_queue(net);
	if (IS_BLOCKED(kaweth->status)) {
		goto skip;
	}

	/* We now decide whether we can put our special header into the sk_buff */
	if (skb_cow_head(skb, 2)) {
		net->stats.tx_errors++;
		netif_start_queue(net);
		spin_unlock_irq(&kaweth->device_lock);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	private_header = __skb_push(skb, 2);
	*private_header = cpu_to_le16(skb->len - 2);
	kaweth->tx_skb = skb;

	usb_fill_bulk_urb(kaweth->tx_urb,
			  kaweth->dev,
			  usb_sndbulkpipe(kaweth->dev, 2),
			  private_header,
			  skb->len,
			  kaweth_usb_transmit_complete,
			  kaweth);
	kaweth->end = 0;

	if ((res = usb_submit_urb(kaweth->tx_urb, GFP_ATOMIC))) {
		dev_warn(&net->dev, "kaweth failed tx_urb %d\n", res);
skip:
		net->stats.tx_errors++;

		netif_start_queue(net);
		dev_kfree_skb_irq(skb);
	} else {
		net->stats.tx_packets++;
		net->stats.tx_bytes += skb->len;
	}

	spin_unlock_irq(&kaweth->device_lock);

	return NETDEV_TX_OK;
}

/****************************************************************
 *     kaweth_set_rx_mode
 ****************************************************************/
static void kaweth_set_rx_mode(struct net_device *net)
{
	struct kaweth_device *kaweth = netdev_priv(net);

	__u16 packet_filter_bitmap = KAWETH_PACKET_FILTER_DIRECTED |
				     KAWETH_PACKET_FILTER_BROADCAST |
				     KAWETH_PACKET_FILTER_MULTICAST;

	netdev_dbg(net, "Setting Rx mode to %d\n", packet_filter_bitmap);

	netif_stop_queue(net);

	if (net->flags & IFF_PROMISC) {
		packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS;
	} else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
		packet_filter_bitmap |= KAWETH_PACKET_FILTER_ALL_MULTICAST;
	}

	kaweth->packet_filter_bitmap = packet_filter_bitmap;

	netif_wake_queue(net);
}
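/*
 * Illustrative sketch, not part of the driver: kaweth_start_xmit() above
 * frames each packet for the device by guaranteeing two bytes of writable
 * headroom with skb_cow_head() and then prepending the little-endian
 * frame length with __skb_push(). The same pattern in isolation;
 * kaweth_frame_tx_skb() is a hypothetical helper.
 */
static int kaweth_frame_tx_skb(struct sk_buff *skb)
{
	__le16 *hdr;

	/* Make sure the header can be written without touching shared data. */
	if (skb_cow_head(skb, 2))
		return -ENOMEM;

	/* Prepend the length of the original frame (header excluded). */
	hdr = __skb_push(skb, 2);
	*hdr = cpu_to_le16(skb->len - 2);
	return 0;
}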
/****************************************************************
 *     kaweth_async_set_rx_mode
 ****************************************************************/
static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth,
				     bool may_sleep)
{
	int ret;
	__u16 packet_filter_bitmap = kaweth->packet_filter_bitmap;

	kaweth->packet_filter_bitmap = 0;
	if (packet_filter_bitmap == 0)
		return;

	if (!may_sleep)
		return;

	ret = usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
			      KAWETH_COMMAND_SET_PACKET_FILTER,
			      USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
			      packet_filter_bitmap, 0,
			      &kaweth->scratch, 0,
			      KAWETH_CONTROL_TIMEOUT);
	if (ret < 0)
		dev_err(&kaweth->intf->dev, "Failed to set Rx mode: %d\n",
			ret);
	else
		netdev_dbg(kaweth->net, "Set Rx mode to %d\n",
			   packet_filter_bitmap);
}

/****************************************************************
 *     kaweth_tx_timeout
 ****************************************************************/
static void kaweth_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct kaweth_device *kaweth = netdev_priv(net);

	dev_warn(&net->dev, "%s: Tx timed out. Resetting.\n", net->name);
	net->stats.tx_errors++;
	netif_trans_update(net);

	usb_unlink_urb(kaweth->tx_urb);
}

/****************************************************************
 *     kaweth_suspend
 ****************************************************************/
static int kaweth_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct kaweth_device *kaweth = usb_get_intfdata(intf);
	unsigned long flags;

	spin_lock_irqsave(&kaweth->device_lock, flags);
	kaweth->status |= KAWETH_STATUS_SUSPENDING;
	spin_unlock_irqrestore(&kaweth->device_lock, flags);

	kaweth_kill_urbs(kaweth);
	return 0;
}

/****************************************************************
 *     kaweth_resume
 ****************************************************************/
static int kaweth_resume(struct usb_interface *intf)
{
	struct kaweth_device *kaweth = usb_get_intfdata(intf);
	unsigned long flags;

	spin_lock_irqsave(&kaweth->device_lock, flags);
	kaweth->status &= ~KAWETH_STATUS_SUSPENDING;
	spin_unlock_irqrestore(&kaweth->device_lock, flags);

	if (!kaweth->opened)
		return 0;
	kaweth_resubmit_rx_urb(kaweth, GFP_NOIO);
	kaweth_resubmit_int_urb(kaweth, GFP_NOIO);

	return 0;
}

/****************************************************************
 *     kaweth_probe
 ****************************************************************/

static const struct net_device_ops kaweth_netdev_ops = {
	.ndo_open		= kaweth_open,
	.ndo_stop		= kaweth_close,
	.ndo_start_xmit		= kaweth_start_xmit,
	.ndo_tx_timeout		= kaweth_tx_timeout,
	.ndo_set_rx_mode	= kaweth_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int kaweth_probe(
		struct usb_interface *intf,
		const struct usb_device_id *id	/* from id_table */
	)
{
	struct device *dev = &intf->dev;
	struct usb_device *udev = interface_to_usbdev(intf);
	struct kaweth_device *kaweth;
	struct net_device *netdev;
	const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	int result = 0;
	int rv = -EIO;

	dev_dbg(dev,
		"Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
		udev->devnum, le16_to_cpu(udev->descriptor.idVendor),
		le16_to_cpu(udev->descriptor.idProduct),
		le16_to_cpu(udev->descriptor.bcdDevice));

	dev_dbg(dev, "Device at %p\n", udev);

	dev_dbg(dev, "Descriptor length: %x type: %x\n",
		(int)udev->descriptor.bLength,
		(int)udev->descriptor.bDescriptorType);

	netdev = alloc_etherdev(sizeof(*kaweth));
	if (!netdev)
		return -ENOMEM;

	kaweth = netdev_priv(netdev);
	kaweth->dev = udev;
	kaweth->net = netdev;
	kaweth->intf = intf;

	spin_lock_init(&kaweth->device_lock);
	init_waitqueue_head(&kaweth->term_wait);

	dev_dbg(dev, "Resetting.\n");

	kaweth_reset(kaweth);

	/*
	 * If high byte of bcdDevice is nonzero, firmware is already
	 * downloaded. Don't try to do it again, or we'll hang the device.
	 */
	if (le16_to_cpu(udev->descriptor.bcdDevice) >> 8) {
		dev_info(dev, "Firmware present in device.\n");
	} else {
		/* Download the firmware */
		dev_info(dev, "Downloading firmware...\n");
		kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
		if (!kaweth->firmware_buf) {
			rv = -ENOMEM;
			goto err_free_netdev;
		}
		if ((result = kaweth_download_firmware(kaweth,
						       "kaweth/new_code.bin",
						       100,
						       2)) < 0) {
			dev_err(dev, "Error downloading firmware (%d)\n",
				result);
			goto err_fw;
		}

		if ((result = kaweth_download_firmware(kaweth,
						       "kaweth/new_code_fix.bin",
						       100,
						       3)) < 0) {
			dev_err(dev, "Error downloading firmware fix (%d)\n",
				result);
			goto err_fw;
		}

		if ((result = kaweth_download_firmware(kaweth,
						       "kaweth/trigger_code.bin",
						       126,
						       2)) < 0) {
			dev_err(dev, "Error downloading trigger code (%d)\n",
				result);
			goto err_fw;
		}

		if ((result = kaweth_download_firmware(kaweth,
						       "kaweth/trigger_code_fix.bin",
						       126,
						       3)) < 0) {
			dev_err(dev, "Error downloading trigger code fix (%d)\n",
				result);
			goto err_fw;
		}

		if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) {
			dev_err(dev, "Error triggering firmware (%d)\n",
				result);
			goto err_fw;
		}

		/* Device will now disappear for a moment...  */
		dev_info(dev, "Firmware loaded. I'll be back...\n");
err_fw:
		free_page((unsigned long)kaweth->firmware_buf);
		free_netdev(netdev);
		return -EIO;
	}

	result = kaweth_read_configuration(kaweth);
	if (result < 0) {
		dev_err(dev, "Error reading configuration (%d), no net device created\n",
			result);
		goto err_free_netdev;
	}

	dev_info(dev, "Statistics collection: %x\n",
		 kaweth->configuration.statistics_mask);
	dev_info(dev, "Multicast filter limit: %x\n",
		 kaweth->configuration.max_multicast_filters & ((1 << 15) - 1));
	dev_info(dev, "MTU: %d\n",
		 le16_to_cpu(kaweth->configuration.segment_size));
	dev_info(dev, "Read MAC address %pM\n",
		 kaweth->configuration.hw_addr);

	if (!memcmp(&kaweth->configuration.hw_addr, &bcast_addr,
		    sizeof(bcast_addr))) {
		dev_err(dev, "Firmware not functioning properly, no net device created\n");
		goto err_free_netdev;
	}

	if (kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) {
		dev_dbg(dev, "Error setting URB size\n");
		goto err_free_netdev;
	}

	if (kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) {
		dev_err(dev, "Error setting SOFS wait\n");
		goto err_free_netdev;
	}

	result = kaweth_set_receive_filter(kaweth,
					   KAWETH_PACKET_FILTER_DIRECTED |
					   KAWETH_PACKET_FILTER_BROADCAST |
					   KAWETH_PACKET_FILTER_MULTICAST);
	if (result < 0) {
		dev_err(dev, "Error setting receive filter\n");
		goto err_free_netdev;
	}

	dev_dbg(dev, "Initializing net device.\n");

	kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kaweth->tx_urb)
		goto err_free_netdev;
	kaweth->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kaweth->rx_urb)
		goto err_only_tx;
	kaweth->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kaweth->irq_urb)
		goto err_tx_and_rx;

	kaweth->intbuffer = usb_alloc_coherent(kaweth->dev,
					       INTBUFFERSIZE,
					       GFP_KERNEL,
					       &kaweth->intbufferhandle);
	if (!kaweth->intbuffer)
		goto err_tx_and_rx_and_irq;
	kaweth->rx_buf = usb_alloc_coherent(kaweth->dev,
					    KAWETH_BUF_SIZE,
					    GFP_KERNEL,
					    &kaweth->rxbufferhandle);
	if (!kaweth->rx_buf)
		goto err_all_but_rxbuf;

	memcpy(netdev->broadcast, &bcast_addr, sizeof(bcast_addr));
	eth_hw_addr_set(netdev, (u8 *)&kaweth->configuration.hw_addr);

	netdev->netdev_ops = &kaweth_netdev_ops;
	netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
	netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size);
	netdev->ethtool_ops = &ops;

	/* kaweth is zeroed as part of alloc_netdev */
	INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);

	usb_set_intfdata(intf, kaweth);
	SET_NETDEV_DEV(netdev, dev);
	if (register_netdev(netdev) != 0) {
		dev_err(dev, "Error registering netdev.\n");
		goto err_intfdata;
	}

	dev_info(dev, "kaweth interface created at %s\n",
		 kaweth->net->name);

	return 0;

err_intfdata:
	usb_set_intfdata(intf, NULL);
	usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf,
			  kaweth->rxbufferhandle);
err_all_but_rxbuf:
	usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer,
			  kaweth->intbufferhandle);
err_tx_and_rx_and_irq:
	usb_free_urb(kaweth->irq_urb);
err_tx_and_rx:
	usb_free_urb(kaweth->rx_urb);
err_only_tx:
	usb_free_urb(kaweth->tx_urb);
err_free_netdev:
	free_netdev(netdev);

	return rv;
}

/****************************************************************
 *     kaweth_disconnect
 ****************************************************************/
static void kaweth_disconnect(struct usb_interface *intf)
{
	struct kaweth_device *kaweth = usb_get_intfdata(intf);
	struct net_device *netdev;

	usb_set_intfdata(intf, NULL);
	if (!kaweth) {
		dev_warn(&intf->dev, "unregistering non-existent device\n");
		return;
	}
	netdev = kaweth->net;

	netdev_dbg(kaweth->net, "Unregistering net device\n");
	unregister_netdev(netdev);

	usb_free_urb(kaweth->rx_urb);
	usb_free_urb(kaweth->tx_urb);
	usb_free_urb(kaweth->irq_urb);

	usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf,
			  kaweth->rxbufferhandle);
	usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer,
			  kaweth->intbufferhandle);

	free_netdev(netdev);
}

module_usb_driver(kaweth_driver);
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5

/*
 * Use some of the swap file numbers for other purposes.  This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * PTE markers are used to persist information onto PTEs that otherwise
 * should be a none pte.  As its name "PTE" hints, it should only be
 * applied to the leaves of pgtables.
 */
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER  (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
			 SWP_MIGRATION_NUM + SWP_DEVICE_NUM)

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. Short description is we need struct pages for
 * device memory that is unaddressable (inaccessible) by the CPU, so that
 * we can migrate part of a process memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to a special SWP_DEVICE_EXCLUSIVE entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 3
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process.  For SWP_MIGRATION_WRITE, that information is implicit:
 * (parts of) anonymous pages that are mapped writable are exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM)
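/*
 * Worked example (illustrative only): with MAX_SWAPFILES_SHIFT = 5, a
 * 32-bit pgoff_t packed as 5 bits of type + 27 bits of offset lets a
 * single swap area address 2^27 pages, i.e. 512GiB with 4KiB pages.
 * With CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE
 * all enabled, MAX_SWAPFILES = 32 - 3 - 3 - 1 - 1 = 24 usable swap types;
 * the remaining eight type values encode the special entries defined
 * above. A minimal sketch of that 5/27 packing (a hypothetical helper;
 * the real encoding lives in linux/swapops.h and per-arch pgtable code):
 */
static inline unsigned long swp_pack_example(unsigned int type,
					     unsigned long offset)
{
	return ((unsigned long)type << 27) | (offset & ((1UL << 27) - 1));
}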
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	/* pages reclaimed outside of LRU-based reclaim */
	unsigned long reclaimed;
#ifdef CONFIG_LRU_GEN
	/* per-thread mm walk data */
	struct lru_gen_mm_walk *mm_walk;
#endif
};

/*
 * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
 * reclaim
 * @pages: number of pages reclaimed
 *
 * If the current process is undergoing a reclaim operation, increment the
 * number of reclaimed pages by @pages.
 */
static inline void mm_account_reclaimed_pages(unsigned long pages)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed += pages;
}

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file).  Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format..
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* don't overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
					/* add others here before... */
};

#define SWAP_CLUSTER_MAX 32UL
#define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10)
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */
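/*
 * Worked example (illustrative only): on a 4KiB-page system with 4-byte
 * int, offsetof(union swap_header, magic.magic) = 4096 - 10 = 4086 and
 * offsetof(union swap_header, info.badpages) = 1024 + 3*4 + 16 + 16 +
 * 117*4 = 1536, so MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637 bad-page
 * slots fit in the header page.
 */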
/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long that is naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The flags field determines if a cluster is free. This is
 * protected by the cluster lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * other than list, and swap_info_struct->swap_map
				 * elements corresponding to the swap cluster.
				 */
	u16 count;
	u8 flags;
	u8 order;
	struct list_head list;
};

/* All on-list clusters must have a non-zero flag. */
enum swap_cluster_flags {
	CLUSTER_FLAG_NONE = 0, /* For temporary off-list cluster */
	CLUSTER_FLAG_FREE,
	CLUSTER_FLAG_NONFULL,
	CLUSTER_FLAG_FRAG,
	/* Clusters with flags above are allocatable */
	CLUSTER_FLAG_USABLE = CLUSTER_FLAG_FRAG,
	CLUSTER_FLAG_FULL,
	CLUSTER_FLAG_DISCARD,
	CLUSTER_FLAG_MAX,
};

/*
 * The first page in the swap file is the swap header, which is always marked
 * bad to prevent it from being allocated as an entry. This also prevents the
 * cluster to which it belongs being marked free. Therefore 0 is safe to use as
 * a sentinel to indicate an entry is not valid.
 */
#define SWAP_ENTRY_INVALID	0

#ifdef CONFIG_THP_SWAP
#define SWAP_NR_ORDERS		(PMD_ORDER + 1)
#else
#define SWAP_NR_ORDERS		1
#endif

/*
 * We keep using the same cluster for rotational devices so IO will be
 * sequential. The purpose is to optimize SWAP throughput on such devices.
 */
struct swap_sequential_cluster {
	unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned long *zeromap;		/* kvmalloc'ed bitmap to track zero pages */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct list_head free_clusters; /* free clusters list */
	struct list_head full_clusters; /* full clusters list */
	struct list_head nonfull_clusters[SWAP_NR_ORDERS];
					/* list of clusters that contain at least one free slot */
	struct list_head frag_clusters[SWAP_NR_ORDERS];
					/* list of clusters that are fragmented or contended */
	atomic_long_t frag_cluster_nr[SWAP_NR_ORDERS];
	unsigned int pages;		/* total of usable pages of swap */
	atomic_long_t inuse_pages;	/* number of those currently in use */
	struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotating device */
	spinlock_t global_cluster_lock;	/* Serialize usage of global cluster */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	struct completion comp;		/* seldom referenced */
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. changing flags needs
					 * to hold this lock and swap_lock. If
					 * both locks must be held, take
					 * swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct work_struct reclaim_work; /* reclaim worker */
	struct list_head discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					  * entries in swap_avail_heads, one
					  * entry per node.
					  * Must be last: the length of the
					  * array is nr_node_ids, which is not
					  * a fixed value, so it has to be
					  * allocated dynamically.
					  * It also has to be an array so that
					  * plist_for_each_* can work.
					  */
};

static inline swp_entry_t page_swap_entry(struct page *page)
{
	struct folio *folio = page_folio(page);
	swp_entry_t entry = folio->swap;

	entry.val += folio_page_idx(folio, page);
	return entry;
}

/* linux/mm/workingset.c */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
			    bool flush);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
void lru_note_cost(struct lruvec *lruvec, bool file,
		   unsigned int nr_io, unsigned int nr_rotated);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
#define MIN_SWAPPINESS 0
#define MAX_SWAPPINESS 200

extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  unsigned int reclaim_options,
						  int *swappiness);

extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_NUMA
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#endif

void check_move_unevictable_folios(struct folio_batch *fbatch);

extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

void free_swap_cache(struct folio *folio);
void free_page_and_swap_cache(struct page *);
void free_pages_and_swap_cache(struct encoded_page **, int);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}
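/*
 * Worked example (illustrative only): with total_swap_pages = 1000 and
 * 400 slots still free, vm_swap_full() above evaluates 400 * 2 = 800 <
 * 1000, i.e. true as soon as more than half of swap is in use, which is
 * the cue to release swapcache more aggressively.
 */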
extern void si_swapinfo(struct sysinfo *);
int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t, int);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t entry, int nr);
extern void swap_free_nr(swp_entry_t entry, int nr_pages);
extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/*
 * only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page and release_pages undeclared...
 */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr)
{
}

static inline void free_swap_cache(struct folio *folio)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline int swapcache_prepare(swp_entry_t swp, int nr)
{
	return 0;
}

static inline void swap_free_nr(swp_entry_t entry, int nr_pages)
{
}

static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline bool swap_entry_swapped(struct swap_info_struct *si,
				      swp_entry_t entry)
{
	return false;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask)
{
	return -EINVAL;
}

static inline bool folio_free_swap(struct folio *folio)
{
	return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}
#endif /* CONFIG_SWAP */

static inline void free_swap_and_cache(swp_entry_t entry)
{
	free_swap_and_cache_nr(entry, 1);
}

static inline void swap_free(swp_entry_t entry)
{
	swap_free_nr(entry, 1);
}

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return READ_ONCE(vm_swappiness);

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return READ_ONCE(vm_swappiness);

	return READ_ONCE(memcg->swappiness);
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return READ_ONCE(vm_swappiness);
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return;
	__folio_throttle_swaprate(folio, gfp);
}
#else
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif

#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
		swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
		unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPLETION_H
#define __LINUX_COMPLETION_H

/*
 * (C) Copyright 2001 Linus Torvalds
 *
 * Atomic wait-for-completion handler data structures.
 * See kernel/sched/completion.c for details.
 */

#include <linux/swait.h>

/*
 * struct completion - structure used to maintain state for a "completion"
 *
 * This is the opaque structure used to maintain the state for a "completion".
 * Completions currently use a FIFO to queue threads that have to wait for
 * the "completion" event.
 *
 * See also:  complete(), wait_for_completion() (and friends _timeout,
 * _interruptible, _interruptible_timeout, and _killable), init_completion(),
 * reinit_completion(), and macros DECLARE_COMPLETION(),
 * DECLARE_COMPLETION_ONSTACK().
 */
struct completion {
	unsigned int done;
	struct swait_queue_head wait;
};

#define init_completion_map(x, m) init_completion(x)
static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {}

#define COMPLETION_INITIALIZER(work) \
	{ 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }

#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
	(*({ init_completion_map(&(work), &(map)); &(work); }))

#define COMPLETION_INITIALIZER_ONSTACK(work) \
	(*({ init_completion(&work); &work; }))

/**
 * DECLARE_COMPLETION - declare and initialize a completion structure
 * @work:  identifier for the completion structure
 *
 * This macro declares and initializes a completion structure. Generally used
 * for static declarations. You should use the _ONSTACK variant for automatic
 * variables.
 */
#define DECLARE_COMPLETION(work) \
	struct completion work = COMPLETION_INITIALIZER(work)

/*
 * Lockdep needs to run a non-constant initializer for on-stack
 * completions - so we use the _ONSTACK() variant for those that
 * are on the kernel stack:
 */
/**
 * DECLARE_COMPLETION_ONSTACK - declare and initialize a completion structure
 * @work:  identifier for the completion structure
 *
 * This macro declares and initializes a completion structure on the kernel
 * stack.
 */
#ifdef CONFIG_LOCKDEP
# define DECLARE_COMPLETION_ONSTACK(work) \
	struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \
	struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map)
#else
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work)
#endif

/**
 * init_completion - Initialize a dynamically allocated completion
 * @x:  pointer to completion structure that is to be initialized
 *
 * This inline function will initialize a dynamically created completion
 * structure.
 */
static inline void init_completion(struct completion *x)
{
	x->done = 0;
	init_swait_queue_head(&x->wait);
}

/**
 * reinit_completion - reinitialize a completion structure
 * @x:  pointer to completion structure that is to be reinitialized
 *
 * This inline function should be used to reinitialize a completion structure
 * so it can be reused. This is especially important after complete_all() is
 * used.
 */
static inline void reinit_completion(struct completion *x)
{
	x->done = 0;
}

extern void wait_for_completion(struct completion *);
extern void wait_for_completion_io(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
extern int wait_for_completion_state(struct completion *x, unsigned int state);
extern unsigned long wait_for_completion_timeout(struct completion *x,
						 unsigned long timeout);
extern unsigned long wait_for_completion_io_timeout(struct completion *x,
						    unsigned long timeout);
extern long wait_for_completion_interruptible_timeout(
	struct completion *x, unsigned long timeout);
extern long wait_for_completion_killable_timeout(
	struct completion *x, unsigned long timeout);
extern bool try_wait_for_completion(struct completion *x);
extern bool completion_done(struct completion *x);

extern void complete(struct completion *);
extern void complete_on_current_cpu(struct completion *x);
extern void complete_all(struct completion *);

#endif
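/*
 * Illustrative usage sketch, not part of this header: one context blocks
 * in wait_for_completion_timeout() until another context calls
 * complete(). Assumes <linux/jiffies.h> for msecs_to_jiffies();
 * drv_done, drv_signal_done() and drv_wait_for_done() are hypothetical
 * names.
 */
static DECLARE_COMPLETION(drv_done);

/* Called from whatever context finishes the work (IRQ, workqueue, ...). */
static void drv_signal_done(void)
{
	complete(&drv_done);	/* wakes exactly one waiter */
}

/* Sleeps until drv_signal_done() runs, giving up after one second. */
static int drv_wait_for_done(void)
{
	/* wait_for_completion_timeout() returns 0 on timeout */
	if (!wait_for_completion_timeout(&drv_done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;
	return 0;
}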
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Backlight Lowlevel Control Abstraction
 *
 * Copyright (C) 2003,2004 Hewlett-Packard Company
 *
 */

#ifndef _LINUX_BACKLIGHT_H
#define _LINUX_BACKLIGHT_H

#include <linux/device.h>
#include <linux/fb.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/types.h>

/**
 * enum backlight_update_reason - what method was used to update backlight
 *
 * A driver indicates the method (reason) used for updating the backlight
 * when calling backlight_force_update().
 */
enum backlight_update_reason {
	/**
	 * @BACKLIGHT_UPDATE_HOTKEY: The backlight was updated using a hot-key.
	 */
	BACKLIGHT_UPDATE_HOTKEY,

	/**
	 * @BACKLIGHT_UPDATE_SYSFS: The backlight was updated using sysfs.
	 */
	BACKLIGHT_UPDATE_SYSFS,
};

/**
 * enum backlight_type - the type of backlight control
 *
 * The type of interface used to control the backlight.
 */
enum backlight_type {
	/**
	 * @BACKLIGHT_RAW:
	 *
	 * The backlight is controlled using hardware registers.
	 */
	BACKLIGHT_RAW = 1,

	/**
	 * @BACKLIGHT_PLATFORM:
	 *
	 * The backlight is controlled using a platform-specific interface.
	 */
	BACKLIGHT_PLATFORM,

	/**
	 * @BACKLIGHT_FIRMWARE:
	 *
	 * The backlight is controlled using a standard firmware interface.
	 */
	BACKLIGHT_FIRMWARE,

	/**
	 * @BACKLIGHT_TYPE_MAX: Number of entries.
	 */
	BACKLIGHT_TYPE_MAX,
};

/**
 * enum backlight_scale - the type of scale used for brightness values
 *
 * The type of scale used for brightness values.
 */
enum backlight_scale {
	/**
	 * @BACKLIGHT_SCALE_UNKNOWN: The scale is unknown.
	 */
	BACKLIGHT_SCALE_UNKNOWN = 0,

	/**
	 * @BACKLIGHT_SCALE_LINEAR: The scale is linear.
	 *
	 * The linear scale will increase brightness the same for each step.
	 */
	BACKLIGHT_SCALE_LINEAR,

	/**
	 * @BACKLIGHT_SCALE_NON_LINEAR: The scale is not linear.
	 *
	 * This is often used when the brightness values try to match the
	 * eye's relative perception, which demands a non-linear scale.
	 */
	BACKLIGHT_SCALE_NON_LINEAR,
};
struct backlight_device;

/**
 * struct backlight_ops - backlight operations
 *
 * The backlight operations are specified when the backlight device is registered.
 */
struct backlight_ops {
	/**
	 * @options: Configure how operations are called from the core.
	 *
	 * The options parameter is used to adjust the behaviour of the core.
	 * Set BL_CORE_SUSPENDRESUME to get the update_status() operation called
	 * upon suspend and resume.
	 */
	unsigned int options;

#define BL_CORE_SUSPENDRESUME	(1 << 0)

	/**
	 * @update_status: Operation called when properties have changed.
	 *
	 * Notify the backlight driver some property has changed.
	 * The update_status operation is protected by the update_lock.
	 *
	 * The backlight driver is expected to use backlight_is_blank()
	 * to check if the display is blanked and set brightness accordingly.
	 * update_status() is called when any of the properties has changed.
	 *
	 * RETURNS:
	 *
	 * 0 on success, negative error code if any failure occurred.
	 */
	int (*update_status)(struct backlight_device *);

	/**
	 * @get_brightness: Return the current backlight brightness.
	 *
	 * The driver may implement this as a readback from the HW.
	 * This operation is optional and if not present then the current
	 * brightness property value is used.
	 *
	 * RETURNS:
	 *
	 * A brightness value which is 0 or a positive number.
	 * On failure a negative error code is returned.
	 */
	int (*get_brightness)(struct backlight_device *);

	/**
	 * @controls_device: Check against the display device
	 *
	 * Check if the backlight controls the given display device. This
	 * operation is optional and if not implemented it is assumed that
	 * the display is always the one controlled by the backlight.
	 *
	 * RETURNS:
	 *
	 * If display_dev is NULL or display_dev matches the device controlled by
	 * the backlight, return true. Otherwise return false.
	 */
	bool (*controls_device)(struct backlight_device *bd, struct device *display_dev);
};
/**
 * struct backlight_properties - backlight properties
 *
 * This structure defines all the properties of a backlight.
 */
struct backlight_properties {
	/**
	 * @brightness: The current brightness requested by the user.
	 *
	 * The backlight core makes sure the range is (0 to max_brightness)
	 * when the brightness is set via the sysfs attribute:
	 * /sys/class/backlight/<backlight>/brightness.
	 *
	 * This value can be set in the backlight_properties passed
	 * to devm_backlight_device_register() to set a default brightness
	 * value.
	 */
	int brightness;

	/**
	 * @max_brightness: The maximum brightness value.
	 *
	 * This value must be set in the backlight_properties passed to
	 * devm_backlight_device_register() and shall not be modified by the
	 * driver after registration.
	 */
	int max_brightness;

	/**
	 * @power: The current power mode.
	 *
	 * User space can configure the power mode using the sysfs
	 * attribute: /sys/class/backlight/<backlight>/bl_power
	 * When the power property is updated update_status() is called.
	 *
	 * The possible values are: (0: full on, 4: full off), see
	 * BACKLIGHT_POWER constants.
	 *
	 * When the backlight device is enabled, @power is set to
	 * BACKLIGHT_POWER_ON. When the backlight device is disabled,
	 * @power is set to BACKLIGHT_POWER_OFF.
	 */
	int power;
#define BACKLIGHT_POWER_ON		(0)
#define BACKLIGHT_POWER_OFF		(4)
#define BACKLIGHT_POWER_REDUCED		(1) // deprecated; don't use in new code

	/**
	 * @type: The type of backlight supported.
	 *
	 * The backlight type allows userspace to make appropriate
	 * policy decisions based on the backlight type.
	 *
	 * This value must be set in the backlight_properties
	 * passed to devm_backlight_device_register().
	 */
	enum backlight_type type;

	/**
	 * @state: The state of the backlight core.
	 *
	 * The state is a bitmask. BL_CORE_FBBLANK is set when the display
	 * is expected to be blank. BL_CORE_SUSPENDED is set when the
	 * driver is suspended.
	 *
	 * backlight drivers are expected to use backlight_is_blank()
	 * in their update_status() operation rather than reading the
	 * state property.
	 *
	 * The state is maintained by the core and drivers may not modify it.
	 */
	unsigned int state;
#define BL_CORE_SUSPENDED	(1 << 0)	/* backlight is suspended */
#define BL_CORE_FBBLANK		(1 << 1)	/* backlight is under an fb blank event */

	/**
	 * @scale: The type of the brightness scale.
	 */
	enum backlight_scale scale;
};

/**
 * struct backlight_device - backlight device data
 *
 * This structure holds all data required by a backlight device.
 */
struct backlight_device {
	/**
	 * @props: Backlight properties
	 */
	struct backlight_properties props;

	/**
	 * @update_lock: The lock used when calling the update_status() operation.
	 *
	 * update_lock is an internal backlight lock that serialises access
	 * to the update_status() operation. The backlight core holds the update_lock
	 * when calling the update_status() operation. The update_lock shall not
	 * be used by backlight drivers.
	 */
	struct mutex update_lock;

	/**
	 * @ops_lock: The lock used around everything related to backlight_ops.
	 *
	 * ops_lock is an internal backlight lock that protects the ops pointer
	 * and is used around all accesses to ops and when the operations are
	 * invoked. The ops_lock shall not be used by backlight drivers.
	 */
	struct mutex ops_lock;

	/**
	 * @ops: Pointer to the backlight operations.
	 *
	 * If ops is NULL, the driver that registered this device has been unloaded,
	 * and if class_get_devdata() points to something in the body of that driver,
	 * it is also invalid.
	 */
	const struct backlight_ops *ops;

	/**
	 * @fb_notif: The framebuffer notifier block
	 */
	struct notifier_block fb_notif;

	/**
	 * @entry: List entry of all registered backlight devices
	 */
	struct list_head entry;

	/**
	 * @dev: Parent device.
	 */
	struct device dev;

	/**
	 * @fb_bl_on: The state of the individual fbdevs.
	 *
	 * Multiple fbdevs may share one backlight device. fb_bl_on
	 * records the state of each individual fbdev.
	 */
	bool fb_bl_on[FB_MAX];

	/**
	 * @use_count: The number of uses of fb_bl_on.
	 */
	int use_count;
};
/**
 * backlight_update_status - force an update of the backlight device status
 * @bd: the backlight device
 */
static inline int backlight_update_status(struct backlight_device *bd)
{
	int ret = -ENOENT;

	mutex_lock(&bd->update_lock);
	if (bd->ops && bd->ops->update_status)
		ret = bd->ops->update_status(bd);
	mutex_unlock(&bd->update_lock);

	return ret;
}

/**
 * backlight_enable - Enable backlight
 * @bd: the backlight device to enable
 */
static inline int backlight_enable(struct backlight_device *bd)
{
	if (!bd)
		return 0;

	bd->props.power = BACKLIGHT_POWER_ON;
	bd->props.state &= ~BL_CORE_FBBLANK;

	return backlight_update_status(bd);
}

/**
 * backlight_disable - Disable backlight
 * @bd: the backlight device to disable
 */
static inline int backlight_disable(struct backlight_device *bd)
{
	if (!bd)
		return 0;

	bd->props.power = BACKLIGHT_POWER_OFF;
	bd->props.state |= BL_CORE_FBBLANK;

	return backlight_update_status(bd);
}

/**
 * backlight_is_blank - Return true if display is expected to be blank
 * @bd: the backlight device
 *
 * Display is expected to be blank if any of these is true::
 *
 *   1) power is not UNBLANK
 *   2) state indicates BLANK or SUSPENDED
 *
 * Returns true if display is expected to be blank, false otherwise.
 */
static inline bool backlight_is_blank(const struct backlight_device *bd)
{
	return bd->props.power != BACKLIGHT_POWER_ON ||
	       bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK);
}

/**
 * backlight_get_brightness - Returns the current brightness value
 * @bd: the backlight device
 *
 * Returns the current brightness value, taking into consideration the current
 * state. If backlight_is_blank() returns true then return 0 as the brightness,
 * otherwise return the current brightness property value.
 *
 * Backlight drivers are expected to use this function in their update_status()
 * operation to get the brightness value.
 */
static inline int backlight_get_brightness(const struct backlight_device *bd)
{
	if (backlight_is_blank(bd))
		return 0;
	else
		return bd->props.brightness;
}
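/*
 * Illustrative sketch, not part of this header: a driver's
 * update_status() operation combining the helpers above, using
 * backlight_get_brightness() so that blanking and suspend yield
 * brightness 0. example_hw_set_brightness() is a hypothetical device
 * callback.
 */
int example_hw_set_brightness(int brightness);	/* hypothetical hw hook */

static int example_update_status(struct backlight_device *bd)
{
	/* 0 while blanked or suspended, else props.brightness */
	int brightness = backlight_get_brightness(bd);

	return example_hw_set_brightness(brightness);
}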
struct backlight_device *
backlight_device_register(const char *name, struct device *dev, void *devdata,
			  const struct backlight_ops *ops,
			  const struct backlight_properties *props);
struct backlight_device *
devm_backlight_device_register(struct device *dev, const char *name,
			       struct device *parent, void *devdata,
			       const struct backlight_ops *ops,
			       const struct backlight_properties *props);
void backlight_device_unregister(struct backlight_device *bd);
void devm_backlight_device_unregister(struct device *dev,
				      struct backlight_device *bd);
void backlight_force_update(struct backlight_device *bd,
			    enum backlight_update_reason reason);
struct backlight_device *backlight_device_get_by_name(const char *name);
struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
int backlight_device_set_brightness(struct backlight_device *bd,
				    unsigned long brightness);

#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)

/**
 * bl_get_data - access devdata
 * @bl_dev: pointer to backlight device
 *
 * When a backlight device is registered the driver has the possibility
 * to supply a void * devdata. bl_get_data() returns a pointer to the
 * devdata.
 *
 * RETURNS:
 *
 * pointer to devdata stored while registering the backlight device.
 */
static inline void * bl_get_data(struct backlight_device *bl_dev)
{
	return dev_get_drvdata(&bl_dev->dev);
}

#ifdef CONFIG_OF
struct backlight_device *of_find_backlight_by_node(struct device_node *node);
#else
static inline struct backlight_device *
of_find_backlight_by_node(struct device_node *node)
{
	return NULL;
}
#endif

#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
struct backlight_device *devm_of_find_backlight(struct device *dev);
#else
static inline struct backlight_device *
devm_of_find_backlight(struct device *dev)
{
	return NULL;
}
#endif

#endif
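/*
 * Illustrative registration sketch, not part of this header: a minimal
 * probe sequence wiring properties and ops together with the devm
 * helper. example_update_status() is the hypothetical operation sketched
 * above and "example-backlight" is an arbitrary name.
 */
static const struct backlight_ops example_bl_ops = {
	.update_status	= example_update_status,
};

static int example_backlight_probe(struct device *dev)
{
	struct backlight_properties props = {
		.type = BACKLIGHT_RAW,	/* must be set before registering */
		.max_brightness = 255,
		.brightness = 128,	/* default brightness */
	};
	struct backlight_device *bd;

	bd = devm_backlight_device_register(dev, "example-backlight", dev,
					    NULL, &example_bl_ops, &props);
	if (IS_ERR(bd))
		return PTR_ERR(bd);

	/* Push the initial brightness to the hardware. */
	return backlight_update_status(bd);
}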
1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 
2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 
3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <linux/cdx/cdx_bus.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>
#include <linux/msi.h>
#include <uapi/linux/iommufd.h>

#include "dma-iommu.h"
#include "iommu-priv.h"

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static DEFINE_IDA(iommu_global_pasid_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

/* Tags used with xa_tag_pointer() in group->pasid_array */
enum {
	IOMMU_PASID_ARRAY_DOMAIN = 0,
	IOMMU_PASID_ARRAY_HANDLE = 1
};

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct xarray pasid_array;
	struct mutex mutex;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *blocking_domain;
	struct iommu_domain *domain;
	struct list_head entry;
	unsigned int owner_cnt;
	void *owner;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

/* Iterate over each struct group_device in a struct iommu_group */
#define for_each_group_device(group, pos) \
	list_for_each_entry(pos, &(group)->devices, list)

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]			= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
	[IOMMU_RESV_RESERVED]			= "reserved",
	[IOMMU_RESV_MSI]			= "msi",
	[IOMMU_RESV_SW_MSI]			= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)
#define IOMMU_CMD_LINE_STRICT		BIT(1)

static int bus_iommu_probe(const struct bus_type *bus);
static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data);
static void iommu_release_device(struct device *dev);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static struct iommu_domain *__iommu_paging_domain_alloc_flags(struct device *dev,
							      unsigned int type,
							      unsigned int flags);

enum {
	IOMMU_SET_DOMAIN_MUST_SUCCEED = 1 << 0,
};

static int __iommu_device_set_domain(struct iommu_group *group,
				     struct device *dev,
				     struct iommu_domain *new_domain,
				     unsigned int flags);
static int __iommu_group_set_domain_internal(struct iommu_group *group,
					     struct iommu_domain *new_domain,
					     unsigned int flags);
static int __iommu_group_set_domain(struct iommu_group *group,
				    struct iommu_domain *new_domain)
{
	return __iommu_group_set_domain_internal(group, new_domain, 0);
}
static void __iommu_group_set_domain_nofail(struct iommu_group *group,
					    struct iommu_domain *new_domain)
{
	WARN_ON(__iommu_group_set_domain_internal(
		group, new_domain, IOMMU_SET_DOMAIN_MUST_SUCCEED));
}

static int iommu_setup_default_domain(struct iommu_group *group,
				      int target_type);
static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);
static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
						     struct device *dev);
static void __iommu_group_free_device(struct iommu_group *group,
				      struct group_device *grp_dev);
static void iommu_domain_init(struct iommu_domain *domain, unsigned int type,
			      const struct iommu_ops *ops);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

static const struct bus_type * const iommu_buses[] = {
	&platform_bus_type,
#ifdef CONFIG_PCI
	&pci_bus_type,
#endif
#ifdef CONFIG_ARM_AMBA
	&amba_bustype,
#endif
#ifdef CONFIG_FSL_MC_BUS
	&fsl_mc_bus_type,
#endif
#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
	&host1x_context_device_bus_type,
#endif
#ifdef CONFIG_CDX_BUS
	&cdx_bus_type,
#endif
};

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_DMA_FQ:
		return "Translated";
	case IOMMU_DOMAIN_PLATFORM:
		return "Platform";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	struct notifier_block *nb;

	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() &&
		    cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	if (!iommu_default_passthrough() && !iommu_dma_strict)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

	pr_info("Default domain type: %s%s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			" (set via kernel command line)" : "");

	if (!iommu_default_passthrough())
		pr_info("DMA domain TLB invalidation policy: %s mode%s\n",
			iommu_dma_strict ? "strict" : "lazy",
			(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
" (set via kernel command line)" : ""); nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL); if (!nb) return -ENOMEM; for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { nb[i].notifier_call = iommu_bus_notifier; bus_register_notifier(iommu_buses[i], &nb[i]); } return 0; } subsys_initcall(iommu_subsys_init); static int remove_iommu_group(struct device *dev, void *data) { if (dev->iommu && dev->iommu->iommu_dev == data) iommu_release_device(dev); return 0; } /** * iommu_device_register() - Register an IOMMU hardware instance * @iommu: IOMMU handle for the instance * @ops: IOMMU ops to associate with the instance * @hwdev: (optional) actual instance device, used for fwnode lookup * * Return: 0 on success, or an error. */ int iommu_device_register(struct iommu_device *iommu, const struct iommu_ops *ops, struct device *hwdev) { int err = 0; /* We need to be able to take module references appropriately */ if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner)) return -EINVAL; iommu->ops = ops; if (hwdev) iommu->fwnode = dev_fwnode(hwdev); spin_lock(&iommu_device_lock); list_add_tail(&iommu->list, &iommu_device_list); spin_unlock(&iommu_device_lock); for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) err = bus_iommu_probe(iommu_buses[i]); if (err) iommu_device_unregister(iommu); return err; } EXPORT_SYMBOL_GPL(iommu_device_register); void iommu_device_unregister(struct iommu_device *iommu) { for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group); spin_lock(&iommu_device_lock); list_del(&iommu->list); spin_unlock(&iommu_device_lock); /* Pairs with the alloc in generic_single_device_group() */ iommu_group_put(iommu->singleton_group); iommu->singleton_group = NULL; } EXPORT_SYMBOL_GPL(iommu_device_unregister); #if IS_ENABLED(CONFIG_IOMMUFD_TEST) void iommu_device_unregister_bus(struct iommu_device *iommu, const struct bus_type *bus, struct notifier_block *nb) { bus_unregister_notifier(bus, nb); iommu_device_unregister(iommu); } EXPORT_SYMBOL_GPL(iommu_device_unregister_bus); /* * Register an iommu driver against a single bus. This is only used by iommufd * selftest to create a mock iommu driver. The caller must provide * some memory to hold a notifier_block. */ int iommu_device_register_bus(struct iommu_device *iommu, const struct iommu_ops *ops, const struct bus_type *bus, struct notifier_block *nb) { int err; iommu->ops = ops; nb->notifier_call = iommu_bus_notifier; err = bus_register_notifier(bus, nb); if (err) return err; spin_lock(&iommu_device_lock); list_add_tail(&iommu->list, &iommu_device_list); spin_unlock(&iommu_device_lock); err = bus_iommu_probe(bus); if (err) { iommu_device_unregister_bus(iommu, bus, nb); return err; } return 0; } EXPORT_SYMBOL_GPL(iommu_device_register_bus); #endif static struct dev_iommu *dev_iommu_get(struct device *dev) { struct dev_iommu *param = dev->iommu; lockdep_assert_held(&iommu_probe_device_lock); if (param) return param; param = kzalloc(sizeof(*param), GFP_KERNEL); if (!param) return NULL; mutex_init(&param->lock); dev->iommu = param; return param; } void dev_iommu_free(struct device *dev) { struct dev_iommu *param = dev->iommu; dev->iommu = NULL; if (param->fwspec) { fwnode_handle_put(param->fwspec->iommu_fwnode); kfree(param->fwspec); } kfree(param); } /* * Internal equivalent of device_iommu_mapped() for when we care that a device * actually has API ops, and don't want false positives from VFIO-only groups. 
/*
 * Internal equivalent of device_iommu_mapped() for when we care that a device
 * actually has API ops, and don't want false positives from VFIO-only groups.
 */
static bool dev_has_iommu(struct device *dev)
{
	return dev->iommu && dev->iommu->iommu_dev;
}

static u32 dev_iommu_get_max_pasids(struct device *dev)
{
	u32 max_pasids = 0, bits = 0;
	int ret;

	if (dev_is_pci(dev)) {
		ret = pci_max_pasids(to_pci_dev(dev));
		if (ret > 0)
			max_pasids = ret;
	} else {
		ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
		if (!ret)
			max_pasids = 1UL << bits;
	}

	return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}

void dev_iommu_priv_set(struct device *dev, void *priv)
{
	/* FSL_PAMU does something weird */
	if (!IS_ENABLED(CONFIG_FSL_PAMU))
		lockdep_assert_held(&iommu_probe_device_lock);
	dev->iommu->priv = priv;
}
EXPORT_SYMBOL_GPL(dev_iommu_priv_set);

/*
 * Init the dev->iommu and dev->iommu_group in the struct device and get the
 * driver probed
 */
static int iommu_init_device(struct device *dev)
{
	const struct iommu_ops *ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	int ret;

	if (!dev_iommu_get(dev))
		return -ENOMEM;
	/*
	 * For FDT-based systems and ACPI IORT/VIOT, the common firmware parsing
	 * is buried in the bus dma_configure path. Properly unpicking that is
	 * still a big job, so for now just invoke the whole thing. The device
	 * already having a driver bound means dma_configure has already run and
	 * either found no IOMMU to wait for, or we're in its replay call right
	 * now, so either way there's no point calling it again.
	 */
	if (!dev->driver && dev->bus->dma_configure) {
		mutex_unlock(&iommu_probe_device_lock);
		dev->bus->dma_configure(dev);
		mutex_lock(&iommu_probe_device_lock);
	}
	/*
	 * At this point, relevant devices either now have a fwspec which will
	 * match ops registered with a non-NULL fwnode, or we can reasonably
	 * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can
	 * be present, and that any of their registered instances has suitable
	 * ops for probing, and thus cheekily co-opt the same mechanism.
	 */
	ops = iommu_fwspec_ops(dev->iommu->fwspec);
	if (!ops) {
		ret = -ENODEV;
		goto err_free;
	}

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto err_module_put;
	}
	dev->iommu->iommu_dev = iommu_dev;

	ret = iommu_device_link(iommu_dev, dev);
	if (ret)
		goto err_release;

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		group = ERR_PTR(-EINVAL);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto err_unlink;
	}
	dev->iommu_group = group;

	dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
	if (ops->is_attach_deferred)
		dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
	return 0;

err_unlink:
	iommu_device_unlink(iommu_dev, dev);
err_release:
	if (ops->release_device)
		ops->release_device(dev);
err_module_put:
	module_put(ops->owner);
err_free:
	dev->iommu->iommu_dev = NULL;
	dev_iommu_free(dev);
	return ret;
}

static void iommu_deinit_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	lockdep_assert_held(&group->mutex);

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	/*
	 * release_device() must stop using any attached domain on the device.
	 * If there are still other devices in the group, they are not affected
	 * by this callback.
	 *
	 * If the iommu driver provides release_domain, the core code ensures
	 * that domain is attached prior to calling release_device. Drivers can
	 * use this to enforce a translation on the idle iommu. Typically, the
	 * global static blocked_domain is a good choice.
	 *
	 * Otherwise, the iommu driver must set the device to either an identity
	 * or a blocking translation in release_device() and stop using any
	 * domain pointer, as it is going to be freed.
	 *
	 * Regardless, if a delayed attach never occurred, then the release
	 * should still avoid touching any hardware configuration either.
	 */
	if (!dev->iommu->attach_deferred && ops->release_domain)
		ops->release_domain->ops->attach_dev(ops->release_domain, dev);

	if (ops->release_device)
		ops->release_device(dev);

	/*
	 * If this is the last driver to use the group then we must free the
	 * domains before we do the module_put().
	 */
	if (list_empty(&group->devices)) {
		if (group->default_domain) {
			iommu_domain_free(group->default_domain);
			group->default_domain = NULL;
		}
		if (group->blocking_domain) {
			iommu_domain_free(group->blocking_domain);
			group->blocking_domain = NULL;
		}
		group->domain = NULL;
	}

	/* Caller must put iommu_group */
	dev->iommu_group = NULL;
	module_put(ops->owner);
	dev_iommu_free(dev);
}
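/*
 * Example: how a driver typically satisfies the release_domain contract
 * described above - point it at a static blocking domain so the device
 * stops translating before its domain pointers are torn down. The "my_"
 * identifiers are illustrative, not kernel API.
 *
 *	static struct iommu_domain my_blocked_domain = {
 *		.type = IOMMU_DOMAIN_BLOCKED,
 *		.ops = &my_blocked_domain_ops,
 *	};
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		...
 *		.release_domain = &my_blocked_domain,
 *	};
 */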
static struct iommu_domain *pasid_array_entry_to_domain(void *entry)
{
	if (xa_pointer_tag(entry) == IOMMU_PASID_ARRAY_DOMAIN)
		return xa_untag_pointer(entry);
	return ((struct iommu_attach_handle *)xa_untag_pointer(entry))->domain;
}

DEFINE_MUTEX(iommu_probe_device_lock);

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	struct iommu_group *group;
	struct group_device *gdev;
	int ret;

	/*
	 * Serialise to avoid races between IOMMU drivers registering in
	 * parallel and/or the "replay" calls from ACPI/OF code via client
	 * driver probe. Once the latter have been cleaned up we should
	 * probably be able to use device_lock() here to minimise the scope,
	 * but for now enforcing a simple global ordering is fine.
	 */
	lockdep_assert_held(&iommu_probe_device_lock);

	/* Device is probed already if in a group */
	if (dev->iommu_group)
		return 0;

	ret = iommu_init_device(dev);
	if (ret)
		return ret;
	/*
	 * And if we do now see any replay calls, they would indicate someone
	 * misusing the dma_configure path outside bus code.
	 */
	if (dev->driver)
		dev_WARN(dev, "late IOMMU probe at driver bind, something fishy here!\n");

	group = dev->iommu_group;
	gdev = iommu_group_alloc_device(group, dev);
	mutex_lock(&group->mutex);
	if (IS_ERR(gdev)) {
		ret = PTR_ERR(gdev);
		goto err_put_group;
	}

	/*
	 * The gdev must be in the list before calling
	 * iommu_setup_default_domain()
	 */
	list_add_tail(&gdev->list, &group->devices);
	WARN_ON(group->default_domain && !group->domain);
	if (group->default_domain)
		iommu_create_device_direct_mappings(group->default_domain, dev);
	if (group->domain) {
		ret = __iommu_device_set_domain(group, dev, group->domain, 0);
		if (ret)
			goto err_remove_gdev;
	} else if (!group->default_domain && !group_list) {
		ret = iommu_setup_default_domain(group, 0);
		if (ret)
			goto err_remove_gdev;
	} else if (!group->default_domain) {
		/*
		 * With a group_list argument we defer the default_domain setup
		 * to the caller by providing a de-duplicated list of groups
		 * that need further setup.
		 */
		if (list_empty(&group->entry))
			list_add_tail(&group->entry, group_list);
	}

	if (group->default_domain)
		iommu_setup_dma_ops(dev);

	mutex_unlock(&group->mutex);

	return 0;

err_remove_gdev:
	list_del(&gdev->list);
	__iommu_group_free_device(group, gdev);
err_put_group:
	iommu_deinit_device(dev);
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops;
	int ret;

	mutex_lock(&iommu_probe_device_lock);
	ret = __iommu_probe_device(dev, NULL);
	mutex_unlock(&iommu_probe_device_lock);
	if (ret)
		return ret;

	ops = dev_iommu_ops(dev);
	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;
}

static void __iommu_group_free_device(struct iommu_group *group,
				      struct group_device *grp_dev)
{
	struct device *dev = grp_dev->dev;

	sysfs_remove_link(group->devices_kobj, grp_dev->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	/*
	 * If the group has become empty then ownership must have been
	 * released, and the current domain must be set back to NULL or
	 * the default domain.
	 */
	if (list_empty(&group->devices))
		WARN_ON(group->owner_cnt ||
			group->domain != group->default_domain);

	kfree(grp_dev->name);
	kfree(grp_dev);
}

/* Remove the iommu_group from the struct device. */
static void __iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		if (device->dev != dev)
			continue;

		list_del(&device->list);
		__iommu_group_free_device(group, device);
		if (dev_has_iommu(dev))
			iommu_deinit_device(dev);
		else
			dev->iommu_group = NULL;
		break;
	}
	mutex_unlock(&group->mutex);

	/*
	 * Pairs with the get in iommu_init_device() or
	 * iommu_group_add_device()
	 */
	iommu_group_put(group);
}

static void iommu_release_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		__iommu_group_remove_device(dev);

	/* Free any fwspec if no iommu_driver was ever attached */
	if (dev->iommu)
		dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
}
early_param("iommu.strict", iommu_dma_setup);
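/*
 * Example: the two early_params above correspond to kernel command line
 * options, e.g.:
 *
 *	iommu.passthrough=1	// default domain is IDENTITY (no translation)
 *	iommu.passthrough=0 iommu.strict=0
 *				// translated default domain with lazy (flush
 *				// queue) TLB invalidation, i.e. DMA-FQ
 */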
void iommu_set_dma_strict(void)
{
	iommu_dma_strict = true;
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sysfs_emit(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type, GFP_KERNEL);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}
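/*
 * Worked example (illustrative) of the sort/merge in
 * iommu_insert_resv_region(): inserting an MSI region [0x1800, 0x2fff] into
 * a list already holding an MSI region [0x1000, 0x1fff] first sorts the new
 * entry by start address, then, because the two ranges overlap and have the
 * same type, collapses them into a single [0x1000, 0x2fff] entry. An
 * overlapping region of a *different* type would be kept as a separate
 * entry, since only same-type segments are merged.
 */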
int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		struct list_head dev_resv_regions;

		/*
		 * Non-API groups still expose reserved_regions in sysfs,
		 * so filter out calls that get here that way.
		 */
		if (!dev_has_iommu(device->dev))
			break;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	int offset = 0;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n",
					(long long)region->start,
					(long long)(region->start +
						    region->length - 1),
					iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return offset;
}

static ssize_t iommu_group_show_type(struct iommu_group *group, char *buf)
{
	char *type = "unknown";

	mutex_lock(&group->mutex);
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA";
			break;
		case IOMMU_DOMAIN_DMA_FQ:
			type = "DMA-FQ";
			break;
		}
	}
	mutex_unlock(&group->mutex);

	return sysfs_emit(buf, "%s\n", type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_free(&iommu_group_ida, group->id);

	/* Domains are free'd by iommu_deinit_device() */
	WARN_ON(group->default_domain);
	WARN_ON(group->blocking_domain);

	kfree(group->name);
	kfree(group);
}

static const struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	xa_init(&group->pasid_array);

	ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret) {
		kobject_put(group->devices_kobj);
		return ERR_PTR(ret);
	}

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);
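/*
 * Example: a driver-side sketch (hypothetical "my_" names) combining the
 * group APIs above - allocate a group from a ->device_group() callback,
 * attach per-group driver data with a destructor, and give it a sysfs name:
 *
 *	static void my_group_data_release(void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	static struct iommu_group *my_device_group(struct device *dev)
 *	{
 *		struct my_group_data *data;
 *		struct iommu_group *group;
 *
 *		group = iommu_group_alloc();
 *		if (IS_ERR(group))
 *			return group;
 *
 *		data = kzalloc(sizeof(*data), GFP_KERNEL);
 *		if (!data) {
 *			iommu_group_put(group);
 *			return ERR_PTR(-ENOMEM);
 *		}
 *
 *		iommu_group_set_iommudata(group, data, my_group_data_release);
 *		iommu_group_set_name(group, "my-group");
 *		return group;
 *	}
 */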
static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
					       struct device *dev)
{
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	pg_size = domain->pgsize_bitmap ?
		  1UL << __ffs(domain->pgsize_bitmap) : 0;
	INIT_LIST_HEAD(&mappings);

	if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size))
		return -EINVAL;

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;
		size_t map_size = 0;

		if (entry->type == IOMMU_RESV_DIRECT)
			dev->iommu->require_direct = 1;

		if ((entry->type != IOMMU_RESV_DIRECT &&
		     entry->type != IOMMU_RESV_DIRECT_RELAXABLE) ||
		    !iommu_is_dma_domain(domain))
			continue;

		start = ALIGN(entry->start, pg_size);
		end = ALIGN(entry->start + entry->length, pg_size);

		for (addr = start; addr <= end; addr += pg_size) {
			phys_addr_t phys_addr;

			if (addr == end)
				goto map_end;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (!phys_addr) {
				map_size += pg_size;
				continue;
			}

map_end:
			if (map_size) {
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot, GFP_KERNEL);
				if (ret)
					goto out;
				map_size = 0;
			}
		}
	}
out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

/* This is undone by __iommu_group_free_device() */
static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
						     struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return device;

err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ERR_PTR(ret);
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	struct group_device *gdev;

	gdev = iommu_group_alloc_device(group, dev);
	if (IS_ERR(gdev))
		return PTR_ERR(gdev);

	iommu_group_ref_get(group);
	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&gdev->list, &group->devices);
	mutex_unlock(&group->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	__iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
/**
 * iommu_group_mutex_assert - Check device group mutex lock
 * @dev: the device that has group param set
 *
 * This function is called by an iommu driver to check whether it holds
 * group mutex lock for the given device or not.
 *
 * Note that this function must be called after device group param is set.
 */
void iommu_group_mutex_assert(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	lockdep_assert_held(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_group_mutex_assert);
#endif

static struct device *iommu_group_first_dev(struct iommu_group *group)
{
	lockdep_assert_held(&group->mutex);
	return list_first_entry(&group->devices, struct group_device, list)->dev;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);
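/*
 * Example: counting the devices in a group with the reference-counted
 * lookup and iteration helpers above (illustrative sketch; "my_" names are
 * not kernel API):
 *
 *	static int my_count_cb(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	// a non-zero return would stop the iteration
 *	}
 *
 *	static int my_count_group_devices(struct device *dev)
 *	{
 *		struct iommu_group *group = iommu_group_get(dev);
 *		int count = 0;
 *
 *		if (!group)
 *			return 0;
 *		iommu_group_for_each_dev(group, &count, my_count_cb);
 *		iommu_group_put(group);
 *		return count;
 *	}
 */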
/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);
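/*
 * Example: a driver's ->device_group() callback is usually a thin dispatch
 * over these helpers, picking pci_device_group() (below) for PCI devices
 * and a generic helper otherwise (illustrative sketch; "my_" name is not
 * kernel API):
 *
 *	static struct iommu_group *my_device_group(struct device *dev)
 *	{
 *		if (dev_is_pci(dev))
 *			return pci_device_group(dev);
 *		return generic_device_group(dev);
 *	}
 */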
/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per iommu driver instance shared by every device
 * probed by that iommu driver.
 */
struct iommu_group *generic_single_device_group(struct device *dev)
{
	struct iommu_device *iommu = dev->iommu->iommu_dev;

	if (!iommu->singleton_group) {
		struct iommu_group *group;

		group = iommu_group_alloc();
		if (IS_ERR(group))
			return group;
		iommu->singleton_group = group;
	}
	return iommu_group_ref_get(iommu->singleton_group);
}
EXPORT_SYMBOL_GPL(generic_single_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	if (ops->identity_domain)
		return ops->identity_domain;

	/* Older drivers create the identity domain via ops->domain_alloc() */
	if (!ops->domain_alloc)
		return ERR_PTR(-EOPNOTSUPP);

	domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
	if (IS_ERR(domain))
		return domain;
	if (!domain)
		return ERR_PTR(-ENOMEM);

	iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops);
	return domain;
}

static struct iommu_domain *
__iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
	struct device *dev = iommu_group_first_dev(group);
	struct iommu_domain *dom;

	if (group->default_domain && group->default_domain->type == req_type)
		return group->default_domain;

	/*
	 * When allocating the DMA API domain assume that the driver is going to
	 * use PASID and make sure the RID's domain is PASID compatible.
	 */
	if (req_type & __IOMMU_DOMAIN_PAGING) {
		dom = __iommu_paging_domain_alloc_flags(dev, req_type,
			   dev->iommu->max_pasids ?
			   IOMMU_HWPT_ALLOC_PASID : 0);

		/*
		 * If driver does not support PASID feature then
		 * try to allocate non-PASID domain
		 */
		if (PTR_ERR(dom) == -EOPNOTSUPP)
			dom = __iommu_paging_domain_alloc_flags(dev, req_type, 0);

		return dom;
	}

	if (req_type == IOMMU_DOMAIN_IDENTITY)
		return __iommu_alloc_identity_domain(dev);

	return ERR_PTR(-EINVAL);
}

/*
 * req_type of 0 means "auto" which means to select a domain based on
 * iommu_def_domain_type or what the driver actually supports.
 */
static struct iommu_domain *
iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
	const struct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group));
	struct iommu_domain *dom;

	lockdep_assert_held(&group->mutex);

	/*
	 * Allow legacy drivers to specify the domain that will be the default
	 * domain. This should always be either an IDENTITY/BLOCKED/PLATFORM
	 * domain. Do not use in new drivers.
	 */
	if (ops->default_domain) {
		if (req_type != ops->default_domain->type)
			return ERR_PTR(-EINVAL);
		return ops->default_domain;
	}

	if (req_type)
		return __iommu_group_alloc_default_domain(group, req_type);

	/* The driver gave no guidance on what type to use, try the default */
	dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type);
	if (!IS_ERR(dom))
		return dom;

	/* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
		return ERR_PTR(-EINVAL);
	dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA);
	if (IS_ERR(dom))
		return dom;

	pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
		iommu_def_domain_type, group->name);
	return dom;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
	struct list_head *group_list = data;
	int ret;

	mutex_lock(&iommu_probe_device_lock);
	ret = __iommu_probe_device(dev, group_list);
	mutex_unlock(&iommu_probe_device_lock);
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	return 0;
}

/*
 * Combine the driver's chosen def_domain_type across all the devices in a
 * group. Drivers must give a consistent result.
 */
static int iommu_get_def_domain_type(struct iommu_group *group,
				     struct device *dev, int cur_type)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	int type;

	if (ops->default_domain) {
		/*
		 * Drivers that declare a global static default_domain will
		 * always choose that.
		 */
		type = ops->default_domain->type;
	} else {
		if (ops->def_domain_type)
			type = ops->def_domain_type(dev);
		else
			return cur_type;
	}
	if (!type || cur_type == type)
		return cur_type;
	if (!cur_type)
		return type;

	dev_err_ratelimited(
		dev,
		"IOMMU driver error, requesting conflicting def_domain_type, %s and %s, for devices in group %u.\n",
		iommu_domain_type_str(cur_type), iommu_domain_type_str(type),
		group->id);

	/*
	 * Try to recover, drivers are allowed to force IDENTITY or DMA, IDENTITY
	 * takes precedence.
	 */
	if (type == IOMMU_DOMAIN_IDENTITY)
		return type;
	return cur_type;
}
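/*
 * Example: a driver's ->def_domain_type() callback feeding the logic above
 * might force an identity default domain for a device that cannot tolerate
 * translation (illustrative sketch; the quirk test is hypothetical):
 *
 *	static int my_def_domain_type(struct device *dev)
 *	{
 *		if (my_device_needs_identity_quirk(dev))
 *			return IOMMU_DOMAIN_IDENTITY;
 *		return 0;	// no preference, use the global default
 *	}
 */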
/*
 * A target_type of 0 will select the best domain type. 0 can be returned in
 * this case meaning the global default should be used.
 */
static int
iommu_get_default_domain_type(struct iommu_group *group, int target_type)
{
	struct device *untrusted = NULL;
	struct group_device *gdev;
	int driver_type = 0;

	lockdep_assert_held(&group->mutex);

	/*
	 * ARM32 drivers supporting CONFIG_ARM_DMA_USE_IOMMU can declare an
	 * identity_domain and it will automatically become their default
	 * domain. Later on ARM_DMA_USE_IOMMU will install its UNMANAGED domain.
	 * Override the selection to IDENTITY.
	 */
	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
		static_assert(!(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) &&
				IS_ENABLED(CONFIG_IOMMU_DMA)));
		driver_type = IOMMU_DOMAIN_IDENTITY;
	}

	for_each_group_device(group, gdev) {
		driver_type = iommu_get_def_domain_type(group, gdev->dev,
							driver_type);

		if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->untrusted) {
			/*
			 * No ARM32 using systems will set untrusted, it cannot
			 * work.
			 */
			if (WARN_ON(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)))
				return -1;
			untrusted = gdev->dev;
		}
	}

	/*
	 * If the common dma ops are not selected in kconfig then we cannot use
	 * IOMMU_DOMAIN_DMA at all. Force IDENTITY if nothing else has been
	 * selected.
	 */
	if (!IS_ENABLED(CONFIG_IOMMU_DMA)) {
		if (WARN_ON(driver_type == IOMMU_DOMAIN_DMA))
			return -1;
		if (!driver_type)
			driver_type = IOMMU_DOMAIN_IDENTITY;
	}

	if (untrusted) {
		if (driver_type && driver_type != IOMMU_DOMAIN_DMA) {
			dev_err_ratelimited(
				untrusted,
				"Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n",
				group->id, iommu_domain_type_str(driver_type));
			return -1;
		}
		driver_type = IOMMU_DOMAIN_DMA;
	}

	if (target_type) {
		if (driver_type && target_type != driver_type)
			return -1;
		return target_type;
	}
	return driver_type;
}

static void iommu_group_do_probe_finalize(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);
}

static int bus_iommu_probe(const struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		struct group_device *gdev;

		mutex_lock(&group->mutex);

		/* Remove item from the list */
		list_del_init(&group->entry);

		/*
		 * We go to the trouble of deferred default domain creation so
		 * that the cross-group default domain type and the setup of the
		 * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios.
		 */
		ret = iommu_setup_default_domain(group, 0);
		if (ret) {
			mutex_unlock(&group->mutex);
			return ret;
		}
		for_each_group_device(group, gdev)
			iommu_setup_dma_ops(gdev->dev);
		mutex_unlock(&group->mutex);

		/*
		 * FIXME: Mis-locked because the ops->probe_finalize() call-back
		 * of some IOMMU drivers calls arm_iommu_attach_device() which
		 * in-turn might call back into IOMMU core code, where it tries
		 * to take group->mutex, resulting in a deadlock.
		 */
		for_each_group_device(group, gdev)
			iommu_group_do_probe_finalize(gdev->dev);
	}

	return 0;
}

/**
 * device_iommu_capable() - check for a general IOMMU capability
 * @dev: device to which the capability would be relevant, if available
 * @cap: IOMMU capability
 *
 * Return: true if an IOMMU is present and supports the given capability
 * for the given device, otherwise false.
 */
bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	const struct iommu_ops *ops;

	if (!dev_has_iommu(dev))
		return false;

	ops = dev_iommu_ops(dev);
	if (!ops->capable)
		return false;

	return ops->capable(dev, cap);
}
EXPORT_SYMBOL_GPL(device_iommu_capable);

/**
 * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi()
 *	 for a group
 * @group: Group to query
 *
 * IOMMU groups should not have differing values of
 * msi_device_has_isolated_msi() for devices in a group. However nothing
 * directly prevents this, so ensure mistakes don't result in isolation failures
 * by checking that all the devices are the same.
 */
bool iommu_group_has_isolated_msi(struct iommu_group *group)
{
	struct group_device *group_dev;
	bool ret = true;

	mutex_lock(&group->mutex);
	for_each_group_device(group, group_dev)
		ret &= msi_device_has_isolated_msi(group_dev->dev);
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	if (WARN_ON(!domain || domain->cookie_type != IOMMU_COOKIE_NONE))
		return;

	domain->cookie_type = IOMMU_COOKIE_FAULT_HANDLER;
	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static void iommu_domain_init(struct iommu_domain *domain, unsigned int type,
			      const struct iommu_ops *ops)
{
	domain->type = type;
	domain->owner = ops;
	if (!domain->ops)
		domain->ops = ops->default_domain_ops;

	/*
	 * If not already set, assume all sizes by default; the driver
	 * may override this later
	 */
	if (!domain->pgsize_bitmap)
		domain->pgsize_bitmap = ops->pgsize_bitmap;
}

static struct iommu_domain *
__iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type,
				  unsigned int flags)
{
	const struct iommu_ops *ops;
	struct iommu_domain *domain;

	if (!dev_has_iommu(dev))
		return ERR_PTR(-ENODEV);

	ops = dev_iommu_ops(dev);

	if (ops->domain_alloc_paging && !flags)
		domain = ops->domain_alloc_paging(dev);
	else if (ops->domain_alloc_paging_flags)
		domain = ops->domain_alloc_paging_flags(dev, flags, NULL);
	else if (ops->domain_alloc && !flags)
		domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
	else
		return ERR_PTR(-EOPNOTSUPP);

	if (IS_ERR(domain))
		return domain;
	if (!domain)
		return ERR_PTR(-ENOMEM);

	iommu_domain_init(domain, type, ops);
	return domain;
}

/**
 * iommu_paging_domain_alloc_flags() - Allocate a paging domain
 * @dev: device for which the domain is allocated
 * @flags: Bitmap of iommufd_hwpt_alloc_flags
 *
 * Allocate a paging domain which will be managed by a kernel driver. Return
 * allocated domain if successful, or an ERR pointer for failure.
 */
struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
						     unsigned int flags)
{
	return __iommu_paging_domain_alloc_flags(dev, IOMMU_DOMAIN_UNMANAGED,
						 flags);
}
EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc_flags);

void iommu_domain_free(struct iommu_domain *domain)
{
	switch (domain->cookie_type) {
	case IOMMU_COOKIE_DMA_IOVA:
		iommu_put_dma_cookie(domain);
		break;
	case IOMMU_COOKIE_DMA_MSI:
		iommu_put_msi_cookie(domain);
		break;
	case IOMMU_COOKIE_SVA:
		mmdrop(domain->mm);
		break;
	default:
		break;
	}
	if (domain->ops->free)
		domain->ops->free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

/*
 * Put the group's domain back to the appropriate core-owned domain - either the
 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
 */
static void __iommu_group_set_core_domain(struct iommu_group *group)
{
	struct iommu_domain *new_domain;

	if (group->owner)
		new_domain = group->blocking_domain;
	else
		new_domain = group->default_domain;

	__iommu_group_set_domain_nofail(group, new_domain);
}

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (ret)
		return ret;
	dev->iommu->attach_deferred = 0;
	trace_attach_device_to_domain(dev);
	return 0;
}

/**
 * iommu_attach_device - Attach an IOMMU domain to a device
 * @domain: IOMMU domain to attach
 * @dev: Device that will be attached
 *
 * Returns 0 on success and error code on failure
 *
 * Note that EINVAL can be treated as a soft failure, indicating
 * that certain configuration of the domain is incompatible with
 * the device. In this case attaching a different domain to the
 * device may succeed.
 */
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	/* Caller must be a probed driver on dev */
	struct iommu_group *group = dev->iommu_group;
	int ret;

	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (list_count_nodes(&group->devices) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
	if (dev->iommu && dev->iommu->attach_deferred)
		return __iommu_attach_device(domain, dev);

	return 0;
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	/* Caller must be a probed driver on dev */
	struct iommu_group *group = dev->iommu_group;

	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (WARN_ON(domain != group->domain) ||
	    WARN_ON(list_count_nodes(&group->devices) != 1))
		goto out_unlock;
	__iommu_group_set_core_domain(group);

out_unlock:
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	/* Caller must be a probed driver on dev */
	struct iommu_group *group = dev->iommu_group;

	if (!group)
		return NULL;

	return group->domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
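/*
 * Example (not part of the kernel source): a minimal sketch of how a kernel
 * driver might consume the APIs above - probe a capability, allocate a
 * paging domain, attach it to a device that sits alone in its group, and
 * tear everything down again. The function name and error-handling policy
 * are illustrative only.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_driver_use_own_domain(struct device *dev)
{
	struct iommu_domain *domain;
	int prot = IOMMU_READ | IOMMU_WRITE;
	int ret;

	/* Coherent page-table walks allow IOMMU_CACHE in the prot flags */
	if (device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
		prot |= IOMMU_CACHE;

	domain = iommu_paging_domain_alloc_flags(dev, 0);
	if (IS_ERR(domain))
		return PTR_ERR(domain);

	/* Fails with -EINVAL unless dev is the only device in its group */
	ret = iommu_attach_device(domain, dev);
	if (ret) {
		iommu_domain_free(domain);
		return ret;
	}

	/* ... iommu_map()/iommu_unmap() against domain, using prot ... */

	iommu_detach_device(domain, dev);
	iommu_domain_free(domain);
	return 0;
}
#endif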
/*
 * For IOMMU_DOMAIN_DMA implementations which already provide their own
 * guarantees that the group and its default domain are valid and correct.
 */
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->iommu_group->default_domain;
}

static void *iommu_make_pasid_array_entry(struct iommu_domain *domain,
					  struct iommu_attach_handle *handle)
{
	if (handle) {
		handle->domain = domain;
		return xa_tag_pointer(handle, IOMMU_PASID_ARRAY_HANDLE);
	}

	return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	struct device *dev;

	if (group->domain && group->domain != group->default_domain &&
	    group->domain != group->blocking_domain)
		return -EBUSY;

	dev = iommu_group_first_dev(group);
	if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
		return -EINVAL;

	return __iommu_group_set_domain(group, domain);
}

/**
 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group
 * @domain: IOMMU domain to attach
 * @group: IOMMU group that will be attached
 *
 * Returns 0 on success and error code on failure
 *
 * Note that EINVAL can be treated as a soft failure, indicating
 * that certain configuration of the domain is incompatible with
 * the group. In this case attaching a different domain to the
 * group may succeed.
 */
int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int __iommu_device_set_domain(struct iommu_group *group,
				     struct device *dev,
				     struct iommu_domain *new_domain,
				     unsigned int flags)
{
	int ret;

	/*
	 * If the device requires IOMMU_RESV_DIRECT then we cannot allow
	 * the blocking domain to be attached as it does not contain the
	 * required 1:1 mapping. This test effectively excludes the device
	 * being used with iommu_group_claim_dma_owner() which will block
	 * vfio and iommufd as well.
	 */
	if (dev->iommu->require_direct &&
	    (new_domain->type == IOMMU_DOMAIN_BLOCKED ||
	     new_domain == group->blocking_domain)) {
		dev_warn(dev,
			 "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n");
		return -EINVAL;
	}

	if (dev->iommu->attach_deferred) {
		if (new_domain == group->default_domain)
			return 0;
		dev->iommu->attach_deferred = 0;
	}

	ret = __iommu_attach_device(new_domain, dev);
	if (ret) {
		/*
		 * If we have a blocking domain then try to attach that in hopes
		 * of avoiding a UAF. Modern drivers should implement blocking
		 * domains as global statics that cannot fail.
		 */
		if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) &&
		    group->blocking_domain &&
		    group->blocking_domain != new_domain)
			__iommu_attach_device(group->blocking_domain, dev);
		return ret;
	}
	return 0;
}

/*
 * If 0 is returned the group's domain is new_domain. If an error is returned
 * then the group's domain will be set back to the existing domain unless
 * IOMMU_SET_DOMAIN_MUST_SUCCEED, otherwise an error is returned and the group's
 * domain is left inconsistent. It is a driver bug to fail attach with a
 * previously good domain. We try to avoid a kernel UAF because of this.
 *
 * IOMMU groups are really the natural working unit of the IOMMU, but the IOMMU
 * API works on domains and devices. Bridge that gap by iterating over the
 * devices in a group. Ideally we'd have a single device which represents the
 * requestor ID of the group, but we also allow IOMMU drivers to create policy
 * defined minimum sets, where the physical hardware may be able to distinguish
 * members, but we wish to group them at a higher level (ex. untrusted
 * multi-function PCI devices). Thus we attach each device.
 */
static int __iommu_group_set_domain_internal(struct iommu_group *group,
					     struct iommu_domain *new_domain,
					     unsigned int flags)
{
	struct group_device *last_gdev;
	struct group_device *gdev;
	int result;
	int ret;

	lockdep_assert_held(&group->mutex);

	if (group->domain == new_domain)
		return 0;

	if (WARN_ON(!new_domain))
		return -EINVAL;

	/*
	 * Changing the domain is done by calling attach_dev() on the new
	 * domain. This switch does not have to be atomic and DMA can be
	 * discarded during the transition. DMA must only be able to access
	 * either new_domain or group->domain, never something else.
	 */
	result = 0;
	for_each_group_device(group, gdev) {
		ret = __iommu_device_set_domain(group, gdev->dev, new_domain,
						flags);
		if (ret) {
			result = ret;
			/*
			 * Keep trying the other devices in the group. If a
			 * driver fails attach to an otherwise good domain, and
			 * does not support blocking domains, it should at least
			 * drop its reference on the current domain so we don't
			 * UAF.
			 */
			if (flags & IOMMU_SET_DOMAIN_MUST_SUCCEED)
				continue;
			goto err_revert;
		}
	}
	group->domain = new_domain;
	return result;

err_revert:
	/*
	 * This is called in error unwind paths. A well behaved driver should
	 * always allow us to attach to a domain that was already attached.
	 */
	last_gdev = gdev;
	for_each_group_device(group, gdev) {
		/*
		 * A NULL domain can happen only for first probe, in which case
		 * we leave group->domain as NULL and let release clean
		 * everything up.
		 */
		if (group->domain)
			WARN_ON(__iommu_device_set_domain(
				group, gdev->dev, group->domain,
				IOMMU_SET_DOMAIN_MUST_SUCCEED));
		if (gdev == last_gdev)
			break;
	}
	return ret;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_group_set_core_domain(group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (domain->type == IOMMU_DOMAIN_BLOCKED)
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
			   phys_addr_t paddr, size_t size, size_t *count)
{
	unsigned int pgsize_idx, pgsize_idx_next;
	unsigned long pgsizes;
	size_t offset, pgsize, pgsize_next;
	unsigned long addr_merge = paddr | iova;

	/* Page sizes supported by the hardware and small enough for @size */
	pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);

	/* Constrain the page sizes further based on the maximum alignment */
	if (likely(addr_merge))
		pgsizes &= GENMASK(__ffs(addr_merge), 0);

	/* Make sure we have at least one suitable page size */
	BUG_ON(!pgsizes);

	/* Pick the biggest page size remaining */
	pgsize_idx = __fls(pgsizes);
	pgsize = BIT(pgsize_idx);
	if (!count)
		return pgsize;

	/* Find the next biggest supported page size, if it exists */
	pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
	if (!pgsizes)
		goto out_set_count;

	pgsize_idx_next = __ffs(pgsizes);
	pgsize_next = BIT(pgsize_idx_next);

	/*
	 * There's no point trying a bigger page size unless the virtual
	 * and physical addresses are similarly offset within the larger page.
	 */
	if ((iova ^ paddr) & (pgsize_next - 1))
		goto out_set_count;

	/* Calculate the offset to the next page size alignment boundary */
	offset = pgsize_next - (addr_merge & (pgsize_next - 1));

	/*
	 * If size is big enough to accommodate the larger page, reduce
	 * the number of smaller pages.
	 */
	if (offset + pgsize_next <= size)
		size = offset;

out_set_count:
	*count = size >> pgsize_idx;
	return pgsize;
}

static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_domain_ops *ops = domain->ops;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize, count, mapped = 0;

		pgsize = iommu_pgsize(domain, iova, paddr, size, &count);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
			 iova, &paddr, pgsize, count);
		ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
				     gfp, &mapped);
		/*
		 * Some pages may have been mapped, even if an error occurred,
		 * so we should account for those so they can be unmapped.
		 */
		size -= mapped;

		if (ret)
			break;

		iova += mapped;
		paddr += mapped;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_domain_ops *ops = domain->ops;
	int ret;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	/* Discourage passing strange GFP flags */
	if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
				__GFP_HIGHMEM)))
		return -EINVAL;

	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
	if (ret == 0 && ops->iotlb_sync_map) {
		ret = ops->iotlb_sync_map(domain, iova, size);
		if (ret)
			goto out_err;
	}

	return ret;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
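/*
 * Example (not part of the kernel source): a minimal sketch of mapping a
 * single page into a paging domain and unmapping it again, assuming "domain"
 * came from iommu_paging_domain_alloc_flags() and "page" is a page the
 * caller owns. Names are illustrative.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_map_one_page(struct iommu_domain *domain,
				unsigned long iova, struct page *page)
{
	size_t size = PAGE_SIZE;
	int ret;

	ret = iommu_map(domain, iova, page_to_phys(page), size,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret)
		return ret;

	/* The installed translation can be queried back */
	WARN_ON(iommu_iova_to_phys(domain, iova) != page_to_phys(page));

	/* Unmap with the same IOVA range that was mapped */
	WARN_ON(iommu_unmap(domain, iova, size) != size);
	return 0;
}
#endif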
static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	if (WARN_ON(!ops->unmap_pages || domain->pgsize_bitmap == 0UL))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize, count;

		pgsize = iommu_pgsize(domain, iova, iova, size - unmapped, &count);
		unmapped_page = ops->unmap_pages(domain, iova, pgsize, count,
						 iotlb_gather);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

/**
 * iommu_unmap() - Remove mappings from a range of IOVA
 * @domain: Domain to manipulate
 * @iova: IO virtual address to start
 * @size: Length of the range starting from @iova
 *
 * iommu_unmap() will remove a translation created by iommu_map(). It cannot
 * subdivide a mapping created by iommu_map(), so it should be called with IOVA
 * ranges that match what was passed to iommu_map(). The range can aggregate
 * contiguous iommu_map() calls so long as no individual range is split.
 *
 * Returns: Number of bytes of IOVA unmapped. iova + res will be the point
 * unmapping stopped.
 */
size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	iommu_iotlb_gather_init(&iotlb_gather);
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	iommu_iotlb_sync(domain, &iotlb_gather);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size,
			struct iommu_iotlb_gather *iotlb_gather)
{
	return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);

ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		     struct scatterlist *sg, unsigned int nents, int prot,
		     gfp_t gfp)
{
	const struct iommu_domain_ops *ops = domain->ops;
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
	int ret;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	/* Discourage passing strange GFP flags */
	if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
				__GFP_HIGHMEM)))
		return -EINVAL;

	while (i <= nents) {
		phys_addr_t s_phys = sg_phys(sg);

		if (len && s_phys != start + len) {
			ret = __iommu_map(domain, iova + mapped, start,
					  len, prot, gfp);
			if (ret)
				goto out_err;

			mapped += len;
			len = 0;
		}

		if (sg_dma_is_bus_address(sg))
			goto next;

		if (len) {
			len += sg->length;
		} else {
			len = sg->length;
			start = s_phys;
		}

next:
		if (++i < nents)
			sg = sg_next(sg);
	}

	if (ops->iotlb_sync_map) {
		ret = ops->iotlb_sync_map(domain, iova, mapped);
		if (ret)
			goto out_err;
	}
	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map_sg);

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_set_pgtable_quirks(struct iommu_domain *domain,
			     unsigned long quirk)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (!domain->ops->set_pgtable_quirks)
		return -EINVAL;
	return domain->ops->set_pgtable_quirks(domain, quirk);
}
EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);

/**
 * iommu_get_resv_regions - get reserved regions
 * @dev: device for which to get reserved regions
 * @list: reserved region list for device
 *
 * This returns a list of reserved IOVA regions specific to this device.
 * A domain user should not map IOVA in these ranges.
 */
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}
EXPORT_SYMBOL_GPL(iommu_get_resv_regions);

/**
 * iommu_put_resv_regions - release reserved regions
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * This releases a reserved region list acquired by iommu_get_resv_regions().
 */
void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, list, list) {
		if (entry->free)
			entry->free(dev, entry);
		else
			kfree(entry);
	}
}
EXPORT_SYMBOL(iommu_put_resv_regions);

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type,
						  gfp_t gfp)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), gfp);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);

void iommu_set_default_passthrough(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);

const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode)
{
	const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!ops)
		return driver_deferred_probe_check_state(dev);

	if (fwspec)
		return ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	/* Preallocate for the overwhelmingly common case of 1 ID */
	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	fwnode_handle_get(iommu_fwnode);
	fwspec->iommu_fwnode = iommu_fwnode;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}

int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, new_num;

	if (!fwspec)
		return -EINVAL;

	new_num = fwspec->num_ids + num_ids;
	if (new_num > 1) {
		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
				  GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids = new_num;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);

/*
 * Per device IOMMU features.
 */
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev_has_iommu(dev)) {
		const struct iommu_ops *ops = dev_iommu_ops(dev);

		if (ops->dev_enable_feat)
			return ops->dev_enable_feat(dev, feat);
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
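/*
 * Example (not part of the kernel source): a minimal sketch of how a domain
 * user might walk a device's reserved regions before choosing IOVA ranges,
 * pairing iommu_get_resv_regions() with iommu_put_resv_regions(). The
 * printing is illustrative only.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list)
		dev_info(dev, "reserved IOVA start %pa size 0x%zx type %d\n",
			 &region->start, region->length, region->type);
	iommu_put_resv_regions(dev, &resv_regions);
}
#endif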
/*
 * The device drivers should do the necessary cleanups before calling this.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev_has_iommu(dev)) {
		const struct iommu_ops *ops = dev_iommu_ops(dev);

		if (ops->dev_disable_feat)
			return ops->dev_disable_feat(dev, feat);
	}

	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);

/**
 * iommu_setup_default_domain - Set the default_domain for the group
 * @group: Group to change
 * @target_type: Domain type to set as the default_domain
 *
 * Allocate a default domain and set it as the current domain on the group. If
 * the group already has a default domain it will be changed to the target_type.
 * When target_type is 0 the default domain is selected based on driver and
 * system preferences.
 */
static int iommu_setup_default_domain(struct iommu_group *group,
				      int target_type)
{
	struct iommu_domain *old_dom = group->default_domain;
	struct group_device *gdev;
	struct iommu_domain *dom;
	bool direct_failed;
	int req_type;
	int ret;

	lockdep_assert_held(&group->mutex);

	req_type = iommu_get_default_domain_type(group, target_type);
	if (req_type < 0)
		return -EINVAL;

	dom = iommu_group_alloc_default_domain(group, req_type);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	if (group->default_domain == dom)
		return 0;

	if (iommu_is_dma_domain(dom)) {
		ret = iommu_get_dma_cookie(dom);
		if (ret) {
			iommu_domain_free(dom);
			return ret;
		}
	}

	/*
	 * IOMMU_RESV_DIRECT and IOMMU_RESV_DIRECT_RELAXABLE regions must be
	 * mapped before their device is attached, in order to guarantee
	 * continuity with any FW activity
	 */
	direct_failed = false;
	for_each_group_device(group, gdev) {
		if (iommu_create_device_direct_mappings(dom, gdev->dev)) {
			direct_failed = true;
			dev_warn_once(
				gdev->dev->iommu->iommu_dev->dev,
				"IOMMU driver was not able to establish FW requested direct mapping.");
		}
	}

	/* We must set default_domain early for __iommu_device_set_domain */
	group->default_domain = dom;
	if (!group->domain) {
		/*
		 * Drivers are not allowed to fail the first domain attach.
		 * The only way to recover from this is to fail attaching the
		 * iommu driver and call ops->release_device. Put the domain
		 * in group->default_domain so it is freed after.
		 */
		ret = __iommu_group_set_domain_internal(
			group, dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
		if (WARN_ON(ret))
			goto out_free_old;
	} else {
		ret = __iommu_group_set_domain(group, dom);
		if (ret)
			goto err_restore_def_domain;
	}

	/*
	 * Drivers are supposed to allow mappings to be installed in a domain
	 * before device attachment, but some don't. Hack around this defect by
	 * trying again after attaching. If this happens it means the device
	 * will not continuously have the IOMMU_RESV_DIRECT map.
	 */
	if (direct_failed) {
		for_each_group_device(group, gdev) {
			ret = iommu_create_device_direct_mappings(dom, gdev->dev);
			if (ret)
				goto err_restore_domain;
		}
	}

out_free_old:
	if (old_dom)
		iommu_domain_free(old_dom);
	return ret;

err_restore_domain:
	if (old_dom)
		__iommu_group_set_domain_internal(
			group, old_dom, IOMMU_SET_DOMAIN_MUST_SUCCEED);
err_restore_def_domain:
	if (old_dom) {
		iommu_domain_free(dom);
		group->default_domain = old_dom;
	}
	return ret;
}

/*
 * Changing the default domain through sysfs requires the users to unbind the
 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
 * transition. Return failure if this isn't met.
 *
 * We need to consider the race between this and the device release path.
 * group->mutex is used here to guarantee that the device release path
 * will not be entered at the same time.
 */
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count)
{
	struct group_device *gdev;
	int ret, req_type;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	if (WARN_ON(!group) || !group->default_domain)
		return -EINVAL;

	if (sysfs_streq(buf, "identity"))
		req_type = IOMMU_DOMAIN_IDENTITY;
	else if (sysfs_streq(buf, "DMA"))
		req_type = IOMMU_DOMAIN_DMA;
	else if (sysfs_streq(buf, "DMA-FQ"))
		req_type = IOMMU_DOMAIN_DMA_FQ;
	else if (sysfs_streq(buf, "auto"))
		req_type = 0;
	else
		return -EINVAL;

	mutex_lock(&group->mutex);
	/* We can bring up a flush queue without tearing down the domain. */
	if (req_type == IOMMU_DOMAIN_DMA_FQ &&
	    group->default_domain->type == IOMMU_DOMAIN_DMA) {
		ret = iommu_dma_init_fq(group->default_domain);
		if (ret)
			goto out_unlock;

		group->default_domain->type = IOMMU_DOMAIN_DMA_FQ;
		ret = count;
		goto out_unlock;
	}

	/* Otherwise, ensure that device exists and no driver is bound. */
	if (list_empty(&group->devices) || group->owner_cnt) {
		ret = -EPERM;
		goto out_unlock;
	}

	ret = iommu_setup_default_domain(group, req_type);
	if (ret)
		goto out_unlock;

	/* Make sure dma_ops is appropriately set */
	for_each_group_device(group, gdev)
		iommu_setup_dma_ops(gdev->dev);

out_unlock:
	mutex_unlock(&group->mutex);
	return ret ?: count;
}

/**
 * iommu_device_use_default_domain() - Device driver wants to handle device
 *                                     DMA through the kernel DMA API.
 * @dev: The device.
 *
 * The device driver about to bind @dev wants to do DMA through the kernel
 * DMA API. Return 0 if it is allowed, otherwise an error.
 */
int iommu_device_use_default_domain(struct device *dev)
{
	/* Caller is the driver core during the pre-probe path */
	struct iommu_group *group = dev->iommu_group;
	int ret = 0;

	if (!group)
		return 0;

	mutex_lock(&group->mutex);
	/* We may race against bus_iommu_probe() finalising groups here */
	if (!group->default_domain) {
		ret = -EPROBE_DEFER;
		goto unlock_out;
	}
	if (group->owner_cnt) {
		if (group->domain != group->default_domain || group->owner ||
		    !xa_empty(&group->pasid_array)) {
			ret = -EBUSY;
			goto unlock_out;
		}
	}

	group->owner_cnt++;

unlock_out:
	mutex_unlock(&group->mutex);
	return ret;
}

/**
 * iommu_device_unuse_default_domain() - Device driver stops handling device
 *                                       DMA through the kernel DMA API.
 * @dev: The device.
 *
 * The device driver doesn't want to do DMA through kernel DMA API anymore.
 * It must be called after iommu_device_use_default_domain().
 */
void iommu_device_unuse_default_domain(struct device *dev)
{
	/* Caller is the driver core during the post-probe path */
	struct iommu_group *group = dev->iommu_group;

	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array)))
		group->owner_cnt--;

	mutex_unlock(&group->mutex);
}

static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
{
	struct device *dev = iommu_group_first_dev(group);
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	if (group->blocking_domain)
		return 0;

	if (ops->blocked_domain) {
		group->blocking_domain = ops->blocked_domain;
		return 0;
	}

	/*
	 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED create an
	 * empty PAGING domain instead.
	 */
	domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain))
		return PTR_ERR(domain);
	group->blocking_domain = domain;
	return 0;
}

static int __iommu_take_dma_ownership(struct iommu_group *group, void *owner)
{
	int ret;

	if ((group->domain && group->domain != group->default_domain) ||
	    !xa_empty(&group->pasid_array))
		return -EBUSY;

	ret = __iommu_group_alloc_blocking_domain(group);
	if (ret)
		return ret;
	ret = __iommu_group_set_domain(group, group->blocking_domain);
	if (ret)
		return ret;

	group->owner = owner;
	group->owner_cnt++;
	return 0;
}

/**
 * iommu_group_claim_dma_owner() - Set DMA ownership of a group
 * @group: The group.
 * @owner: Caller specified pointer. Used for exclusive ownership.
 *
 * This is to support backward compatibility for vfio which manages the dma
 * ownership in iommu_group level. New invocations on this interface should be
 * prohibited. Only a single owner may exist for a group.
 */
int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	int ret = 0;

	if (WARN_ON(!owner))
		return -EINVAL;

	mutex_lock(&group->mutex);
	if (group->owner_cnt) {
		ret = -EPERM;
		goto unlock_out;
	}

	ret = __iommu_take_dma_ownership(group, owner);

unlock_out:
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);

/**
 * iommu_device_claim_dma_owner() - Set DMA ownership of a device
 * @dev: The device.
 * @owner: Caller specified pointer. Used for exclusive ownership.
 *
 * Claim the DMA ownership of a device. Multiple devices in the same group may
 * concurrently claim ownership if they present the same owner value. Returns 0
 * on success and error code on failure
 */
int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	/* Caller must be a probed driver on dev */
	struct iommu_group *group = dev->iommu_group;
	int ret = 0;

	if (WARN_ON(!owner))
		return -EINVAL;
	if (!group)
		return -ENODEV;

	mutex_lock(&group->mutex);
	if (group->owner_cnt) {
		if (group->owner != owner) {
			ret = -EPERM;
			goto unlock_out;
		}
		group->owner_cnt++;
		goto unlock_out;
	}

	ret = __iommu_take_dma_ownership(group, owner);

unlock_out:
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner);

static void __iommu_release_dma_ownership(struct iommu_group *group)
{
	if (WARN_ON(!group->owner_cnt || !group->owner ||
		    !xa_empty(&group->pasid_array)))
		return;

	group->owner_cnt = 0;
	group->owner = NULL;
	__iommu_group_set_domain_nofail(group, group->default_domain);
}

/**
 * iommu_group_release_dma_owner() - Release DMA ownership of a group
 * @group: The group
 *
 * Release the DMA ownership claimed by iommu_group_claim_dma_owner().
 */
void iommu_group_release_dma_owner(struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_release_dma_ownership(group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);

/**
 * iommu_device_release_dma_owner() - Release DMA ownership of a device
 * @dev: The device.
 *
 * Release the DMA ownership claimed by iommu_device_claim_dma_owner().
 */
void iommu_device_release_dma_owner(struct device *dev)
{
	/* Caller must be a probed driver on dev */
	struct iommu_group *group = dev->iommu_group;

	mutex_lock(&group->mutex);
	if (group->owner_cnt > 1)
		group->owner_cnt--;
	else
		__iommu_release_dma_ownership(group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner);
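/*
 * Example (not part of the kernel source): a minimal sketch of the ownership
 * dance a vfio-like driver performs so user-controlled DMA cannot share a
 * group with kernel-owned devices. "example_token" stands in for whatever
 * unique cookie the caller uses; names are illustrative.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_claim_for_userspace(struct device *dev, void *example_token)
{
	int ret;

	/* Moves the group onto the blocking domain on the first claim */
	ret = iommu_device_claim_dma_owner(dev, example_token);
	if (ret)
		return ret;

	/* ... attach an UNMANAGED domain and run user-controlled DMA ... */

	/* Restores the group's default domain on the last release */
	iommu_device_release_dma_owner(dev);
	return 0;
}
#endif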
/**
 * iommu_group_dma_owner_claimed() - Query group dma ownership status
 * @group: The group.
 *
 * This provides status query on a given group. It is racy and only for
 * non-binding status reporting.
 */
bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	unsigned int user;

	mutex_lock(&group->mutex);
	user = group->owner_cnt;
	mutex_unlock(&group->mutex);

	return user;
}
EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);

static void iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
				   struct iommu_domain *domain)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *blocked_domain = ops->blocked_domain;

	WARN_ON(blocked_domain->ops->set_dev_pasid(blocked_domain,
						   dev, pasid, domain));
}

static int __iommu_set_group_pasid(struct iommu_domain *domain,
				   struct iommu_group *group, ioasid_t pasid,
				   struct iommu_domain *old)
{
	struct group_device *device, *last_gdev;
	int ret;

	for_each_group_device(group, device) {
		ret = domain->ops->set_dev_pasid(domain, device->dev,
						 pasid, old);
		if (ret)
			goto err_revert;
	}

	return 0;

err_revert:
	last_gdev = device;
	for_each_group_device(group, device) {
		if (device == last_gdev)
			break;
		/*
		 * If no old domain, undo the succeeded devices/pasid.
		 * Otherwise, rollback the succeeded devices/pasid to the old
		 * domain. And it is a driver bug to fail attaching with a
		 * previously good domain.
		 */
		if (!old ||
		    WARN_ON(old->ops->set_dev_pasid(old, device->dev, pasid, domain)))
			iommu_remove_dev_pasid(device->dev, pasid, domain);
	}
	return ret;
}

static void __iommu_remove_group_pasid(struct iommu_group *group,
				       ioasid_t pasid,
				       struct iommu_domain *domain)
{
	struct group_device *device;

	for_each_group_device(group, device)
		iommu_remove_dev_pasid(device->dev, pasid, domain);
}

/*
 * iommu_attach_device_pasid() - Attach a domain to pasid of device
 * @domain: the iommu domain.
 * @dev: the attached device.
 * @pasid: the pasid of the device.
 * @handle: the attach handle.
 *
 * Caller should always provide a new handle to avoid race with the paths
 * that have lockless reference to handle if it intends to pass a valid handle.
 *
 * Return: 0 on success, or an error.
 */
int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid,
			      struct iommu_attach_handle *handle)
{
	/* Caller must be a probed driver on dev */
	struct iommu_group *group = dev->iommu_group;
	struct group_device *device;
	const struct iommu_ops *ops;
	void *entry;
	int ret;

	if (!group)
		return -ENODEV;

	ops = dev_iommu_ops(dev);

	if (!domain->ops->set_dev_pasid ||
	    !ops->blocked_domain ||
	    !ops->blocked_domain->ops->set_dev_pasid)
		return -EOPNOTSUPP;

	if (ops != domain->owner || pasid == IOMMU_NO_PASID)
		return -EINVAL;

	mutex_lock(&group->mutex);
	for_each_group_device(group, device) {
		if (pasid >= device->dev->iommu->max_pasids) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	entry = iommu_make_pasid_array_entry(domain, handle);

	/*
	 * Entry present is a failure case. Use xa_insert() instead of
	 * xa_reserve().
	 */
	ret = xa_insert(&group->pasid_array, pasid, XA_ZERO_ENTRY, GFP_KERNEL);
	if (ret)
		goto out_unlock;

	ret = __iommu_set_group_pasid(domain, group, pasid, NULL);
	if (ret) {
		xa_release(&group->pasid_array, pasid);
		goto out_unlock;
	}

	/*
	 * The xa_insert() above reserved the memory, and the group->mutex is
	 * held, this cannot fail. The new domain cannot be visible until the
	 * operation succeeds as we cannot tolerate PRIs becoming concurrently
	 * queued and then failing attach.
	 */
	WARN_ON(xa_is_err(xa_store(&group->pasid_array, pasid, entry,
				   GFP_KERNEL)));

out_unlock:
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);

/**
 * iommu_replace_device_pasid - Replace the domain that a specific pasid
 *                              of the device is attached to
 * @domain: the new iommu domain
 * @dev: the attached device.
 * @pasid: the pasid of the device.
 * @handle: the attach handle.
 *
 * This API allows the pasid to switch domains. The @pasid should have been
 * attached. Otherwise, this fails. The pasid will keep the old configuration
 * if replacement failed.
 *
 * Caller should always provide a new handle to avoid race with the paths
 * that have lockless reference to handle if it intends to pass a valid handle.
 *
 * Return 0 on success, or an error.
 */
int iommu_replace_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid,
			       struct iommu_attach_handle *handle)
{
	/* Caller must be a probed driver on dev */
	struct iommu_group *group = dev->iommu_group;
	struct iommu_attach_handle *entry;
	struct iommu_domain *curr_domain;
	void *curr;
	int ret;

	if (!group)
		return -ENODEV;

	if (!domain->ops->set_dev_pasid)
		return -EOPNOTSUPP;

	if (dev_iommu_ops(dev) != domain->owner ||
	    pasid == IOMMU_NO_PASID || !handle)
		return -EINVAL;

	mutex_lock(&group->mutex);
	entry = iommu_make_pasid_array_entry(domain, handle);
	curr = xa_cmpxchg(&group->pasid_array, pasid, NULL,
			  XA_ZERO_ENTRY, GFP_KERNEL);
	if (xa_is_err(curr)) {
		ret = xa_err(curr);
		goto out_unlock;
	}

	/*
	 * No domain (with or without handle) attached, hence not
	 * a replace case.
	 */
	if (!curr) {
		xa_release(&group->pasid_array, pasid);
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Reusing the handle is problematic as there are paths that refer to
	 * the handle without lock. To avoid race, reject the callers that
	 * attempt it.
	 */
	if (curr == entry) {
		WARN_ON(1);
		ret = -EINVAL;
		goto out_unlock;
	}

	curr_domain = pasid_array_entry_to_domain(curr);
	ret = 0;

	if (curr_domain != domain) {
		ret = __iommu_set_group_pasid(domain, group,
					      pasid, curr_domain);
		if (ret)
			goto out_unlock;
	}

	/*
	 * The above xa_cmpxchg() reserved the memory, and the
	 * group->mutex is held, this cannot fail.
	 */
	WARN_ON(xa_is_err(xa_store(&group->pasid_array, pasid, entry,
				   GFP_KERNEL)));

out_unlock:
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(iommu_replace_device_pasid, "IOMMUFD_INTERNAL");

/*
 * iommu_detach_device_pasid() - Detach the domain from pasid of device
 * @domain: the iommu domain.
 * @dev: the attached device.
 * @pasid: the pasid of the device.
 *
 * The @domain must have been attached to @pasid of the @dev with
 * iommu_attach_device_pasid().
 */
void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
			       ioasid_t pasid)
{
	/* Caller must be a probed driver on dev */
	struct iommu_group *group = dev->iommu_group;

	mutex_lock(&group->mutex);
	__iommu_remove_group_pasid(group, pasid, domain);
	xa_erase(&group->pasid_array, pasid);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);
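/*
 * Example (not part of the kernel source): a minimal sketch of binding a
 * domain to a PASID and unbinding it again. Per the comments above, the
 * handle must be freshly allocated for each attach; the embedding structure
 * and names are illustrative.
 */
#if 0	/* illustrative sketch, not compiled */
struct example_bond {
	struct iommu_attach_handle handle;	/* must not be reused */
};

static int example_bind_pasid(struct iommu_domain *domain, struct device *dev,
			      ioasid_t pasid)
{
	struct example_bond *bond;
	int ret;

	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
	if (!bond)
		return -ENOMEM;

	ret = iommu_attach_device_pasid(domain, dev, pasid, &bond->handle);
	if (ret) {
		kfree(bond);
		return ret;
	}

	/* ... DMA tagged with pasid now translates through domain ... */

	iommu_detach_device_pasid(domain, dev, pasid);
	kfree(bond);
	return 0;
}
#endif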
ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	int ret;

	/* max_pasids == 0 means that the device does not support PASID */
	if (!dev->iommu->max_pasids)
		return IOMMU_PASID_INVALID;

	/*
	 * max_pasids is set up by vendor driver based on number of PASID bits
	 * supported but the IDA allocation is inclusive.
	 */
	ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID,
			      dev->iommu->max_pasids - 1, GFP_KERNEL);
	return ret < 0 ? IOMMU_PASID_INVALID : ret;
}
EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid);

void iommu_free_global_pasid(ioasid_t pasid)
{
	if (WARN_ON(pasid == IOMMU_PASID_INVALID))
		return;

	ida_free(&iommu_global_pasid_ida, pasid);
}
EXPORT_SYMBOL_GPL(iommu_free_global_pasid);

/**
 * iommu_attach_handle_get - Return the attach handle
 * @group: the iommu group that domain was attached to
 * @pasid: the pasid within the group
 * @type: matched domain type, 0 for any match
 *
 * Return handle or ERR_PTR(-ENOENT) on none, ERR_PTR(-EBUSY) on mismatch.
 *
 * Return the attach handle to the caller. The life cycle of an iommu attach
 * handle is from the time when the domain is attached to the time when the
 * domain is detached. Callers are required to synchronize the call of
 * iommu_attach_handle_get() with domain attachment and detachment. The attach
 * handle can only be used during its life cycle.
 */
struct iommu_attach_handle *
iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid,
			unsigned int type)
{
	struct iommu_attach_handle *handle;
	void *entry;

	xa_lock(&group->pasid_array);
	entry = xa_load(&group->pasid_array, pasid);
	if (!entry || xa_pointer_tag(entry) != IOMMU_PASID_ARRAY_HANDLE) {
		handle = ERR_PTR(-ENOENT);
	} else {
		handle = xa_untag_pointer(entry);
		if (type && handle->domain->type != type)
			handle = ERR_PTR(-EBUSY);
	}
	xa_unlock(&group->pasid_array);

	return handle;
}
EXPORT_SYMBOL_NS_GPL(iommu_attach_handle_get, "IOMMUFD_INTERNAL");

/**
 * iommu_attach_group_handle - Attach an IOMMU domain to an IOMMU group
 * @domain: IOMMU domain to attach
 * @group: IOMMU group that will be attached
 * @handle: attach handle
 *
 * Returns 0 on success and error code on failure.
 *
 * This is a variant of iommu_attach_group(). It allows the caller to provide
 * an attach handle and use it when the domain is attached. This is currently
 * used by IOMMUFD to deliver the I/O page faults.
 *
 * Caller should always provide a new handle to avoid race with the paths
 * that have lockless reference to handle.
 */
int iommu_attach_group_handle(struct iommu_domain *domain,
			      struct iommu_group *group,
			      struct iommu_attach_handle *handle)
{
	void *entry;
	int ret;

	if (!handle)
		return -EINVAL;

	mutex_lock(&group->mutex);
	entry = iommu_make_pasid_array_entry(domain, handle);
	ret = xa_insert(&group->pasid_array,
			IOMMU_NO_PASID, XA_ZERO_ENTRY, GFP_KERNEL);
	if (ret)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);
	if (ret) {
		xa_release(&group->pasid_array, IOMMU_NO_PASID);
		goto out_unlock;
	}

	/*
	 * The xa_insert() above reserved the memory, and the group->mutex is
	 * held, this cannot fail. The new domain cannot be visible until the
	 * operation succeeds as we cannot tolerate PRIs becoming concurrently
	 * queued and then failing attach.
	 */
	WARN_ON(xa_is_err(xa_store(&group->pasid_array, IOMMU_NO_PASID,
				   entry, GFP_KERNEL)));

out_unlock:
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(iommu_attach_group_handle, "IOMMUFD_INTERNAL");
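/*
 * Example (not part of the kernel source): a minimal sketch of how an
 * iommufd-style fault-delivery path might map a faulting (group, pasid) back
 * to the handle supplied at attach time. These helpers are exported in the
 * IOMMUFD_INTERNAL namespace; "example_fault_ctx" and the container_of()
 * usage assume the caller embedded the handle in its own structure.
 */
#if 0	/* illustrative sketch, not compiled */
struct example_fault_ctx {
	struct iommu_attach_handle handle;
	/* ... caller-private fault delivery state ... */
};

static struct example_fault_ctx *example_find_ctx(struct iommu_group *group,
						  ioasid_t pasid)
{
	struct iommu_attach_handle *handle;

	/* Only valid while the domain stays attached, per the doc above */
	handle = iommu_attach_handle_get(group, pasid, 0);
	if (IS_ERR(handle))
		return NULL;

	return container_of(handle, struct example_fault_ctx, handle);
}
#endif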
/**
 * iommu_detach_group_handle - Detach an IOMMU domain from an IOMMU group
 * @domain: IOMMU domain to attach
 * @group: IOMMU group that will be attached
 *
 * Detach the specified IOMMU domain from the specified IOMMU group.
 * It must be used in conjunction with iommu_attach_group_handle().
 */
void iommu_detach_group_handle(struct iommu_domain *domain,
			       struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_group_set_core_domain(group);
	xa_erase(&group->pasid_array, IOMMU_NO_PASID);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, "IOMMUFD_INTERNAL");

/**
 * iommu_replace_group_handle - replace the domain that a group is attached to
 * @group: IOMMU group that will be attached to the new domain
 * @new_domain: new IOMMU domain to replace with
 * @handle: attach handle
 *
 * This API allows the group to switch domains without being forced to go to
 * the blocking domain in-between. It allows the caller to provide an attach
 * handle for the new domain and use it when the domain is attached.
 *
 * If the currently attached domain is a core domain (e.g. a default_domain),
 * it will act just like the iommu_attach_group_handle().
 *
 * Caller should always provide a new handle to avoid race with the paths
 * that have lockless reference to handle.
 */
int iommu_replace_group_handle(struct iommu_group *group,
			       struct iommu_domain *new_domain,
			       struct iommu_attach_handle *handle)
{
	void *curr, *entry;
	int ret;

	if (!new_domain || !handle)
		return -EINVAL;

	mutex_lock(&group->mutex);
	entry = iommu_make_pasid_array_entry(new_domain, handle);
	ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
	if (ret)
		goto err_unlock;

	ret = __iommu_group_set_domain(group, new_domain);
	if (ret)
		goto err_release;

	curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL);
	WARN_ON(xa_is_err(curr));

	mutex_unlock(&group->mutex);

	return 0;

err_release:
	xa_release(&group->pasid_array, IOMMU_NO_PASID);
err_unlock:
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, "IOMMUFD_INTERNAL");

#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
/**
 * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
 * @desc: MSI descriptor, will store the MSI page
 * @msi_addr: MSI target address to be mapped
 *
 * The implementation of sw_msi() should take msi_addr and map it to
 * an IOVA in the domain and call msi_desc_set_iommu_msi_iova() with the
 * mapping information.
 *
 * Return: 0 on success or negative error code if the mapping failed.
 */
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_group *group = dev->iommu_group;
	int ret = 0;

	if (!group)
		return 0;

	mutex_lock(&group->mutex);
	/* An IDENTITY domain must pass through */
	if (group->domain && group->domain->type != IOMMU_DOMAIN_IDENTITY) {
		switch (group->domain->cookie_type) {
		case IOMMU_COOKIE_DMA_MSI:
		case IOMMU_COOKIE_DMA_IOVA:
			ret = iommu_dma_sw_msi(group->domain, desc, msi_addr);
			break;
		case IOMMU_COOKIE_IOMMUFD:
			ret = iommufd_sw_msi(group->domain, desc, msi_addr);
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
		}
	}
	mutex_unlock(&group->mutex);
	return ret;
}
#endif /* CONFIG_IRQ_MSI_IOMMU */
// SPDX-License-Identifier: GPL-2.0+
/*
 * Universal/legacy driver for 8250/16550-type serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright (C) 2001 Russell King.
 *
 * Supports:
 *	      early_serial_setup() ports
 *	      userspace-configurable "phantom" ports
 *	      serial8250_register_8250_port() ports
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/tty.h>
#include <linux/ratelimit.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/nmi.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/io.h>

#include <asm/irq.h>

#include "8250.h"

#define PASS_LIMIT	512

struct irq_info {
	struct hlist_node	node;
	int			irq;
	spinlock_t		lock;	/* Protects list not the hash */
	struct list_head	*head;
};

#define NR_IRQ_HASH		32	/* Can be adjusted later */
static struct hlist_head irq_lists[NR_IRQ_HASH];
static DEFINE_MUTEX(hash_mutex);	/* Used to walk the hash */

/*
 * This is the serial driver's interrupt routine.
 *
 * Arjan thinks the old way was overly complex, so it got simplified.
 * Alan disagrees, saying that we need the complexity to handle the weird
 * nature of ISA shared interrupts. (This is a special exception.)
 *
 * In order to handle ISA shared interrupts properly, we need to check
 * that all ports have been serviced, and therefore the ISA interrupt
 * line has been de-asserted.
 *
 * This means we need to loop through all ports, checking that they
 * don't have an interrupt pending.
 */
static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
{
	struct irq_info *i = dev_id;
	struct list_head *l, *end = NULL;
	int pass_counter = 0, handled = 0;

	pr_debug("%s(%d): start\n", __func__, irq);

	spin_lock(&i->lock);

	l = i->head;
	do {
		struct uart_8250_port *up;
		struct uart_port *port;

		up = list_entry(l, struct uart_8250_port, list);
		port = &up->port;

		if (port->handle_irq(port)) {
			handled = 1;
			end = NULL;
		} else if (end == NULL)
			end = l;

		l = l->next;

		if (l == i->head && pass_counter++ > PASS_LIMIT)
			break;
	} while (l != end);

	spin_unlock(&i->lock);

	pr_debug("%s(%d): end\n", __func__, irq);

	return IRQ_RETVAL(handled);
}

/*
 * To support ISA shared interrupts, we need to have one interrupt
 * handler that ensures that the IRQ line has been deasserted
 * before returning. Failing to do this will result in the IRQ
 * line being stuck active, and, since ISA irqs are edge triggered,
 * no more IRQs will be seen.
 */
static void serial_do_unlink(struct irq_info *i, struct uart_8250_port *up)
{
	spin_lock_irq(&i->lock);

	if (!list_empty(i->head)) {
		if (i->head == &up->list)
			i->head = i->head->next;
		list_del(&up->list);
	} else {
		BUG_ON(i->head != &up->list);
		i->head = NULL;
	}
	spin_unlock_irq(&i->lock);
	/* List empty so throw away the hash node */
	if (i->head == NULL) {
		hlist_del(&i->node);
		kfree(i);
	}
}

static int serial_link_irq_chain(struct uart_8250_port *up)
{
	struct hlist_head *h;
	struct irq_info *i;
	int ret;

	mutex_lock(&hash_mutex);

	h = &irq_lists[up->port.irq % NR_IRQ_HASH];

	hlist_for_each_entry(i, h, node)
		if (i->irq == up->port.irq)
			break;

	if (i == NULL) {
		i = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
		if (i == NULL) {
			mutex_unlock(&hash_mutex);
			return -ENOMEM;
		}
		spin_lock_init(&i->lock);
		i->irq = up->port.irq;
		hlist_add_head(&i->node, h);
	}
	mutex_unlock(&hash_mutex);

	spin_lock_irq(&i->lock);

	if (i->head) {
		list_add(&up->list, i->head);
		spin_unlock_irq(&i->lock);

		ret = 0;
	} else {
		INIT_LIST_HEAD(&up->list);
		i->head = &up->list;
		spin_unlock_irq(&i->lock);
		ret = request_irq(up->port.irq, serial8250_interrupt,
				  up->port.irqflags, up->port.name, i);
		if (ret < 0)
			serial_do_unlink(i, up);
	}

	return ret;
}

static void serial_unlink_irq_chain(struct uart_8250_port *up)
{
	struct irq_info *i;
	struct hlist_head *h;

	mutex_lock(&hash_mutex);

	h = &irq_lists[up->port.irq % NR_IRQ_HASH];

	hlist_for_each_entry(i, h, node)
		if (i->irq == up->port.irq)
			break;

	BUG_ON(i == NULL);
	BUG_ON(i->head == NULL);

	if (list_empty(i->head))
		free_irq(up->port.irq, i);

	serial_do_unlink(i, up);
	mutex_unlock(&hash_mutex);
}

/*
 * This function is used to handle ports that do not have an
 * interrupt. This doesn't work very well for 16450's, but gives
 * barely passable results for a 16550A. (Although at the expense
 * of much CPU overhead).
 */
static void serial8250_timeout(struct timer_list *t)
{
	struct uart_8250_port *up = from_timer(up, t, timer);

	up->port.handle_irq(&up->port);
	mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
}

static void serial8250_backup_timeout(struct timer_list *t)
{
	struct uart_8250_port *up = from_timer(up, t, timer);
	unsigned int iir, ier = 0, lsr;
	unsigned long flags;

	uart_port_lock_irqsave(&up->port, &flags);

	/*
	 * Must disable interrupts or else we risk racing with the interrupt
	 * based handler.
	 */
	if (up->port.irq) {
		ier = serial_in(up, UART_IER);
		serial_out(up, UART_IER, 0);
	}

	iir = serial_in(up, UART_IIR);

	/*
	 * This should be a safe test for anyone who doesn't trust the
	 * IIR bits on their UART, but it's specifically designed for
	 * the "Diva" UART used on the management processor on many HP
	 * ia64 and parisc boxes.
	 */
	lsr = serial_lsr_in(up);
	if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
	    (!kfifo_is_empty(&up->port.state->port.xmit_fifo) ||
	     up->port.x_char) &&
	    (lsr & UART_LSR_THRE)) {
		iir &= ~(UART_IIR_ID | UART_IIR_NO_INT);
		iir |= UART_IIR_THRI;
	}

	if (!(iir & UART_IIR_NO_INT))
		serial8250_tx_chars(up);

	if (up->port.irq)
		serial_out(up, UART_IER, ier);

	uart_port_unlock_irqrestore(&up->port, flags);

	/* Standard timer interval plus 0.2s to keep the port running */
	mod_timer(&up->timer,
		  jiffies + uart_poll_timeout(&up->port) + HZ / 5);
}

static void univ8250_setup_timer(struct uart_8250_port *up)
{
	struct uart_port *port = &up->port;

	/*
	 * The above check will only give an accurate result the first time
	 * the port is opened so this value needs to be preserved.
*/ if (up->bugs & UART_BUG_THRE) { pr_debug("%s - using backup timer\n", port->name); up->timer.function = serial8250_backup_timeout; mod_timer(&up->timer, jiffies + uart_poll_timeout(port) + HZ / 5); } /* * If the "interrupt" for this port doesn't correspond with any * hardware interrupt, we use a timer-based system. The original * driver used to do this with IRQ0. */ if (!port->irq) mod_timer(&up->timer, jiffies + uart_poll_timeout(port)); } static int univ8250_setup_irq(struct uart_8250_port *up) { struct uart_port *port = &up->port; if (port->irq) return serial_link_irq_chain(up); return 0; } static void univ8250_release_irq(struct uart_8250_port *up) { struct uart_port *port = &up->port; timer_delete_sync(&up->timer); up->timer.function = serial8250_timeout; if (port->irq) serial_unlink_irq_chain(up); } const struct uart_ops *univ8250_port_base_ops = NULL; struct uart_ops univ8250_port_ops; static const struct uart_8250_ops univ8250_driver_ops = { .setup_irq = univ8250_setup_irq, .release_irq = univ8250_release_irq, .setup_timer = univ8250_setup_timer, }; static struct uart_8250_port serial8250_ports[UART_NR]; /** * serial8250_get_port - retrieve struct uart_8250_port * @line: serial line number * * This function retrieves struct uart_8250_port for the specific line. * This struct *must* *not* be used to perform an 8250 or serial core operation * which is not accessible otherwise. Its only purpose is to make the struct * accessible to the runtime-pm callbacks for context suspend/restore. * No locking is assumed here because runtime-pm suspend/resume callbacks * should not be invoked while any operation is being performed on the port. */ struct uart_8250_port *serial8250_get_port(int line) { return &serial8250_ports[line]; } EXPORT_SYMBOL_GPL(serial8250_get_port); static inline void serial8250_apply_quirks(struct uart_8250_port *up) { up->port.quirks |= skip_txen_test ? UPQ_NO_TXEN_TEST : 0; } struct uart_8250_port *serial8250_setup_port(int index) { struct uart_8250_port *up; if (index >= UART_NR) return NULL; up = &serial8250_ports[index]; up->port.line = index; up->port.port_id = index; serial8250_init_port(up); if (!univ8250_port_base_ops) univ8250_port_base_ops = up->port.ops; up->port.ops = &univ8250_port_ops; timer_setup(&up->timer, serial8250_timeout, 0); up->ops = &univ8250_driver_ops; serial8250_set_defaults(up); return up; } void __init serial8250_register_ports(struct uart_driver *drv, struct device *dev) { int i; for (i = 0; i < nr_uarts; i++) { struct uart_8250_port *up = &serial8250_ports[i]; if (up->port.type == PORT_8250_CIR) continue; if (up->port.dev) continue; up->port.dev = dev; if (uart_console_registered(&up->port)) pm_runtime_get_sync(up->port.dev); serial8250_apply_quirks(up); uart_add_one_port(drv, &up->port); } } #ifdef CONFIG_SERIAL_8250_CONSOLE static void univ8250_console_write(struct console *co, const char *s, unsigned int count) { struct uart_8250_port *up = &serial8250_ports[co->index]; serial8250_console_write(up, s, count); } static int univ8250_console_setup(struct console *co, char *options) { struct uart_8250_port *up; struct uart_port *port; int retval, i; /* * Check whether an invalid uart number has been specified, and * if so, search for the first available port that does have * console support. */ if (co->index < 0 || co->index >= UART_NR) co->index = 0; /* * If the console is past the initial isa ports, init more ports up to * co->index as needed and increment nr_uarts accordingly.
*/ for (i = nr_uarts; i <= co->index; i++) { up = serial8250_setup_port(i); if (!up) return -ENODEV; nr_uarts++; } port = &serial8250_ports[co->index].port; /* link port to console */ uart_port_set_cons(port, co); retval = serial8250_console_setup(port, options, false); if (retval != 0) uart_port_set_cons(port, NULL); return retval; } static int univ8250_console_exit(struct console *co) { struct uart_port *port; port = &serial8250_ports[co->index].port; return serial8250_console_exit(port); } /** * univ8250_console_match - non-standard console matching * @co: registering console * @name: name from console command line * @idx: index from console command line * @options: ptr to option string from console command line * * Only attempts to match console command lines of the form: * console=uart[8250],io|mmio|mmio16|mmio32,<addr>[,<options>] * console=uart[8250],0x<addr>[,<options>] * This form is used to register an initial earlycon boot console and * replace it with the serial8250_console at 8250 driver init. * * Performs console setup for a match (as required by the interface). * If no <options> are specified, then assume the h/w is already set up. * * Returns 0 if the console matches; otherwise non-zero to use default matching. */ static int univ8250_console_match(struct console *co, char *name, int idx, char *options) { char match[] = "uart"; /* 8250-specific earlycon name */ unsigned char iotype; resource_size_t addr; int i; if (strncmp(name, match, 4) != 0) return -ENODEV; if (uart_parse_earlycon(options, &iotype, &addr, &options)) return -ENODEV; /* try to match the port specified on the command line */ for (i = 0; i < nr_uarts; i++) { struct uart_port *port = &serial8250_ports[i].port; if (port->iotype != iotype) continue; if ((iotype == UPIO_MEM || iotype == UPIO_MEM16 || iotype == UPIO_MEM32 || iotype == UPIO_MEM32BE) && (port->mapbase != addr)) continue; if (iotype == UPIO_PORT && port->iobase != addr) continue; co->index = i; uart_port_set_cons(port, co); return serial8250_console_setup(port, options, true); } return -ENODEV; } static struct console univ8250_console = { .name = "ttyS", .write = univ8250_console_write, .device = uart_console_device, .setup = univ8250_console_setup, .exit = univ8250_console_exit, .match = univ8250_console_match, .flags = CON_PRINTBUFFER | CON_ANYTIME, .index = -1, .data = &serial8250_reg, }; static int __init univ8250_console_init(void) { if (nr_uarts == 0) return -ENODEV; serial8250_isa_init_ports(); register_console(&univ8250_console); return 0; } console_initcall(univ8250_console_init); #define SERIAL8250_CONSOLE (&univ8250_console) #else #define SERIAL8250_CONSOLE NULL #endif struct uart_driver serial8250_reg = { .owner = THIS_MODULE, .driver_name = "serial", .dev_name = "ttyS", .major = TTY_MAJOR, .minor = 64, .cons = SERIAL8250_CONSOLE, }; /* * early_serial_setup - early registration for 8250 ports * * Set up an 8250 port structure prior to console initialisation. Use * after console initialisation will cause undefined behaviour.
*/ int __init early_serial_setup(struct uart_port *port) { struct uart_port *p; if (port->line >= ARRAY_SIZE(serial8250_ports) || nr_uarts == 0) return -ENODEV; serial8250_isa_init_ports(); p = &serial8250_ports[port->line].port; p->iobase = port->iobase; p->membase = port->membase; p->irq = port->irq; p->irqflags = port->irqflags; p->uartclk = port->uartclk; p->fifosize = port->fifosize; p->regshift = port->regshift; p->iotype = port->iotype; p->flags = port->flags; p->mapbase = port->mapbase; p->mapsize = port->mapsize; p->private_data = port->private_data; p->type = port->type; p->line = port->line; serial8250_set_defaults(up_to_u8250p(p)); if (port->serial_in) p->serial_in = port->serial_in; if (port->serial_out) p->serial_out = port->serial_out; if (port->handle_irq) p->handle_irq = port->handle_irq; return 0; } /** * serial8250_suspend_port - suspend one serial port * @line: serial line number * * Suspend one serial port. */ void serial8250_suspend_port(int line) { struct uart_8250_port *up = &serial8250_ports[line]; struct uart_port *port = &up->port; if (!console_suspend_enabled && uart_console(port) && port->type != PORT_8250) { unsigned char canary = 0xa5; serial_out(up, UART_SCR, canary); if (serial_in(up, UART_SCR) == canary) up->canary = canary; } uart_suspend_port(&serial8250_reg, port); } EXPORT_SYMBOL(serial8250_suspend_port); /** * serial8250_resume_port - resume one serial port * @line: serial line number * * Resume one serial port. */ void serial8250_resume_port(int line) { struct uart_8250_port *up = &serial8250_ports[line]; struct uart_port *port = &up->port; up->canary = 0; if (up->capabilities & UART_NATSEMI) { /* Ensure it's still in high speed mode */ serial_port_out(port, UART_LCR, 0xE0); ns16550a_goto_highspeed(up); serial_port_out(port, UART_LCR, 0); port->uartclk = 921600*16; } uart_resume_port(&serial8250_reg, port); } EXPORT_SYMBOL(serial8250_resume_port); /* * serial8250_register_8250_port and serial8250_unregister_port allow * 16x50 serial ports to be configured at run-time, to support PCMCIA * modems and PCI multiport cards. */ static DEFINE_MUTEX(serial_mutex); static struct uart_8250_port *serial8250_find_match_or_unused(const struct uart_port *port) { int i; /* * First, find a port entry which matches. */ for (i = 0; i < nr_uarts; i++) if (uart_match_port(&serial8250_ports[i].port, port)) return &serial8250_ports[i]; /* try line number first if still available */ i = port->line; if (i < nr_uarts && serial8250_ports[i].port.type == PORT_UNKNOWN && serial8250_ports[i].port.iobase == 0) return &serial8250_ports[i]; /* * We didn't find a matching entry, so look for the first * free entry. We look for one which hasn't been previously * used (indicated by zero iobase). */ for (i = 0; i < nr_uarts; i++) if (serial8250_ports[i].port.type == PORT_UNKNOWN && serial8250_ports[i].port.iobase == 0) return &serial8250_ports[i]; /* * That also failed. Last resort is to find any entry which * doesn't have a real port associated with it.
*/ for (i = 0; i < nr_uarts; i++) if (serial8250_ports[i].port.type == PORT_UNKNOWN) return &serial8250_ports[i]; return NULL; } static void serial_8250_overrun_backoff_work(struct work_struct *work) { struct uart_8250_port *up = container_of(to_delayed_work(work), struct uart_8250_port, overrun_backoff); struct uart_port *port = &up->port; unsigned long flags; uart_port_lock_irqsave(port, &flags); up->ier |= UART_IER_RLSI | UART_IER_RDI; serial_out(up, UART_IER, up->ier); uart_port_unlock_irqrestore(port, flags); } /** * serial8250_register_8250_port - register a serial port * @up: serial port template * * Configure the serial port specified by the request. If the * port exists and is in use, it is hung up and unregistered * first. * * The port is then probed and, if necessary, the IRQ is autodetected. * If this fails, an error is returned. * * On success the port is ready to use and the line number is returned. */ int serial8250_register_8250_port(const struct uart_8250_port *up) { struct uart_8250_port *uart; int ret = -ENOSPC; if (up->port.uartclk == 0) return -EINVAL; mutex_lock(&serial_mutex); uart = serial8250_find_match_or_unused(&up->port); if (!uart) { /* * If the port is past the initial isa ports, initialize a new * port and increment nr_uarts accordingly. */ uart = serial8250_setup_port(nr_uarts); if (!uart) goto unlock; nr_uarts++; } if (uart->port.type != PORT_8250_CIR) { struct mctrl_gpios *gpios; if (uart->port.dev) uart_remove_one_port(&serial8250_reg, &uart->port); uart->port.ctrl_id = up->port.ctrl_id; uart->port.port_id = up->port.port_id; uart->port.iobase = up->port.iobase; uart->port.membase = up->port.membase; uart->port.irq = up->port.irq; uart->port.irqflags = up->port.irqflags; uart->port.uartclk = up->port.uartclk; uart->port.fifosize = up->port.fifosize; uart->port.regshift = up->port.regshift; uart->port.iotype = up->port.iotype; uart->port.flags = up->port.flags | UPF_BOOT_AUTOCONF; uart->bugs = up->bugs; uart->port.mapbase = up->port.mapbase; uart->port.mapsize = up->port.mapsize; uart->port.private_data = up->port.private_data; uart->tx_loadsz = up->tx_loadsz; uart->capabilities = up->capabilities; uart->port.throttle = up->port.throttle; uart->port.unthrottle = up->port.unthrottle; uart->port.rs485_config = up->port.rs485_config; uart->port.rs485_supported = up->port.rs485_supported; uart->port.rs485 = up->port.rs485; uart->rs485_start_tx = up->rs485_start_tx; uart->rs485_stop_tx = up->rs485_stop_tx; uart->lsr_save_mask = up->lsr_save_mask; uart->dma = up->dma; /* Take tx_loadsz from fifosize if it wasn't set separately */ if (uart->port.fifosize && !uart->tx_loadsz) uart->tx_loadsz = uart->port.fifosize; if (up->port.dev) { uart->port.dev = up->port.dev; ret = uart_get_rs485_mode(&uart->port); if (ret) goto err; } if (up->port.flags & UPF_FIXED_TYPE) uart->port.type = up->port.type; /* * Only call mctrl_gpio_init() if the device has no ACPI * companion device. */ if (!has_acpi_companion(uart->port.dev)) { gpios = mctrl_gpio_init(&uart->port, 0); if (IS_ERR(gpios)) { ret = PTR_ERR(gpios); goto err; } else { uart->gpios = gpios; } } serial8250_set_defaults(uart); /* Possibly override default I/O functions. */
if (up->port.serial_in) uart->port.serial_in = up->port.serial_in; if (up->port.serial_out) uart->port.serial_out = up->port.serial_out; if (up->port.handle_irq) uart->port.handle_irq = up->port.handle_irq; /* Possibly override set_termios call */ if (up->port.set_termios) uart->port.set_termios = up->port.set_termios; if (up->port.set_ldisc) uart->port.set_ldisc = up->port.set_ldisc; if (up->port.get_mctrl) uart->port.get_mctrl = up->port.get_mctrl; if (up->port.set_mctrl) uart->port.set_mctrl = up->port.set_mctrl; if (up->port.get_divisor) uart->port.get_divisor = up->port.get_divisor; if (up->port.set_divisor) uart->port.set_divisor = up->port.set_divisor; if (up->port.startup) uart->port.startup = up->port.startup; if (up->port.shutdown) uart->port.shutdown = up->port.shutdown; if (up->port.pm) uart->port.pm = up->port.pm; if (up->port.handle_break) uart->port.handle_break = up->port.handle_break; if (up->dl_read) uart->dl_read = up->dl_read; if (up->dl_write) uart->dl_write = up->dl_write; if (uart->port.type != PORT_8250_CIR) { if (uart_console_registered(&uart->port)) pm_runtime_get_sync(uart->port.dev); if (serial8250_isa_config != NULL) serial8250_isa_config(0, &uart->port, &uart->capabilities); serial8250_apply_quirks(uart); ret = uart_add_one_port(&serial8250_reg, &uart->port); if (ret) goto err; ret = uart->port.line; } else { dev_info(uart->port.dev, "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n", uart->port.iobase, (unsigned long long)uart->port.mapbase, uart->port.irq); ret = 0; } if (!uart->lsr_save_mask) uart->lsr_save_mask = LSR_SAVE_FLAGS; /* Use default LSR mask */ /* Initialise interrupt backoff work if required */ if (up->overrun_backoff_time_ms > 0) { uart->overrun_backoff_time_ms = up->overrun_backoff_time_ms; INIT_DELAYED_WORK(&uart->overrun_backoff, serial_8250_overrun_backoff_work); } else { uart->overrun_backoff_time_ms = 0; } } unlock: mutex_unlock(&serial_mutex); return ret; err: uart->port.dev = NULL; mutex_unlock(&serial_mutex); return ret; } EXPORT_SYMBOL(serial8250_register_8250_port); /** * serial8250_unregister_port - remove a 16x50 serial port at runtime * @line: serial line number * * Remove one serial port. This may not be called from interrupt * context. We hand the port back to our control. */ void serial8250_unregister_port(int line) { struct uart_8250_port *uart = &serial8250_ports[line]; mutex_lock(&serial_mutex); if (uart->em485) { unsigned long flags; uart_port_lock_irqsave(&uart->port, &flags); serial8250_em485_destroy(uart); uart_port_unlock_irqrestore(&uart->port, flags); } uart_remove_one_port(&serial8250_reg, &uart->port); if (serial8250_isa_devs) { uart->port.flags &= ~UPF_BOOT_AUTOCONF; uart->port.type = PORT_UNKNOWN; uart->port.dev = &serial8250_isa_devs->dev; uart->port.port_id = line; uart->capabilities = 0; serial8250_init_port(uart); serial8250_apply_quirks(uart); uart_add_one_port(&serial8250_reg, &uart->port); } else { uart->port.dev = NULL; } mutex_unlock(&serial_mutex); } EXPORT_SYMBOL(serial8250_unregister_port); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Generic 8250/16x50 serial driver");
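/*
 * Illustrative sketch (not part of the file above): how a bus or board
 * driver might hand a discovered 16550A-compatible UART to this core at
 * run time via serial8250_register_8250_port() and
 * serial8250_unregister_port(), per the kerneldoc above. The example_*
 * names and the MMIO base, IRQ and clock values are made up for
 * illustration only.
 */
#include <linux/serial_8250.h>
#include <linux/serial_core.h>

static int example_line = -1;

static int example_add_port(void __iomem *membase, resource_size_t mapbase,
			    int irq)
{
	struct uart_8250_port uart = { };

	uart.port.iotype  = UPIO_MEM;
	uart.port.membase = membase;
	uart.port.mapbase = mapbase;
	uart.port.irq     = irq;
	uart.port.uartclk = 1843200;	/* must be non-zero, or -EINVAL */
	uart.port.flags   = UPF_SHARE_IRQ | UPF_FIXED_TYPE;
	uart.port.type    = PORT_16550A;

	/* On success the ttyS line number is returned. */
	example_line = serial8250_register_8250_port(&uart);
	return example_line < 0 ? example_line : 0;
}

static void example_remove_port(void)
{
	if (example_line >= 0)
		serial8250_unregister_port(example_line);
}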
/* * Linux V4L2 radio driver for the Griffin radioSHARK2 USB radio receiver * * Note the radioSHARK2 offers the audio through a regular USB audio device; * this driver only handles the tuning. * * The info necessary to drive the shark2 was taken from the small userspace * shark2.c program by Hisaaki Shibata, which he kindly placed in the Public * Domain. * * Copyright (c) 2012 Hans de Goede <hdegoede@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details.
*/ #include <linux/init.h> #include <linux/kernel.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/workqueue.h> #include <media/v4l2-device.h> #include "radio-tea5777.h" #if defined(CONFIG_LEDS_CLASS) || \ (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK2_MODULE)) #define SHARK_USE_LEDS 1 #endif MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("Griffin radioSHARK2, USB radio receiver driver"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-1)"); #define SHARK_IN_EP 0x83 #define SHARK_OUT_EP 0x05 #define TB_LEN 7 #define DRV_NAME "radioshark2" #define v4l2_dev_to_shark(d) container_of(d, struct shark_device, v4l2_dev) enum { BLUE_LED, RED_LED, NO_LEDS }; struct shark_device { struct usb_device *usbdev; struct v4l2_device v4l2_dev; struct radio_tea5777 tea; #ifdef SHARK_USE_LEDS struct work_struct led_work; struct led_classdev leds[NO_LEDS]; char led_names[NO_LEDS][64]; atomic_t brightness[NO_LEDS]; unsigned long brightness_new; #endif u8 *transfer_buffer; }; static atomic_t shark_instance = ATOMIC_INIT(0); static int shark_write_reg(struct radio_tea5777 *tea, u64 reg) { struct shark_device *shark = tea->private_data; int i, res, actual_len; memset(shark->transfer_buffer, 0, TB_LEN); shark->transfer_buffer[0] = 0x81; /* Write register command */ for (i = 0; i < 6; i++) shark->transfer_buffer[i + 1] = (reg >> (40 - i * 8)) & 0xff; v4l2_dbg(1, debug, tea->v4l2_dev, "shark2-write: %*ph\n", 7, shark->transfer_buffer); res = usb_interrupt_msg(shark->usbdev, usb_sndintpipe(shark->usbdev, SHARK_OUT_EP), shark->transfer_buffer, TB_LEN, &actual_len, 1000); if (res < 0) { v4l2_err(tea->v4l2_dev, "write error: %d\n", res); return res; } return 0; } static int shark_read_reg(struct radio_tea5777 *tea, u32 *reg_ret) { struct shark_device *shark = tea->private_data; int i, res, actual_len; u32 reg = 0; memset(shark->transfer_buffer, 0, TB_LEN); shark->transfer_buffer[0] = 0x82; res = usb_interrupt_msg(shark->usbdev, usb_sndintpipe(shark->usbdev, SHARK_OUT_EP), shark->transfer_buffer, TB_LEN, &actual_len, 1000); if (res < 0) { v4l2_err(tea->v4l2_dev, "request-read error: %d\n", res); return res; } res = usb_interrupt_msg(shark->usbdev, usb_rcvintpipe(shark->usbdev, SHARK_IN_EP), shark->transfer_buffer, TB_LEN, &actual_len, 1000); if (res < 0) { v4l2_err(tea->v4l2_dev, "read error: %d\n", res); return res; } for (i = 0; i < 3; i++) reg |= shark->transfer_buffer[i] << (16 - i * 8); v4l2_dbg(1, debug, tea->v4l2_dev, "shark2-read: %*ph\n", 3, shark->transfer_buffer); *reg_ret = reg; return 0; } static const struct radio_tea5777_ops shark_tea_ops = { .write_reg = shark_write_reg, .read_reg = shark_read_reg, }; #ifdef SHARK_USE_LEDS static void shark_led_work(struct work_struct *work) { struct shark_device *shark = container_of(work, struct shark_device, led_work); int i, res, brightness, actual_len; for (i = 0; i < 2; i++) { if (!test_and_clear_bit(i, &shark->brightness_new)) continue; brightness = atomic_read(&shark->brightness[i]); memset(shark->transfer_buffer, 0, TB_LEN); shark->transfer_buffer[0] = 0x83 + i; shark->transfer_buffer[1] = brightness; res = usb_interrupt_msg(shark->usbdev, usb_sndintpipe(shark->usbdev, SHARK_OUT_EP), shark->transfer_buffer, TB_LEN, &actual_len, 1000); if (res < 0) v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n", shark->led_names[i], res); } } static void shark_led_set_blue(struct led_classdev *led_cdev, 
enum led_brightness value) { struct shark_device *shark = container_of(led_cdev, struct shark_device, leds[BLUE_LED]); atomic_set(&shark->brightness[BLUE_LED], value); set_bit(BLUE_LED, &shark->brightness_new); schedule_work(&shark->led_work); } static void shark_led_set_red(struct led_classdev *led_cdev, enum led_brightness value) { struct shark_device *shark = container_of(led_cdev, struct shark_device, leds[RED_LED]); atomic_set(&shark->brightness[RED_LED], value); set_bit(RED_LED, &shark->brightness_new); schedule_work(&shark->led_work); } static const struct led_classdev shark_led_templates[NO_LEDS] = { [BLUE_LED] = { .name = "%s:blue:", .brightness = LED_OFF, .max_brightness = 127, .brightness_set = shark_led_set_blue, }, [RED_LED] = { .name = "%s:red:", .brightness = LED_OFF, .max_brightness = 1, .brightness_set = shark_led_set_red, }, }; static int shark_register_leds(struct shark_device *shark, struct device *dev) { int i, retval; atomic_set(&shark->brightness[BLUE_LED], 127); INIT_WORK(&shark->led_work, shark_led_work); for (i = 0; i < NO_LEDS; i++) { shark->leds[i] = shark_led_templates[i]; snprintf(shark->led_names[i], sizeof(shark->led_names[0]), shark->leds[i].name, shark->v4l2_dev.name); shark->leds[i].name = shark->led_names[i]; retval = led_classdev_register(dev, &shark->leds[i]); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't register led: %s\n", shark->led_names[i]); return retval; } } return 0; } static void shark_unregister_leds(struct shark_device *shark) { int i; for (i = 0; i < NO_LEDS; i++) led_classdev_unregister(&shark->leds[i]); cancel_work_sync(&shark->led_work); } static inline void shark_resume_leds(struct shark_device *shark) { int i; for (i = 0; i < NO_LEDS; i++) set_bit(i, &shark->brightness_new); schedule_work(&shark->led_work); } #else static int shark_register_leds(struct shark_device *shark, struct device *dev) { v4l2_warn(&shark->v4l2_dev, "CONFIG_LEDS_CLASS not enabled, LED support disabled\n"); return 0; } static inline void shark_unregister_leds(struct shark_device *shark) { } static inline void shark_resume_leds(struct shark_device *shark) { } #endif static void usb_shark_disconnect(struct usb_interface *intf) { struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); mutex_lock(&shark->tea.mutex); v4l2_device_disconnect(&shark->v4l2_dev); radio_tea5777_exit(&shark->tea); mutex_unlock(&shark->tea.mutex); shark_unregister_leds(shark); v4l2_device_put(&shark->v4l2_dev); } static void usb_shark_release(struct v4l2_device *v4l2_dev) { struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); v4l2_device_unregister(&shark->v4l2_dev); kfree(shark->transfer_buffer); kfree(shark); } static int usb_shark_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct shark_device *shark; int retval = -ENOMEM; static const u8 ep_addresses[] = { SHARK_IN_EP | USB_DIR_IN, SHARK_OUT_EP | USB_DIR_OUT, 0}; /* Are the expected endpoints present? 
*/ if (!usb_check_int_endpoints(intf, ep_addresses)) { dev_err(&intf->dev, "Invalid radioSHARK2 device\n"); return -EINVAL; } shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL); if (!shark) return retval; shark->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL); if (!shark->transfer_buffer) goto err_alloc_buffer; v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); retval = shark_register_leds(shark, &intf->dev); if (retval) goto err_reg_leds; shark->v4l2_dev.release = usb_shark_release; retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n"); goto err_reg_dev; } shark->usbdev = interface_to_usbdev(intf); shark->tea.v4l2_dev = &shark->v4l2_dev; shark->tea.private_data = shark; shark->tea.ops = &shark_tea_ops; shark->tea.has_am = true; shark->tea.write_before_read = true; strscpy(shark->tea.card, "Griffin radioSHARK2", sizeof(shark->tea.card)); usb_make_path(shark->usbdev, shark->tea.bus_info, sizeof(shark->tea.bus_info)); retval = radio_tea5777_init(&shark->tea, THIS_MODULE); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't init tea5777\n"); goto err_init_tea; } return 0; err_init_tea: v4l2_device_unregister(&shark->v4l2_dev); err_reg_dev: shark_unregister_leds(shark); err_reg_leds: kfree(shark->transfer_buffer); err_alloc_buffer: kfree(shark); return retval; } #ifdef CONFIG_PM static int usb_shark_suspend(struct usb_interface *intf, pm_message_t message) { return 0; } static int usb_shark_resume(struct usb_interface *intf) { struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); int ret; mutex_lock(&shark->tea.mutex); ret = radio_tea5777_set_freq(&shark->tea); mutex_unlock(&shark->tea.mutex); shark_resume_leds(shark); return ret; } #endif /* Specify the bcdDevice value, as the radioSHARK and radioSHARK2 share ids */ static const struct usb_device_id usb_shark_device_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION | USB_DEVICE_ID_MATCH_INT_CLASS, .idVendor = 0x077d, .idProduct = 0x627a, .bcdDevice_lo = 0x0010, .bcdDevice_hi = 0x0010, .bInterfaceClass = 3, }, { } }; MODULE_DEVICE_TABLE(usb, usb_shark_device_table); static struct usb_driver usb_shark_driver = { .name = DRV_NAME, .probe = usb_shark_probe, .disconnect = usb_shark_disconnect, .id_table = usb_shark_device_table, #ifdef CONFIG_PM .suspend = usb_shark_suspend, .resume = usb_shark_resume, .reset_resume = usb_shark_resume, #endif }; module_usb_driver(usb_shark_driver);
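/*
 * Illustrative sketch (not part of the file above): the 7-byte
 * interrupt-packet layout used by shark_write_reg() and shark_read_reg()
 * above. Byte 0 carries the command (0x81 = write register, 0x82 =
 * request read); a write carries the 48-bit TEA5777 write register
 * MSB-first in bytes 1-6, and a read reply carries the 24-bit read
 * register MSB-first in bytes 0-2. Plain userspace C, with ex_* names
 * made up for illustration only.
 */
#include <stdint.h>
#include <string.h>

#define EX_TB_LEN 7

static void ex_pack_write_cmd(uint8_t buf[EX_TB_LEN], uint64_t reg)
{
	int i;

	memset(buf, 0, EX_TB_LEN);
	buf[0] = 0x81;				/* write-register command */
	for (i = 0; i < 6; i++)			/* bits 47..0, MSB first */
		buf[i + 1] = (reg >> (40 - i * 8)) & 0xff;
}

static uint32_t ex_unpack_read_reply(const uint8_t buf[EX_TB_LEN])
{
	uint32_t reg = 0;
	int i;

	for (i = 0; i < 3; i++)			/* bits 23..0, MSB first */
		reg |= (uint32_t)buf[i] << (16 - i * 8);
	return reg;
}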
// SPDX-License-Identifier: GPL-2.0-or-later /* * Behringer BCD2000 driver * * Copyright (C) 2014 Mario Kicherer (dev@kicherer.org) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bitmap.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/rawmidi.h> #define PREFIX "snd-bcd2000: " #define BUFSIZE 64 static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1397, 0x00bd) }, { }, }; static const unsigned char device_cmd_prefix[] = {0x03, 0x00}; static const unsigned char bcd2000_init_sequence[] = { 0x07, 0x00, 0x00, 0x00, 0x78, 0x48, 0x1c, 0x81, 0xc4, 0x00, 0x00, 0x00, 0x5e, 0x53, 0x4a, 0xf7, 0x18, 0xfa, 0x11, 0xff, 0x6c, 0xf3, 0x90, 0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x18, 0xfa, 0x11, 0xff, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0x34, 0x4a, 0xf7, 0x18, 0xfa, 0x11, 0xff }; struct bcd2000 { struct usb_device *dev; struct snd_card *card; struct usb_interface *intf; int card_index; int midi_out_active; struct snd_rawmidi *rmidi; struct snd_rawmidi_substream *midi_receive_substream; struct snd_rawmidi_substream *midi_out_substream; unsigned char midi_in_buf[BUFSIZE]; unsigned char midi_out_buf[BUFSIZE]; struct urb *midi_out_urb; struct urb *midi_in_urb; struct usb_anchor anchor; }; static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static DEFINE_MUTEX(devices_mutex); static DECLARE_BITMAP(devices_used, SNDRV_CARDS); static struct usb_driver bcd2000_driver; #ifdef CONFIG_SND_DEBUG static void bcd2000_dump_buffer(const char *prefix, const char *buf, int len) { print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE, 16, 1,
buf, len, false); } #else static void bcd2000_dump_buffer(const char *prefix, const char *buf, int len) {} #endif static int bcd2000_midi_input_open(struct snd_rawmidi_substream *substream) { return 0; } static int bcd2000_midi_input_close(struct snd_rawmidi_substream *substream) { return 0; } /* (de)register midi substream from client */ static void bcd2000_midi_input_trigger(struct snd_rawmidi_substream *substream, int up) { struct bcd2000 *bcd2k = substream->rmidi->private_data; bcd2k->midi_receive_substream = up ? substream : NULL; } static void bcd2000_midi_handle_input(struct bcd2000 *bcd2k, const unsigned char *buf, unsigned int buf_len) { unsigned int payload_length, tocopy; struct snd_rawmidi_substream *midi_receive_substream; midi_receive_substream = READ_ONCE(bcd2k->midi_receive_substream); if (!midi_receive_substream) return; bcd2000_dump_buffer(PREFIX "received from device: ", buf, buf_len); if (buf_len < 2) return; payload_length = buf[0]; /* ignore packets without payload */ if (payload_length == 0) return; tocopy = min(payload_length, buf_len-1); bcd2000_dump_buffer(PREFIX "sending to userspace: ", &buf[1], tocopy); snd_rawmidi_receive(midi_receive_substream, &buf[1], tocopy); } static void bcd2000_midi_send(struct bcd2000 *bcd2k) { int len, ret; struct snd_rawmidi_substream *midi_out_substream; BUILD_BUG_ON(sizeof(device_cmd_prefix) >= BUFSIZE); midi_out_substream = READ_ONCE(bcd2k->midi_out_substream); if (!midi_out_substream) return; /* copy command prefix bytes */ memcpy(bcd2k->midi_out_buf, device_cmd_prefix, sizeof(device_cmd_prefix)); /* * get MIDI packet and leave space for command prefix * and payload length */ len = snd_rawmidi_transmit(midi_out_substream, bcd2k->midi_out_buf + 3, BUFSIZE - 3); if (len < 0) dev_err(&bcd2k->dev->dev, "%s: snd_rawmidi_transmit error %d\n", __func__, len); if (len <= 0) return; /* set payload length */ bcd2k->midi_out_buf[2] = len; bcd2k->midi_out_urb->transfer_buffer_length = BUFSIZE; bcd2000_dump_buffer(PREFIX "sending to device: ", bcd2k->midi_out_buf, len+3); /* send packet to the BCD2000 */ ret = usb_submit_urb(bcd2k->midi_out_urb, GFP_ATOMIC); if (ret < 0) dev_err(&bcd2k->dev->dev, PREFIX "%s (%p): usb_submit_urb() failed, ret=%d, len=%d\n", __func__, midi_out_substream, ret, len); else bcd2k->midi_out_active = 1; } static int bcd2000_midi_output_open(struct snd_rawmidi_substream *substream) { return 0; } static int bcd2000_midi_output_close(struct snd_rawmidi_substream *substream) { struct bcd2000 *bcd2k = substream->rmidi->private_data; if (bcd2k->midi_out_active) { usb_kill_urb(bcd2k->midi_out_urb); bcd2k->midi_out_active = 0; } return 0; } /* (de)register midi substream from client */ static void bcd2000_midi_output_trigger(struct snd_rawmidi_substream *substream, int up) { struct bcd2000 *bcd2k = substream->rmidi->private_data; if (up) { bcd2k->midi_out_substream = substream; /* check if there is data userspace wants to send */ if (!bcd2k->midi_out_active) bcd2000_midi_send(bcd2k); } else { bcd2k->midi_out_substream = NULL; } } static void bcd2000_output_complete(struct urb *urb) { struct bcd2000 *bcd2k = urb->context; bcd2k->midi_out_active = 0; if (urb->status) dev_warn(&urb->dev->dev, PREFIX "output urb->status: %d\n", urb->status); if (urb->status == -ESHUTDOWN) return; /* check if there is more data userspace wants to send */ bcd2000_midi_send(bcd2k); } static void bcd2000_input_complete(struct urb *urb) { int ret; struct bcd2000 *bcd2k = urb->context; if (urb->status) dev_warn(&urb->dev->dev, PREFIX "input 
urb->status: %i\n", urb->status); if (!bcd2k || urb->status == -ESHUTDOWN) return; if (urb->actual_length > 0) bcd2000_midi_handle_input(bcd2k, urb->transfer_buffer, urb->actual_length); /* return URB to device */ ret = usb_submit_urb(bcd2k->midi_in_urb, GFP_ATOMIC); if (ret < 0) dev_err(&bcd2k->dev->dev, PREFIX "%s: usb_submit_urb() failed, ret=%d\n", __func__, ret); } static const struct snd_rawmidi_ops bcd2000_midi_output = { .open = bcd2000_midi_output_open, .close = bcd2000_midi_output_close, .trigger = bcd2000_midi_output_trigger, }; static const struct snd_rawmidi_ops bcd2000_midi_input = { .open = bcd2000_midi_input_open, .close = bcd2000_midi_input_close, .trigger = bcd2000_midi_input_trigger, }; static void bcd2000_init_device(struct bcd2000 *bcd2k) { int ret; init_usb_anchor(&bcd2k->anchor); usb_anchor_urb(bcd2k->midi_out_urb, &bcd2k->anchor); usb_anchor_urb(bcd2k->midi_in_urb, &bcd2k->anchor); /* copy init sequence into buffer */ memcpy(bcd2k->midi_out_buf, bcd2000_init_sequence, 52); bcd2k->midi_out_urb->transfer_buffer_length = 52; /* submit sequence */ ret = usb_submit_urb(bcd2k->midi_out_urb, GFP_KERNEL); if (ret < 0) dev_err(&bcd2k->dev->dev, PREFIX "%s: usb_submit_urb() out failed, ret=%d: ", __func__, ret); else bcd2k->midi_out_active = 1; /* pass URB to device to enable button and controller events */ ret = usb_submit_urb(bcd2k->midi_in_urb, GFP_KERNEL); if (ret < 0) dev_err(&bcd2k->dev->dev, PREFIX "%s: usb_submit_urb() in failed, ret=%d: ", __func__, ret); /* ensure initialization is finished */ usb_wait_anchor_empty_timeout(&bcd2k->anchor, 1000); } static int bcd2000_init_midi(struct bcd2000 *bcd2k) { int ret; struct snd_rawmidi *rmidi; ret = snd_rawmidi_new(bcd2k->card, bcd2k->card->shortname, 0, 1, /* output */ 1, /* input */ &rmidi); if (ret < 0) return ret; strscpy(rmidi->name, bcd2k->card->shortname, sizeof(rmidi->name)); rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; rmidi->private_data = bcd2k; rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT; snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &bcd2000_midi_output); rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT; snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &bcd2000_midi_input); bcd2k->rmidi = rmidi; bcd2k->midi_in_urb = usb_alloc_urb(0, GFP_KERNEL); bcd2k->midi_out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!bcd2k->midi_in_urb || !bcd2k->midi_out_urb) { dev_err(&bcd2k->dev->dev, PREFIX "usb_alloc_urb failed\n"); return -ENOMEM; } usb_fill_int_urb(bcd2k->midi_in_urb, bcd2k->dev, usb_rcvintpipe(bcd2k->dev, 0x81), bcd2k->midi_in_buf, BUFSIZE, bcd2000_input_complete, bcd2k, 1); usb_fill_int_urb(bcd2k->midi_out_urb, bcd2k->dev, usb_sndintpipe(bcd2k->dev, 0x1), bcd2k->midi_out_buf, BUFSIZE, bcd2000_output_complete, bcd2k, 1); /* sanity checks of EPs before actually submitting */ if (usb_urb_ep_type_check(bcd2k->midi_in_urb) || usb_urb_ep_type_check(bcd2k->midi_out_urb)) { dev_err(&bcd2k->dev->dev, "invalid MIDI EP\n"); return -EINVAL; } bcd2000_init_device(bcd2k); return 0; } static void bcd2000_free_usb_related_resources(struct bcd2000 *bcd2k, struct usb_interface *interface) { usb_kill_urb(bcd2k->midi_out_urb); usb_kill_urb(bcd2k->midi_in_urb); usb_free_urb(bcd2k->midi_out_urb); usb_free_urb(bcd2k->midi_in_urb); if (bcd2k->intf) { usb_set_intfdata(bcd2k->intf, NULL); bcd2k->intf = NULL; } } static int bcd2000_probe(struct usb_interface *interface, const struct usb_device_id *usb_id) { struct snd_card *card; struct bcd2000 *bcd2k; unsigned int card_index; char usb_path[32]; int err; 
mutex_lock(&devices_mutex); for (card_index = 0; card_index < SNDRV_CARDS; ++card_index) if (!test_bit(card_index, devices_used)) break; if (card_index >= SNDRV_CARDS) { mutex_unlock(&devices_mutex); return -ENOENT; } err = snd_card_new(&interface->dev, index[card_index], id[card_index], THIS_MODULE, sizeof(*bcd2k), &card); if (err < 0) { mutex_unlock(&devices_mutex); return err; } bcd2k = card->private_data; bcd2k->dev = interface_to_usbdev(interface); bcd2k->card = card; bcd2k->card_index = card_index; bcd2k->intf = interface; snd_card_set_dev(card, &interface->dev); strscpy(card->driver, "snd-bcd2000", sizeof(card->driver)); strscpy(card->shortname, "BCD2000", sizeof(card->shortname)); usb_make_path(bcd2k->dev, usb_path, sizeof(usb_path)); snprintf(bcd2k->card->longname, sizeof(bcd2k->card->longname), "Behringer BCD2000 at %s", usb_path); err = bcd2000_init_midi(bcd2k); if (err < 0) goto probe_error; err = snd_card_register(card); if (err < 0) goto probe_error; usb_set_intfdata(interface, bcd2k); set_bit(card_index, devices_used); mutex_unlock(&devices_mutex); return 0; probe_error: dev_info(&bcd2k->dev->dev, PREFIX "error during probing"); bcd2000_free_usb_related_resources(bcd2k, interface); snd_card_free(card); mutex_unlock(&devices_mutex); return err; } static void bcd2000_disconnect(struct usb_interface *interface) { struct bcd2000 *bcd2k = usb_get_intfdata(interface); if (!bcd2k) return; mutex_lock(&devices_mutex); /* make sure that userspace cannot create new requests */ snd_card_disconnect(bcd2k->card); bcd2000_free_usb_related_resources(bcd2k, interface); clear_bit(bcd2k->card_index, devices_used); snd_card_free_when_closed(bcd2k->card); mutex_unlock(&devices_mutex); } static struct usb_driver bcd2000_driver = { .name = "snd-bcd2000", .probe = bcd2000_probe, .disconnect = bcd2000_disconnect, .id_table = id_table, }; module_usb_driver(bcd2000_driver); MODULE_DEVICE_TABLE(usb, id_table); MODULE_AUTHOR("Mario Kicherer, dev@kicherer.org"); MODULE_DESCRIPTION("Behringer BCD2000 driver"); MODULE_LICENSE("GPL");
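/*
 * Illustrative sketch (not part of the file above): the USB packet
 * framing implemented by bcd2000_midi_send() and
 * bcd2000_midi_handle_input() above. Outgoing packets start with the
 * fixed prefix 0x03 0x00 and carry the payload length in byte 2 and the
 * MIDI bytes from byte 3 on; incoming packets carry the payload length
 * in byte 0. Plain userspace C, with ex_* names made up for illustration
 * only.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define EX_BUFSIZE 64

/* Frame up to EX_BUFSIZE - 3 MIDI bytes; returns the total packet length. */
static size_t ex_frame_midi(uint8_t pkt[EX_BUFSIZE], const uint8_t *midi,
			    size_t len)
{
	if (len > EX_BUFSIZE - 3)
		len = EX_BUFSIZE - 3;
	pkt[0] = 0x03;			/* device_cmd_prefix[0] */
	pkt[1] = 0x00;			/* device_cmd_prefix[1] */
	pkt[2] = (uint8_t)len;		/* payload length */
	memcpy(&pkt[3], midi, len);
	return len + 3;
}

/* Extract the payload pointer/length; returns 0 for short or empty packets. */
static size_t ex_deframe_midi(const uint8_t *pkt, size_t pkt_len,
			      const uint8_t **payload)
{
	size_t len;

	if (pkt_len < 2 || pkt[0] == 0)
		return 0;
	len = pkt[0];
	if (len > pkt_len - 1)		/* clamp to what actually arrived */
		len = pkt_len - 1;
	*payload = &pkt[1];
	return len;
}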
// SPDX-License-Identifier: GPL-2.0-only #include <linux/netdevice.h> #include <linux/notifier.h> #include <linux/rtnetlink.h> #include <net/busy_poll.h> #include <net/net_namespace.h> #include <net/netdev_queues.h> #include <net/netdev_rx_queue.h> #include <net/sock.h> #include <net/xdp.h> #include <net/xdp_sock.h> #include <net/page_pool/memory_provider.h> #include "dev.h" #include "devmem.h" #include "netdev-genl-gen.h" struct netdev_nl_dump_ctx { unsigned long ifindex; unsigned int rxq_idx; unsigned int txq_idx; unsigned int napi_id; }; static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb) { NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx); return (struct netdev_nl_dump_ctx *)cb->ctx; } static int netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info) { u64 xsk_features = 0; u64 xdp_rx_meta = 0; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; #define XDP_METADATA_KFUNC(_, flag, __, xmo) \ if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \ xdp_rx_meta |= flag; XDP_METADATA_KFUNC_xxx #undef XDP_METADATA_KFUNC if (netdev->xsk_tx_metadata_ops) { if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp) xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP; if (netdev->xsk_tx_metadata_ops->tmo_request_checksum) xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM; if (netdev->xsk_tx_metadata_ops->tmo_request_launch_time) xsk_features |= NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO; } if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) || nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES, netdev->xdp_features, NETDEV_A_DEV_PAD) || nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES, xdp_rx_meta, NETDEV_A_DEV_PAD) || nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES, xsk_features, NETDEV_A_DEV_PAD)) goto err_cancel_msg; if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) { if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS, netdev->xdp_zc_max_segs)) goto err_cancel_msg; } genlmsg_end(rsp, hdr); return 0; err_cancel_msg: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static void netdev_genl_dev_notify(struct net_device *netdev, int cmd) { struct genl_info info; struct sk_buff *ntf; if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev), NETDEV_NLGRP_MGMT)) return; genl_info_init_ntf(&info, &netdev_nl_family, cmd); ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!ntf) return; if (netdev_nl_dev_fill(netdev, ntf, &info)) { nlmsg_free(ntf); return; } genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf, 0, NETDEV_NLGRP_MGMT, GFP_KERNEL); } int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info) { struct net_device *netdev; struct sk_buff *rsp; u32 ifindex; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX)) return -EINVAL; ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; rtnl_lock(); netdev = __dev_get_by_index(genl_info_net(info), ifindex); if (netdev) err = netdev_nl_dev_fill(netdev, rsp, info); else err = -ENODEV; rtnl_unlock(); if (err) goto err_free_msg; return genlmsg_reply(rsp, info); err_free_msg:
nlmsg_free(rsp); return err; } int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; int err = 0; rtnl_lock(); for_each_netdev_dump(net, netdev, ctx->ifindex) { err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb)); if (err < 0) break; } rtnl_unlock(); return err; } static int netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi, const struct genl_info *info) { unsigned long irq_suspend_timeout; unsigned long gro_flush_timeout; u32 napi_defer_hard_irqs; void *hdr; pid_t pid; if (!napi->dev->up) return 0; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id)) goto nla_put_failure; if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex)) goto nla_put_failure; if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq)) goto nla_put_failure; if (napi->thread) { pid = task_pid_nr(napi->thread); if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid)) goto nla_put_failure; } napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi); if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS, napi_defer_hard_irqs)) goto nla_put_failure; irq_suspend_timeout = napi_get_irq_suspend_timeout(napi); if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT, irq_suspend_timeout)) goto nla_put_failure; gro_flush_timeout = napi_get_gro_flush_timeout(napi); if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, gro_flush_timeout)) goto nla_put_failure; genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info) { struct napi_struct *napi; struct sk_buff *rsp; u32 napi_id; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID)) return -EINVAL; napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id); if (napi) { err = netdev_nl_napi_fill_one(rsp, napi, info); netdev_unlock(napi->dev); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]); err = -ENOENT; } if (err) { goto err_free_msg; } else if (!rsp->len) { err = -ENOENT; goto err_free_msg; } return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); return err; } static int netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { struct napi_struct *napi; unsigned int prev_id; int err = 0; if (!netdev->up) return err; prev_id = UINT_MAX; list_for_each_entry(napi, &netdev->napi_list, dev_list) { if (!napi_id_valid(napi->napi_id)) continue; /* Dump continuation below depends on the list being sorted */ WARN_ON_ONCE(napi->napi_id >= prev_id); prev_id = napi->napi_id; if (ctx->napi_id && napi->napi_id >= ctx->napi_id) continue; err = netdev_nl_napi_fill_one(rsp, napi, info); if (err) return err; ctx->napi_id = napi->napi_id; } return err; } int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; u32 ifindex = 0; int err = 0; if (info->attrs[NETDEV_A_NAPI_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]); if (ifindex) { netdev = netdev_get_by_index_lock(net, ifindex); if (netdev) { err = 
netdev_nl_napi_dump_one(netdev, skb, info, ctx); netdev_unlock(netdev); } else { err = -ENODEV; } } else { for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_napi_dump_one(netdev, skb, info, ctx); if (err < 0) break; ctx->napi_id = 0; } } return err; } static int netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info) { u64 irq_suspend_timeout = 0; u64 gro_flush_timeout = 0; u32 defer = 0; if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) { defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]); napi_set_defer_hard_irqs(napi, defer); } if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) { irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]); napi_set_irq_suspend_timeout(napi, irq_suspend_timeout); } if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) { gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]); napi_set_gro_flush_timeout(napi, gro_flush_timeout); } return 0; } int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info) { struct napi_struct *napi; unsigned int napi_id; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID)) return -EINVAL; napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]); napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id); if (napi) { err = netdev_nl_napi_set_config(napi, info); netdev_unlock(napi->dev); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]); err = -ENOENT; } return err; } static int nla_put_napi_id(struct sk_buff *skb, const struct napi_struct *napi) { if (napi && napi_id_valid(napi->napi_id)) return nla_put_u32(skb, NETDEV_A_QUEUE_NAPI_ID, napi->napi_id); return 0; } static int netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { struct pp_memory_provider_params *params; struct netdev_rx_queue *rxq; struct netdev_queue *txq; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) || nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) || nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex)) goto nla_put_failure; switch (q_type) { case NETDEV_QUEUE_TYPE_RX: rxq = __netif_get_rx_queue(netdev, q_idx); if (nla_put_napi_id(rsp, rxq->napi)) goto nla_put_failure; params = &rxq->mp_params; if (params->mp_ops && params->mp_ops->nl_fill(params->mp_priv, rsp, rxq)) goto nla_put_failure; #ifdef CONFIG_XDP_SOCKETS if (rxq->pool) if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK)) goto nla_put_failure; #endif break; case NETDEV_QUEUE_TYPE_TX: txq = netdev_get_tx_queue(netdev, q_idx); if (nla_put_napi_id(rsp, txq->napi)) goto nla_put_failure; #ifdef CONFIG_XDP_SOCKETS if (txq->pool) if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK)) goto nla_put_failure; #endif break; } genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id, u32 q_type) { switch (q_type) { case NETDEV_QUEUE_TYPE_RX: if (q_id >= netdev->real_num_rx_queues) return -EINVAL; return 0; case NETDEV_QUEUE_TYPE_TX: if (q_id >= netdev->real_num_tx_queues) return -EINVAL; } return 0; } static int netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { int err; if (!netdev->up) return -ENOENT; err = netdev_nl_queue_validate(netdev, q_idx, q_type); if (err) return err; return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info); } int 
netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info) { u32 q_id, q_type, ifindex; struct net_device *netdev; struct sk_buff *rsp; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX)) return -EINVAL; q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]); q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]); ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; rtnl_lock(); netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); if (netdev) { err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info); netdev_unlock(netdev); } else { err = -ENODEV; } rtnl_unlock(); if (err) goto err_free_msg; return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); return err; } static int netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { int err = 0; if (!netdev->up) return err; for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) { err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx, NETDEV_QUEUE_TYPE_RX, info); if (err) return err; } for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) { err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx, NETDEV_QUEUE_TYPE_TX, info); if (err) return err; } return err; } int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; u32 ifindex = 0; int err = 0; if (info->attrs[NETDEV_A_QUEUE_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]); rtnl_lock(); if (ifindex) { netdev = netdev_get_by_index_lock(net, ifindex); if (netdev) { err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); netdev_unlock(netdev); } else { err = -ENODEV; } } else { for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); if (err < 0) break; ctx->rxq_idx = 0; ctx->txq_idx = 0; } } rtnl_unlock(); return err; } #define NETDEV_STAT_NOT_SET (~0ULL) static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size) { const u64 *add = _add; u64 *sum = _sum; while (size) { if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET) *sum += *add; sum++; add++; size -= 8; } } static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value) { if (value == NETDEV_STAT_NOT_SET) return 0; return nla_put_uint(rsp, attr_id, value); } static int netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx) { if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_COMPLETE, rx->csum_complete) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) || netdev_stat_put(rsp, 
NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits)) return -EMSGSIZE; return 0; } static int netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx) { if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake)) return -EMSGSIZE; return 0; } static int netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp, u32 q_type, int i, const struct genl_info *info) { const struct netdev_stat_ops *ops = netdev->stat_ops; struct netdev_queue_stats_rx rx; struct netdev_queue_stats_tx tx; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) || nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) || nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i)) goto nla_put_failure; switch (q_type) { case NETDEV_QUEUE_TYPE_RX: memset(&rx, 0xff, sizeof(rx)); ops->get_queue_stats_rx(netdev, i, &rx); if (!memchr_inv(&rx, 0xff, sizeof(rx))) goto nla_cancel; if (netdev_nl_stats_write_rx(rsp, &rx)) goto nla_put_failure; break; case NETDEV_QUEUE_TYPE_TX: memset(&tx, 0xff, sizeof(tx)); ops->get_queue_stats_tx(netdev, i, &tx); if (!memchr_inv(&tx, 0xff, sizeof(tx))) goto nla_cancel; if (netdev_nl_stats_write_tx(rsp, &tx)) goto nla_put_failure; break; } genlmsg_end(rsp, hdr); return 0; nla_cancel: genlmsg_cancel(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { const struct netdev_stat_ops *ops = netdev->stat_ops; int i, err; if (!(netdev->flags & IFF_UP)) return 0; i = ctx->rxq_idx; while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) { err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX, i, info); if (err) return err; ctx->rxq_idx = ++i; } i = ctx->txq_idx; while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) { err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX, i, info); if (err) return err; ctx->txq_idx = ++i; } ctx->rxq_idx = 0; ctx->txq_idx = 0; return 0; } static int netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info) { struct netdev_queue_stats_rx rx_sum, rx; struct netdev_queue_stats_tx tx_sum, tx; const struct netdev_stat_ops *ops; void *hdr; int i; ops = netdev->stat_ops; /* Netdev can't guarantee any 
complete counters */ if (!ops->get_base_stats) return 0; memset(&rx_sum, 0xff, sizeof(rx_sum)); memset(&tx_sum, 0xff, sizeof(tx_sum)); ops->get_base_stats(netdev, &rx_sum, &tx_sum); /* The op was there, but nothing reported, don't bother */ if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) && !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum))) return 0; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex)) goto nla_put_failure; for (i = 0; i < netdev->real_num_rx_queues; i++) { memset(&rx, 0xff, sizeof(rx)); if (ops->get_queue_stats_rx) ops->get_queue_stats_rx(netdev, i, &rx); netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx)); } for (i = 0; i < netdev->real_num_tx_queues; i++) { memset(&tx, 0xff, sizeof(tx)); if (ops->get_queue_stats_tx) ops->get_queue_stats_tx(netdev, i, &tx); netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx)); } if (netdev_nl_stats_write_rx(rsp, &rx_sum) || netdev_nl_stats_write_tx(rsp, &tx_sum)) goto nla_put_failure; genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope, struct sk_buff *skb, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { if (!netdev->stat_ops) return 0; switch (scope) { case 0: return netdev_nl_stats_by_netdev(netdev, skb, info); case NETDEV_QSTATS_SCOPE_QUEUE: return netdev_nl_stats_by_queue(netdev, skb, info, ctx); } return -EINVAL; /* Should not happen, per netlink policy */ } int netdev_nl_qstats_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; unsigned int ifindex; unsigned int scope; int err = 0; scope = 0; if (info->attrs[NETDEV_A_QSTATS_SCOPE]) scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]); ifindex = 0; if (info->attrs[NETDEV_A_QSTATS_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]); rtnl_lock(); if (ifindex) { netdev = __dev_get_by_index(net, ifindex); if (netdev && netdev->stat_ops) { err = netdev_nl_qstats_get_dump_one(netdev, scope, skb, info, ctx); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_QSTATS_IFINDEX]); err = netdev ? 
-EOPNOTSUPP : -ENODEV; } } else { for_each_netdev_dump(net, netdev, ctx->ifindex) { err = netdev_nl_qstats_get_dump_one(netdev, scope, skb, info, ctx); if (err < 0) break; } } rtnl_unlock(); return err; } int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)]; struct net_devmem_dmabuf_binding *binding; u32 ifindex, dmabuf_fd, rxq_idx; struct netdev_nl_sock *priv; struct net_device *netdev; struct sk_buff *rsp; struct nlattr *attr; int rem, err = 0; void *hdr; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES)) return -EINVAL; ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]); dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]); priv = genl_sk_priv_get(&netdev_nl_family, NETLINK_CB(skb).sk); if (IS_ERR(priv)) return PTR_ERR(priv); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; hdr = genlmsg_iput(rsp, info); if (!hdr) { err = -EMSGSIZE; goto err_genlmsg_free; } mutex_lock(&priv->lock); netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); if (!netdev || !netif_device_present(netdev)) { err = -ENODEV; goto err_unlock_sock; } if (!netdev_need_ops_lock(netdev)) { err = -EOPNOTSUPP; NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_DEV_IFINDEX]); goto err_unlock; } binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack); if (IS_ERR(binding)) { err = PTR_ERR(binding); goto err_unlock; } nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES, genlmsg_data(info->genlhdr), genlmsg_len(info->genlhdr), rem) { err = nla_parse_nested( tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr, netdev_queue_id_nl_policy, info->extack); if (err < 0) goto err_unbind; if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) || NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) { err = -EINVAL; goto err_unbind; } if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) { NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]); err = -EINVAL; goto err_unbind; } rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]); err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding, info->extack); if (err) goto err_unbind; } list_add(&binding->list, &priv->bindings); nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id); genlmsg_end(rsp, hdr); err = genlmsg_reply(rsp, info); if (err) goto err_unbind; netdev_unlock(netdev); mutex_unlock(&priv->lock); return 0; err_unbind: net_devmem_unbind_dmabuf(binding); err_unlock: netdev_unlock(netdev); err_unlock_sock: mutex_unlock(&priv->lock); err_genlmsg_free: nlmsg_free(rsp); return err; } void netdev_nl_sock_priv_init(struct netdev_nl_sock *priv) { INIT_LIST_HEAD(&priv->bindings); mutex_init(&priv->lock); } void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv) { struct net_devmem_dmabuf_binding *binding; struct net_devmem_dmabuf_binding *temp; struct net_device *dev; mutex_lock(&priv->lock); list_for_each_entry_safe(binding, temp, &priv->bindings, list) { dev = binding->dev; netdev_lock(dev); net_devmem_unbind_dmabuf(binding); netdev_unlock(dev); } mutex_unlock(&priv->lock); } static int netdev_genl_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_REGISTER: netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF); break; case NETDEV_UNREGISTER: netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF); 
break; case NETDEV_XDP_FEAT_CHANGE: netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF); break; } return NOTIFY_OK; } static struct notifier_block netdev_genl_nb = { .notifier_call = netdev_genl_netdevice_event, }; static int __init netdev_genl_init(void) { int err; err = register_netdevice_notifier(&netdev_genl_nb); if (err) return err; err = genl_register_family(&netdev_nl_family); if (err) goto err_unreg_ntf; return 0; err_unreg_ntf: unregister_netdevice_notifier(&netdev_genl_nb); return err; } subsys_initcall(netdev_genl_init);
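Worth noting in the qstats path above: netdev_nl_stats_by_netdev() 0xff-fills each stats struct so every u64 starts out as the NETDEV_STAT_NOT_SET sentinel, lets the driver callbacks overwrite only the counters they support, folds queues together with netdev_nl_stats_add() (which skips any field left unset), and uses memchr_inv() to detect that nothing was reported at all. Below is a minimal userspace sketch of that sentinel pattern — the struct layout and helper names are invented for illustration, not taken from the kernel:

/* build: cc -o qstats_demo qstats_demo.c */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define STAT_NOT_SET (~0ULL)		/* same idea as NETDEV_STAT_NOT_SET */

struct rx_stats {			/* stand-in for netdev_queue_stats_rx */
	uint64_t packets;
	uint64_t bytes;
	uint64_t alloc_fail;
};

/* Mirrors netdev_nl_stats_add(): walk two structs as u64 arrays and
 * accumulate only fields that both sides actually reported. */
static void stats_add_masked(void *_sum, const void *_add, size_t size)
{
	const uint64_t *add = _add;
	uint64_t *sum = _sum;

	while (size) {
		if (*add != STAT_NOT_SET && *sum != STAT_NOT_SET)
			*sum += *add;
		sum++;
		add++;
		size -= sizeof(uint64_t);
	}
}

/* Userspace stand-in for memchr_inv(): true if every byte equals c. */
static int all_bytes_are(const void *p, int c, size_t n)
{
	const unsigned char *b = p;

	while (n--)
		if (*b++ != (unsigned char)c)
			return 0;
	return 1;
}

int main(void)
{
	struct rx_stats sum, q0, q1;

	/* 0xff fill == every u64 becomes STAT_NOT_SET ("not reported"). */
	memset(&sum, 0xff, sizeof(sum));
	sum.packets = 0;		/* base stats report packets only */

	memset(&q0, 0xff, sizeof(q0));
	q0.packets = 10;
	q0.bytes = 1500;		/* bytes never joins the sum: base left it unset */

	memset(&q1, 0xff, sizeof(q1));
	q1.packets = 5;

	stats_add_masked(&sum, &q0, sizeof(sum));
	stats_add_masked(&sum, &q1, sizeof(sum));

	if (all_bytes_are(&sum, 0xff, sizeof(sum)))
		puts("nothing reported");	/* the memchr_inv() short-circuit */

	/* prints: packets=15 bytes_set=0 */
	printf("packets=%llu bytes_set=%d\n",
	       (unsigned long long)sum.packets, sum.bytes != STAT_NOT_SET);
	return 0;
}

The sentinel choice makes partial driver support cheap: a driver that cannot count a statistic simply never writes the field, and the netlink layer omits the attribute rather than reporting a misleading zero.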
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Intel Corporation */

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_codec.h"

static int hci_codec_list_add(struct list_head *list,
			      struct hci_op_read_local_codec_caps *sent,
			      struct hci_rp_read_local_codec_caps *rp,
			      void *caps,
			      __u32 len)
{
	struct codec_list *entry;

	entry = kzalloc(sizeof(*entry) + len, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id = sent->id;
	if (sent->id == 0xFF) {
		entry->cid = __le16_to_cpu(sent->cid);
		entry->vid = __le16_to_cpu(sent->vid);
	}
	entry->transport = sent->transport;
	entry->len = len;
	entry->num_caps = 0;
	if (rp) {
		entry->num_caps = rp->num_caps;
		memcpy(entry->caps, caps, len);
	}
	list_add(&entry->list, list);

	return 0;
}

void hci_codec_list_clear(struct list_head *codec_list)
{
	struct codec_list *c, *n;

	list_for_each_entry_safe(c, n, codec_list, list) {
		list_del(&c->list);
		kfree(c);
	}
}

static void hci_read_codec_capabilities(struct hci_dev *hdev, __u8 transport,
					struct hci_op_read_local_codec_caps *cmd)
{
	__u8 i;

	for (i = 0; i < TRANSPORT_TYPE_MAX; i++) {
		if (transport & BIT(i)) {
			struct hci_rp_read_local_codec_caps *rp;
			struct hci_codec_caps *caps;
			struct sk_buff *skb;
			__u8 j;
			__u32 len;

			cmd->transport = i;

			/* If Read_Codec_Capabilities command is not supported
			 * then just add codec to the list without caps
			 */
			if (!(hdev->commands[45] & 0x08)) {
				hci_dev_lock(hdev);
				hci_codec_list_add(&hdev->local_codecs, cmd,
						   NULL, NULL, 0);
				hci_dev_unlock(hdev);
				continue;
			}

			skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS,
						sizeof(*cmd), cmd, 0,
						HCI_CMD_TIMEOUT, NULL);
			if (IS_ERR(skb)) {
				bt_dev_err(hdev, "Failed to read codec capabilities (%ld)",
					   PTR_ERR(skb));
				continue;
			}

			if (skb->len < sizeof(*rp))
				goto error;

			rp = (void *)skb->data;

			if (rp->status)
				goto error;

			if (!rp->num_caps) {
				len = 0;
				/* this codec doesn't have capabilities */
				goto skip_caps_parse;
			}

			skb_pull(skb, sizeof(*rp));

			for (j = 0, len = 0; j < rp->num_caps; j++) {
				caps = (void *)skb->data;
				if (skb->len < sizeof(*caps))
					goto error;
				if (skb->len < caps->len)
					goto error;
				len += sizeof(caps->len) + caps->len;
				skb_pull(skb, sizeof(caps->len) + caps->len);
			}

skip_caps_parse:
			hci_dev_lock(hdev);
			hci_codec_list_add(&hdev->local_codecs, cmd, rp,
					   (__u8 *)rp + sizeof(*rp), len);
			hci_dev_unlock(hdev);
error:
			kfree_skb(skb);
		}
	}
}

void hci_read_supported_codecs(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	struct hci_rp_read_local_supported_codecs *rp;
	struct hci_std_codecs *std_codecs;
	struct hci_vnd_codecs *vnd_codecs;
	struct hci_op_read_local_codec_caps caps;
	__u8 i;

	skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL, 0,
				HCI_CMD_TIMEOUT, NULL);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Failed to read local supported codecs (%ld)",
			   PTR_ERR(skb));
		return;
	}

	if (skb->len < sizeof(*rp))
		goto error;

	rp = (void *)skb->data;

	if (rp->status)
		goto error;

	skb_pull(skb, sizeof(rp->status));

	std_codecs = (void *)skb->data;

	/* validate codecs length before accessing */
	if (skb->len < flex_array_size(std_codecs, codec, std_codecs->num)
	    + sizeof(std_codecs->num))
		goto error;

	/* enumerate codec capabilities of standard codecs */
	memset(&caps, 0, sizeof(caps));
	for (i = 0; i < std_codecs->num; i++) {
		caps.id = std_codecs->codec[i];
		caps.direction = 0x00;
		hci_read_codec_capabilities(hdev,
					    LOCAL_CODEC_ACL_MASK | LOCAL_CODEC_SCO_MASK,
					    &caps);
	}

	skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num)
		 + sizeof(std_codecs->num));

	vnd_codecs = (void *)skb->data;

	/* validate vendor codecs length before accessing */
	if (skb->len < flex_array_size(vnd_codecs, codec, vnd_codecs->num)
	    + sizeof(vnd_codecs->num))
		goto error;

	/* enumerate vendor codec capabilities */
	for (i = 0; i < vnd_codecs->num; i++) {
		caps.id = 0xFF;
		caps.cid = vnd_codecs->codec[i].cid;
		caps.vid = vnd_codecs->codec[i].vid;
		caps.direction = 0x00;
		hci_read_codec_capabilities(hdev,
					    LOCAL_CODEC_ACL_MASK | LOCAL_CODEC_SCO_MASK,
					    &caps);
	}

error:
	kfree_skb(skb);
}

void hci_read_supported_codecs_v2(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	struct hci_rp_read_local_supported_codecs_v2 *rp;
	struct hci_std_codecs_v2 *std_codecs;
	struct hci_vnd_codecs_v2 *vnd_codecs;
	struct hci_op_read_local_codec_caps caps;
	__u8 i;

	skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODECS_V2, 0, NULL, 0,
				HCI_CMD_TIMEOUT, NULL);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Failed to read local supported codecs (%ld)",
			   PTR_ERR(skb));
		return;
	}

	if (skb->len < sizeof(*rp))
		goto error;

	rp = (void *)skb->data;

	if (rp->status)
		goto error;

	skb_pull(skb, sizeof(rp->status));

	std_codecs = (void *)skb->data;

	/* check for payload data length before accessing */
	if (skb->len < flex_array_size(std_codecs, codec, std_codecs->num)
	    + sizeof(std_codecs->num))
		goto error;

	memset(&caps, 0, sizeof(caps));
	for (i = 0; i < std_codecs->num; i++) {
		caps.id = std_codecs->codec[i].id;
		hci_read_codec_capabilities(hdev, std_codecs->codec[i].transport,
					    &caps);
	}

	skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num)
		 + sizeof(std_codecs->num));

	vnd_codecs = (void *)skb->data;

	/* check for payload data length before accessing */
	if (skb->len < flex_array_size(vnd_codecs, codec, vnd_codecs->num)
	    + sizeof(vnd_codecs->num))
		goto error;

	for (i = 0; i < vnd_codecs->num; i++) {
		caps.id = 0xFF;
		caps.cid = vnd_codecs->codec[i].cid;
		caps.vid = vnd_codecs->codec[i].vid;
		hci_read_codec_capabilities(hdev, vnd_codecs->codec[i].transport,
					    &caps);
	}

error:
	kfree_skb(skb);
}
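The capability parser in hci_read_codec_capabilities() above walks a count-prefixed sequence of records, each a one-byte length followed by that many payload bytes, revalidating the remaining buffer before every access. Below is a standalone sketch of the same walk over a plain byte buffer; walk_caps and the test vectors are invented for illustration:

/* build: cc -o caps_walk caps_walk.c */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* One capability record: a length byte followed by that many data
 * bytes, mirroring the num_caps loop above. Returns -1 if the claimed
 * count would run past the end of the buffer. */
static int walk_caps(const uint8_t *buf, size_t buf_len,
		     unsigned int num_caps, size_t *total_len)
{
	size_t off = 0, len = 0;
	unsigned int i;

	for (i = 0; i < num_caps; i++) {
		size_t rec;

		if (buf_len - off < 1)		/* room for the length byte? */
			return -1;
		rec = 1 + buf[off];		/* length byte + payload */
		if (buf_len - off < rec)	/* room for the payload? */
			return -1;
		len += rec;
		off += rec;			/* the skb_pull() equivalent */
	}
	*total_len = len;
	return 0;
}

int main(void)
{
	/* Two records: a 2-byte cap {0xaa,0xbb} and a 1-byte cap {0xcc}. */
	const uint8_t caps[] = { 0x02, 0xaa, 0xbb, 0x01, 0xcc };
	size_t total;

	if (walk_caps(caps, sizeof(caps), 2, &total) == 0)
		printf("caps span %zu bytes\n", total);	/* prints 5 */

	/* A count that overruns the buffer is rejected, not read past. */
	if (walk_caps(caps, sizeof(caps), 3, &total) < 0)
		puts("truncated record rejected");
	return 0;
}

The accumulated length is what hci_codec_list_add() uses to size its kzalloc() and memcpy(), so the bounds checks during the walk are what keep a malicious controller from inflating the copy.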
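hci_read_supported_codecs() and its v2 variant guard every flexible-array access with a flex_array_size() length check before touching codec[]. Below is a minimal userspace approximation of that validation; the kernel helper saturates on multiplication overflow, which this sketch mimics with the GCC/Clang __builtin_mul_overflow builtin, and the struct and function names are invented:

/* build: cc -o flex_check flex_check.c */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct codec_list_hdr {		/* shape of hci_std_codecs: count + flex array */
	uint8_t num;
	uint8_t codec[];
};

/* Overflow-safe count * elem_size, saturating to SIZE_MAX the way
 * flex_array_size() does, so the comparison below fails cleanly
 * instead of wrapping around. */
static size_t flex_size(size_t count, size_t elem)
{
	size_t bytes;

	if (__builtin_mul_overflow(count, elem, &bytes))
		return SIZE_MAX;
	return bytes;
}

/* Validate that a count-prefixed flexible array fits in the payload
 * before dereferencing codec[]. */
static int codecs_valid(const void *buf, size_t buf_len)
{
	const struct codec_list_hdr *h = buf;
	size_t need;

	if (buf_len < sizeof(*h))
		return 0;
	need = flex_size(h->num, sizeof(h->codec[0]));
	return need <= buf_len - sizeof(*h);
}

int main(void)
{
	const uint8_t ok[]  = { 2, 0x01, 0x02 };	/* num=2, two codec IDs */
	const uint8_t bad[] = { 5, 0x01, 0x02 };	/* claims 5, carries 2 */

	printf("ok:  %d\n", codecs_valid(ok, sizeof(ok)));	/* 1 */
	printf("bad: %d\n", codecs_valid(bad, sizeof(bad)));	/* 0 */
	return 0;
}

Note the sketch subtracts the header size from the buffer length rather than adding it to the needed size, which sidesteps a second possible overflow in the comparison.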
// SPDX-License-Identifier: GPL-2.0-or-later /* cx25840 - Conexant CX25840 audio/video decoder driver * * Copyright (C) 2004 Ulf Eklund * * Based on the saa7115 driver and on the first version of Chris Kennedy's * cx25840 driver. * * Changes by Tyler Trafford <tatrafford@comcast.net> * - cleanup/rewrite for V4L2 API (2005) * * VBI support by Hans Verkuil <hverkuil@xs4all.nl>. * * NTSC sliced VBI support by Christopher Neufeld <television@cneufeld.ca> * with additional fixes by Hans Verkuil <hverkuil@xs4all.nl>. * * CX23885 support by Steven Toth <stoth@linuxtv.org>. * * CX2388[578] IRQ handling, IO Pin mux configuration and other small fixes are * Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net> * * CX23888 DIF support for the HVR1850 * Copyright (C) 2011 Steven Toth <stoth@kernellabs.com> * * CX2584x pin to pad mapping and output format configuration support are * Copyright (C) 2011 Maciej S.
Szmigiero <mail@maciej.szmigiero.name> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/math64.h> #include <media/v4l2-common.h> #include <media/drv-intf/cx25840.h> #include "cx25840-core.h" MODULE_DESCRIPTION("Conexant CX25840 audio/video decoder driver"); MODULE_AUTHOR("Ulf Eklund, Chris Kennedy, Hans Verkuil, Tyler Trafford"); MODULE_LICENSE("GPL"); #define CX25840_VID_INT_STAT_REG 0x410 #define CX25840_VID_INT_STAT_BITS 0x0000ffff #define CX25840_VID_INT_MASK_BITS 0xffff0000 #define CX25840_VID_INT_MASK_SHFT 16 #define CX25840_VID_INT_MASK_REG 0x412 #define CX23885_AUD_MC_INT_MASK_REG 0x80c #define CX23885_AUD_MC_INT_STAT_BITS 0xffff0000 #define CX23885_AUD_MC_INT_CTRL_BITS 0x0000ffff #define CX23885_AUD_MC_INT_STAT_SHFT 16 #define CX25840_AUD_INT_CTRL_REG 0x812 #define CX25840_AUD_INT_STAT_REG 0x813 #define CX23885_PIN_CTRL_IRQ_REG 0x123 #define CX23885_PIN_CTRL_IRQ_IR_STAT 0x40 #define CX23885_PIN_CTRL_IRQ_AUD_STAT 0x20 #define CX23885_PIN_CTRL_IRQ_VID_STAT 0x10 #define CX25840_IR_STATS_REG 0x210 #define CX25840_IR_IRQEN_REG 0x214 static int cx25840_debug; module_param_named(debug, cx25840_debug, int, 0644); MODULE_PARM_DESC(debug, "Debugging messages [0=Off (default) 1=On]"); /* ----------------------------------------------------------------------- */ static void cx23888_std_setup(struct i2c_client *client); int cx25840_write(struct i2c_client *client, u16 addr, u8 value) { u8 buffer[3]; buffer[0] = addr >> 8; buffer[1] = addr & 0xff; buffer[2] = value; return i2c_master_send(client, buffer, 3); } int cx25840_write4(struct i2c_client *client, u16 addr, u32 value) { u8 buffer[6]; buffer[0] = addr >> 8; buffer[1] = addr & 0xff; buffer[2] = value & 0xff; buffer[3] = (value >> 8) & 0xff; buffer[4] = (value >> 16) & 0xff; buffer[5] = value >> 24; return i2c_master_send(client, buffer, 6); } u8 cx25840_read(struct i2c_client *client, u16 addr) { struct i2c_msg msgs[2]; u8 tx_buf[2], rx_buf[1]; /* Write register address */ tx_buf[0] = addr >> 8; tx_buf[1] = addr & 0xff; msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = 2; msgs[0].buf = (char *)tx_buf; /* Read data from register */ msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = 1; msgs[1].buf = (char *)rx_buf; if (i2c_transfer(client->adapter, msgs, 2) < 2) return 0; return rx_buf[0]; } u32 cx25840_read4(struct i2c_client *client, u16 addr) { struct i2c_msg msgs[2]; u8 tx_buf[2], rx_buf[4]; /* Write register address */ tx_buf[0] = addr >> 8; tx_buf[1] = addr & 0xff; msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = 2; msgs[0].buf = (char *)tx_buf; /* Read data from registers */ msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = 4; msgs[1].buf = (char *)rx_buf; if (i2c_transfer(client->adapter, msgs, 2) < 2) return 0; return (rx_buf[3] << 24) | (rx_buf[2] << 16) | (rx_buf[1] << 8) | rx_buf[0]; } int cx25840_and_or(struct i2c_client *client, u16 addr, unsigned int and_mask, u8 or_value) { return cx25840_write(client, addr, (cx25840_read(client, addr) & and_mask) | or_value); } int cx25840_and_or4(struct i2c_client *client, u16 addr, u32 and_mask, u32 or_value) { return cx25840_write4(client, addr, (cx25840_read4(client, addr) & and_mask) | or_value); } /* ----------------------------------------------------------------------- */ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_input, enum cx25840_audio_input aud_input); /* 
----------------------------------------------------------------------- */ static int cx23885_s_io_pin_config(struct v4l2_subdev *sd, size_t n, struct v4l2_subdev_io_pin_config *p) { struct i2c_client *client = v4l2_get_subdevdata(sd); int i; u32 pin_ctrl; u8 gpio_oe, gpio_data, strength; pin_ctrl = cx25840_read4(client, 0x120); gpio_oe = cx25840_read(client, 0x160); gpio_data = cx25840_read(client, 0x164); for (i = 0; i < n; i++) { strength = p[i].strength; if (strength > CX25840_PIN_DRIVE_FAST) strength = CX25840_PIN_DRIVE_FAST; switch (p[i].pin) { case CX23885_PIN_IRQ_N_GPIO16: if (p[i].function != CX23885_PAD_IRQ_N) { /* GPIO16 */ pin_ctrl &= ~(0x1 << 25); } else { /* IRQ_N */ if (p[i].flags & (BIT(V4L2_SUBDEV_IO_PIN_DISABLE) | BIT(V4L2_SUBDEV_IO_PIN_INPUT))) { pin_ctrl &= ~(0x1 << 25); } else { pin_ctrl |= (0x1 << 25); } if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_ACTIVE_LOW)) { pin_ctrl &= ~(0x1 << 24); } else { pin_ctrl |= (0x1 << 24); } } break; case CX23885_PIN_IR_RX_GPIO19: if (p[i].function != CX23885_PAD_GPIO19) { /* IR_RX */ gpio_oe |= (0x1 << 0); pin_ctrl &= ~(0x3 << 18); pin_ctrl |= (strength << 18); } else { /* GPIO19 */ gpio_oe &= ~(0x1 << 0); if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_SET_VALUE)) { gpio_data &= ~(0x1 << 0); gpio_data |= ((p[i].value & 0x1) << 0); } pin_ctrl &= ~(0x3 << 12); pin_ctrl |= (strength << 12); } break; case CX23885_PIN_IR_TX_GPIO20: if (p[i].function != CX23885_PAD_GPIO20) { /* IR_TX */ gpio_oe |= (0x1 << 1); if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_DISABLE)) pin_ctrl &= ~(0x1 << 10); else pin_ctrl |= (0x1 << 10); pin_ctrl &= ~(0x3 << 18); pin_ctrl |= (strength << 18); } else { /* GPIO20 */ gpio_oe &= ~(0x1 << 1); if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_SET_VALUE)) { gpio_data &= ~(0x1 << 1); gpio_data |= ((p[i].value & 0x1) << 1); } pin_ctrl &= ~(0x3 << 12); pin_ctrl |= (strength << 12); } break; case CX23885_PIN_I2S_SDAT_GPIO21: if (p[i].function != CX23885_PAD_GPIO21) { /* I2S_SDAT */ /* TODO: Input or Output config */ gpio_oe |= (0x1 << 2); pin_ctrl &= ~(0x3 << 22); pin_ctrl |= (strength << 22); } else { /* GPIO21 */ gpio_oe &= ~(0x1 << 2); if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_SET_VALUE)) { gpio_data &= ~(0x1 << 2); gpio_data |= ((p[i].value & 0x1) << 2); } pin_ctrl &= ~(0x3 << 12); pin_ctrl |= (strength << 12); } break; case CX23885_PIN_I2S_WCLK_GPIO22: if (p[i].function != CX23885_PAD_GPIO22) { /* I2S_WCLK */ /* TODO: Input or Output config */ gpio_oe |= (0x1 << 3); pin_ctrl &= ~(0x3 << 22); pin_ctrl |= (strength << 22); } else { /* GPIO22 */ gpio_oe &= ~(0x1 << 3); if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_SET_VALUE)) { gpio_data &= ~(0x1 << 3); gpio_data |= ((p[i].value & 0x1) << 3); } pin_ctrl &= ~(0x3 << 12); pin_ctrl |= (strength << 12); } break; case CX23885_PIN_I2S_BCLK_GPIO23: if (p[i].function != CX23885_PAD_GPIO23) { /* I2S_BCLK */ /* TODO: Input or Output config */ gpio_oe |= (0x1 << 4); pin_ctrl &= ~(0x3 << 22); pin_ctrl |= (strength << 22); } else { /* GPIO23 */ gpio_oe &= ~(0x1 << 4); if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_SET_VALUE)) { gpio_data &= ~(0x1 << 4); gpio_data |= ((p[i].value & 0x1) << 4); } pin_ctrl &= ~(0x3 << 12); pin_ctrl |= (strength << 12); } break; } } cx25840_write(client, 0x164, gpio_data); cx25840_write(client, 0x160, gpio_oe); cx25840_write4(client, 0x120, pin_ctrl); return 0; } static u8 cx25840_function_to_pad(struct i2c_client *client, u8 function) { if (function > CX25840_PAD_VRESET) { v4l_err(client, "invalid function %u, assuming default\n", (unsigned int)function); return 0; } return function; } static 
void cx25840_set_invert(u8 *pinctrl3, u8 *voutctrl4, u8 function, u8 pin, bool invert) { switch (function) { case CX25840_PAD_IRQ_N: if (invert) *pinctrl3 &= ~2; else *pinctrl3 |= 2; break; case CX25840_PAD_ACTIVE: if (invert) *voutctrl4 |= BIT(2); else *voutctrl4 &= ~BIT(2); break; case CX25840_PAD_VACTIVE: if (invert) *voutctrl4 |= BIT(5); else *voutctrl4 &= ~BIT(5); break; case CX25840_PAD_CBFLAG: if (invert) *voutctrl4 |= BIT(4); else *voutctrl4 &= ~BIT(4); break; case CX25840_PAD_VRESET: if (invert) *voutctrl4 |= BIT(0); else *voutctrl4 &= ~BIT(0); break; } if (function != CX25840_PAD_DEFAULT) return; switch (pin) { case CX25840_PIN_DVALID_PRGM0: if (invert) *voutctrl4 |= BIT(6); else *voutctrl4 &= ~BIT(6); break; case CX25840_PIN_HRESET_PRGM2: if (invert) *voutctrl4 |= BIT(1); else *voutctrl4 &= ~BIT(1); break; } } static int cx25840_s_io_pin_config(struct v4l2_subdev *sd, size_t n, struct v4l2_subdev_io_pin_config *p) { struct i2c_client *client = v4l2_get_subdevdata(sd); unsigned int i; u8 pinctrl[6], pinconf[10], voutctrl4; for (i = 0; i < 6; i++) pinctrl[i] = cx25840_read(client, 0x114 + i); for (i = 0; i < 10; i++) pinconf[i] = cx25840_read(client, 0x11c + i); voutctrl4 = cx25840_read(client, 0x407); for (i = 0; i < n; i++) { u8 strength = p[i].strength; if (strength != CX25840_PIN_DRIVE_SLOW && strength != CX25840_PIN_DRIVE_MEDIUM && strength != CX25840_PIN_DRIVE_FAST) { v4l_err(client, "invalid drive speed for pin %u (%u), assuming fast\n", (unsigned int)p[i].pin, (unsigned int)strength); strength = CX25840_PIN_DRIVE_FAST; } switch (p[i].pin) { case CX25840_PIN_DVALID_PRGM0: if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_DISABLE)) pinctrl[0] &= ~BIT(6); else pinctrl[0] |= BIT(6); pinconf[3] &= 0xf0; pinconf[3] |= cx25840_function_to_pad(client, p[i].function); cx25840_set_invert(&pinctrl[3], &voutctrl4, p[i].function, CX25840_PIN_DVALID_PRGM0, p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_ACTIVE_LOW)); pinctrl[4] &= ~(3 << 2); /* CX25840_PIN_DRIVE_MEDIUM */ switch (strength) { case CX25840_PIN_DRIVE_SLOW: pinctrl[4] |= 1 << 2; break; case CX25840_PIN_DRIVE_FAST: pinctrl[4] |= 2 << 2; break; } break; case CX25840_PIN_HRESET_PRGM2: if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_DISABLE)) pinctrl[1] &= ~BIT(0); else pinctrl[1] |= BIT(0); pinconf[4] &= 0xf0; pinconf[4] |= cx25840_function_to_pad(client, p[i].function); cx25840_set_invert(&pinctrl[3], &voutctrl4, p[i].function, CX25840_PIN_HRESET_PRGM2, p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_ACTIVE_LOW)); pinctrl[4] &= ~(3 << 2); /* CX25840_PIN_DRIVE_MEDIUM */ switch (strength) { case CX25840_PIN_DRIVE_SLOW: pinctrl[4] |= 1 << 2; break; case CX25840_PIN_DRIVE_FAST: pinctrl[4] |= 2 << 2; break; } break; case CX25840_PIN_PLL_CLK_PRGM7: if (p[i].flags & BIT(V4L2_SUBDEV_IO_PIN_DISABLE)) pinctrl[2] &= ~BIT(2); else pinctrl[2] |= BIT(2); switch (p[i].function) { case CX25840_PAD_XTI_X5_DLL: pinconf[6] = 0; break; case CX25840_PAD_AUX_PLL: pinconf[6] = 1; break; case CX25840_PAD_VID_PLL: pinconf[6] = 5; break; case CX25840_PAD_XTI: pinconf[6] = 2; break; default: pinconf[6] = 3; pinconf[6] |= cx25840_function_to_pad(client, p[i].function) << 4; } break; default: v4l_err(client, "invalid or unsupported pin %u\n", (unsigned int)p[i].pin); break; } } cx25840_write(client, 0x407, voutctrl4); for (i = 0; i < 6; i++) cx25840_write(client, 0x114 + i, pinctrl[i]); for (i = 0; i < 10; i++) cx25840_write(client, 0x11c + i, pinconf[i]); return 0; } static int common_s_io_pin_config(struct v4l2_subdev *sd, size_t n, struct v4l2_subdev_io_pin_config *pincfg) { struct 
cx25840_state *state = to_state(sd); if (is_cx2388x(state)) return cx23885_s_io_pin_config(sd, n, pincfg); else if (is_cx2584x(state)) return cx25840_s_io_pin_config(sd, n, pincfg); return 0; } /* ----------------------------------------------------------------------- */ static void init_dll1(struct i2c_client *client) { /* * This is the Hauppauge sequence used to * initialize the Delay Lock Loop 1 (ADC DLL). */ cx25840_write(client, 0x159, 0x23); cx25840_write(client, 0x15a, 0x87); cx25840_write(client, 0x15b, 0x06); udelay(10); cx25840_write(client, 0x159, 0xe1); udelay(10); cx25840_write(client, 0x15a, 0x86); cx25840_write(client, 0x159, 0xe0); cx25840_write(client, 0x159, 0xe1); cx25840_write(client, 0x15b, 0x10); } static void init_dll2(struct i2c_client *client) { /* * This is the Hauppauge sequence used to * initialize the Delay Lock Loop 2 (ADC DLL). */ cx25840_write(client, 0x15d, 0xe3); cx25840_write(client, 0x15e, 0x86); cx25840_write(client, 0x15f, 0x06); udelay(10); cx25840_write(client, 0x15d, 0xe1); cx25840_write(client, 0x15d, 0xe0); cx25840_write(client, 0x15d, 0xe1); } static void cx25836_initialize(struct i2c_client *client) { /* *reset configuration is described on page 3-77 * of the CX25836 datasheet */ /* 2. */ cx25840_and_or(client, 0x000, ~0x01, 0x01); cx25840_and_or(client, 0x000, ~0x01, 0x00); /* 3a. */ cx25840_and_or(client, 0x15a, ~0x70, 0x00); /* 3b. */ cx25840_and_or(client, 0x15b, ~0x1e, 0x06); /* 3c. */ cx25840_and_or(client, 0x159, ~0x02, 0x02); /* 3d. */ udelay(10); /* 3e. */ cx25840_and_or(client, 0x159, ~0x02, 0x00); /* 3f. */ cx25840_and_or(client, 0x159, ~0xc0, 0xc0); /* 3g. */ cx25840_and_or(client, 0x159, ~0x01, 0x00); cx25840_and_or(client, 0x159, ~0x01, 0x01); /* 3h. */ cx25840_and_or(client, 0x15b, ~0x1e, 0x10); } static void cx25840_work_handler(struct work_struct *work) { struct cx25840_state *state = container_of(work, struct cx25840_state, fw_work); cx25840_loadfw(state->c); wake_up(&state->fw_wait); } #define CX25840_VCONFIG_SET_BIT(state, opt_msk, voc, idx, bit, oneval) \ do { \ if ((state)->vid_config & (opt_msk)) { \ if (((state)->vid_config & (opt_msk)) == \ (oneval)) \ (voc)[idx] |= BIT(bit); \ else \ (voc)[idx] &= ~BIT(bit); \ } \ } while (0) /* apply current vconfig to hardware regs */ static void cx25840_vconfig_apply(struct i2c_client *client) { struct cx25840_state *state = to_state(i2c_get_clientdata(client)); u8 voutctrl[3]; unsigned int i; for (i = 0; i < 3; i++) voutctrl[i] = cx25840_read(client, 0x404 + i); if (state->vid_config & CX25840_VCONFIG_FMT_MASK) voutctrl[0] &= ~3; switch (state->vid_config & CX25840_VCONFIG_FMT_MASK) { case CX25840_VCONFIG_FMT_BT656: voutctrl[0] |= 1; break; case CX25840_VCONFIG_FMT_VIP11: voutctrl[0] |= 2; break; case CX25840_VCONFIG_FMT_VIP2: voutctrl[0] |= 3; break; case CX25840_VCONFIG_FMT_BT601: /* zero */ default: break; } CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_RES_MASK, voutctrl, 0, 2, CX25840_VCONFIG_RES_10BIT); CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_VBIRAW_MASK, voutctrl, 0, 3, CX25840_VCONFIG_VBIRAW_ENABLED); CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_ANCDATA_MASK, voutctrl, 0, 4, CX25840_VCONFIG_ANCDATA_ENABLED); CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_TASKBIT_MASK, voutctrl, 0, 5, CX25840_VCONFIG_TASKBIT_ONE); CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_ACTIVE_MASK, voutctrl, 1, 2, CX25840_VCONFIG_ACTIVE_HORIZONTAL); CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_VALID_MASK, voutctrl, 1, 3, CX25840_VCONFIG_VALID_ANDACTIVE); CX25840_VCONFIG_SET_BIT(state, 
CX25840_VCONFIG_HRESETW_MASK, voutctrl, 1, 4, CX25840_VCONFIG_HRESETW_PIXCLK); if (state->vid_config & CX25840_VCONFIG_CLKGATE_MASK) voutctrl[1] &= ~(3 << 6); switch (state->vid_config & CX25840_VCONFIG_CLKGATE_MASK) { case CX25840_VCONFIG_CLKGATE_VALID: voutctrl[1] |= 2; break; case CX25840_VCONFIG_CLKGATE_VALIDACTIVE: voutctrl[1] |= 3; break; case CX25840_VCONFIG_CLKGATE_NONE: /* zero */ default: break; } CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_DCMODE_MASK, voutctrl, 2, 0, CX25840_VCONFIG_DCMODE_BYTES); CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_IDID0S_MASK, voutctrl, 2, 1, CX25840_VCONFIG_IDID0S_LINECNT); CX25840_VCONFIG_SET_BIT(state, CX25840_VCONFIG_VIPCLAMP_MASK, voutctrl, 2, 4, CX25840_VCONFIG_VIPCLAMP_ENABLED); for (i = 0; i < 3; i++) cx25840_write(client, 0x404 + i, voutctrl[i]); } static void cx25840_initialize(struct i2c_client *client) { DEFINE_WAIT(wait); struct cx25840_state *state = to_state(i2c_get_clientdata(client)); struct workqueue_struct *q; /* datasheet startup in numbered steps, refer to page 3-77 */ /* 2. */ cx25840_and_or(client, 0x803, ~0x10, 0x00); /* * The default of this register should be 4, but I get 0 instead. * Set this register to 4 manually. */ cx25840_write(client, 0x000, 0x04); /* 3. */ init_dll1(client); init_dll2(client); cx25840_write(client, 0x136, 0x0a); /* 4. */ cx25840_write(client, 0x13c, 0x01); cx25840_write(client, 0x13c, 0x00); /* 5. */ /* * Do the firmware load in a work handler to prevent. * Otherwise the kernel is blocked waiting for the * bit-banging i2c interface to finish uploading the * firmware. */ INIT_WORK(&state->fw_work, cx25840_work_handler); init_waitqueue_head(&state->fw_wait); q = create_singlethread_workqueue("cx25840_fw"); if (q) { prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); queue_work(q, &state->fw_work); schedule(); finish_wait(&state->fw_wait, &wait); destroy_workqueue(q); } /* 6. */ cx25840_write(client, 0x115, 0x8c); cx25840_write(client, 0x116, 0x07); cx25840_write(client, 0x118, 0x02); /* 7. */ cx25840_write(client, 0x4a5, 0x80); cx25840_write(client, 0x4a5, 0x00); cx25840_write(client, 0x402, 0x00); /* 8. */ cx25840_and_or(client, 0x401, ~0x18, 0); cx25840_and_or(client, 0x4a2, ~0x10, 0x10); /* steps 8c and 8d are done in change_input() */ /* 10. */ cx25840_write(client, 0x8d3, 0x1f); cx25840_write(client, 0x8e3, 0x03); cx25840_std_setup(client); /* trial and error says these are needed to get audio */ cx25840_write(client, 0x914, 0xa0); cx25840_write(client, 0x918, 0xa0); cx25840_write(client, 0x919, 0x01); /* stereo preferred */ cx25840_write(client, 0x809, 0x04); /* AC97 shift */ cx25840_write(client, 0x8cf, 0x0f); /* (re)set input */ set_input(client, state->vid_input, state->aud_input); if (state->generic_mode) cx25840_vconfig_apply(client); /* start microcontroller */ cx25840_and_or(client, 0x803, ~0x10, 0x10); } static void cx23885_initialize(struct i2c_client *client) { DEFINE_WAIT(wait); struct cx25840_state *state = to_state(i2c_get_clientdata(client)); u32 clk_freq = 0; struct workqueue_struct *q; /* cx23885 sets hostdata to clk_freq pointer */ if (v4l2_get_subdev_hostdata(&state->sd)) clk_freq = *((u32 *)v4l2_get_subdev_hostdata(&state->sd)); /* * Come out of digital power down * The CX23888, at least, needs this, otherwise registers aside from * 0x0-0x2 can't be read or written. 
*/ cx25840_write(client, 0x000, 0); /* Internal Reset */ cx25840_and_or(client, 0x102, ~0x01, 0x01); cx25840_and_or(client, 0x102, ~0x01, 0x00); /* Stop microcontroller */ cx25840_and_or(client, 0x803, ~0x10, 0x00); /* DIF in reset? */ cx25840_write(client, 0x398, 0); /* * Trust the default xtal, no division * '885: 28.636363... MHz * '887: 25.000000 MHz * '888: 50.000000 MHz */ cx25840_write(client, 0x2, 0x76); /* Power up all the PLL's and DLL */ cx25840_write(client, 0x1, 0x40); /* Sys PLL */ switch (state->id) { case CX23888_AV: /* * 50.0 MHz * (0xb + 0xe8ba26/0x2000000)/4 = 5 * 28.636363 MHz * 572.73 MHz before post divide */ if (clk_freq == 25000000) { /* 888/ImpactVCBe or 25Mhz xtal */ ; /* nothing to do */ } else { /* HVR1850 or 50MHz xtal */ cx25840_write(client, 0x2, 0x71); } cx25840_write4(client, 0x11c, 0x01d1744c); cx25840_write4(client, 0x118, 0x00000416); cx25840_write4(client, 0x404, 0x0010253e); cx25840_write4(client, 0x42c, 0x42600000); cx25840_write4(client, 0x44c, 0x161f1000); break; case CX23887_AV: /* * 25.0 MHz * (0x16 + 0x1d1744c/0x2000000)/4 = 5 * 28.636363 MHz * 572.73 MHz before post divide */ cx25840_write4(client, 0x11c, 0x01d1744c); cx25840_write4(client, 0x118, 0x00000416); break; case CX23885_AV: default: /* * 28.636363 MHz * (0x14 + 0x0/0x2000000)/4 = 5 * 28.636363 MHz * 572.73 MHz before post divide */ cx25840_write4(client, 0x11c, 0x00000000); cx25840_write4(client, 0x118, 0x00000414); break; } /* Disable DIF bypass */ cx25840_write4(client, 0x33c, 0x00000001); /* DIF Src phase inc */ cx25840_write4(client, 0x340, 0x0df7df83); /* * Vid PLL * Setup for a BT.656 pixel clock of 13.5 Mpixels/second * * 28.636363 MHz * (0xf + 0x02be2c9/0x2000000)/4 = 8 * 13.5 MHz * 432.0 MHz before post divide */ /* HVR1850 */ switch (state->id) { case CX23888_AV: if (clk_freq == 25000000) { /* 888/ImpactVCBe or 25MHz xtal */ cx25840_write4(client, 0x10c, 0x01b6db7b); cx25840_write4(client, 0x108, 0x00000512); } else { /* 888/HVR1250 or 50MHz xtal */ cx25840_write4(client, 0x10c, 0x13333333); cx25840_write4(client, 0x108, 0x00000515); } break; default: cx25840_write4(client, 0x10c, 0x002be2c9); cx25840_write4(client, 0x108, 0x0000040f); } /* Luma */ cx25840_write4(client, 0x414, 0x00107d12); /* Chroma */ if (is_cx23888(state)) cx25840_write4(client, 0x418, 0x1d008282); else cx25840_write4(client, 0x420, 0x3d008282); /* * Aux PLL * Initial setup for audio sample clock: * 48 ksps, 16 bits/sample, x160 multiplier = 122.88 MHz * Initial I2S output/master clock(?): * 48 ksps, 16 bits/sample, x16 multiplier = 12.288 MHz */ switch (state->id) { case CX23888_AV: /* * 50.0 MHz * (0x7 + 0x0bedfa4/0x2000000)/3 = 122.88 MHz * 368.64 MHz before post divide * 122.88 MHz / 0xa = 12.288 MHz */ /* HVR1850 or 50MHz xtal or 25MHz xtal */ cx25840_write4(client, 0x114, 0x017dbf48); cx25840_write4(client, 0x110, 0x000a030e); break; case CX23887_AV: /* * 25.0 MHz * (0xe + 0x17dbf48/0x2000000)/3 = 122.88 MHz * 368.64 MHz before post divide * 122.88 MHz / 0xa = 12.288 MHz */ cx25840_write4(client, 0x114, 0x017dbf48); cx25840_write4(client, 0x110, 0x000a030e); break; case CX23885_AV: default: /* * 28.636363 MHz * (0xc + 0x1bf0c9e/0x2000000)/3 = 122.88 MHz * 368.64 MHz before post divide * 122.88 MHz / 0xa = 12.288 MHz */ cx25840_write4(client, 0x114, 0x01bf0c9e); cx25840_write4(client, 0x110, 0x000a030c); break; } /* ADC2 input select */ cx25840_write(client, 0x102, 0x10); /* VIN1 & VIN5 */ cx25840_write(client, 0x103, 0x11); /* Enable format auto detect */ cx25840_write(client, 0x400, 0); /* Fast 
	/* Fast subchroma lock */
	/* White crush, Chroma AGC & Chroma Killer enabled */
	cx25840_write(client, 0x401, 0xe8);

	/* Select AFE clock pad output source */
	cx25840_write(client, 0x144, 0x05);

	/*
	 * Drive GPIO2 direction and values for HVR1700
	 * where an onboard mux selects the output of demodulator
	 * vs the 417. Failure to set this results in no DTV.
	 * It's safe to set this across all Hauppauge boards
	 * currently, regardless of the board type.
	 */
	cx25840_write(client, 0x160, 0x1d);
	cx25840_write(client, 0x164, 0x00);

	/*
	 * Do the firmware load in a work handler so it does not block here;
	 * otherwise the kernel is stuck waiting for the bit-banging i2c
	 * interface to finish uploading the firmware.
	 */
	INIT_WORK(&state->fw_work, cx25840_work_handler);
	init_waitqueue_head(&state->fw_wait);
	q = create_singlethread_workqueue("cx25840_fw");
	if (q) {
		prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
		queue_work(q, &state->fw_work);
		schedule();
		finish_wait(&state->fw_wait, &wait);
		destroy_workqueue(q);
	}

	/*
	 * Call the cx23888 specific std setup func, we no longer rely on
	 * the generic cx25840 func.
	 */
	if (is_cx23888(state))
		cx23888_std_setup(client);
	else
		cx25840_std_setup(client);

	/* (re)set input */
	set_input(client, state->vid_input, state->aud_input);

	/* start microcontroller */
	cx25840_and_or(client, 0x803, ~0x10, 0x10);

	/* Disable and clear video interrupts - we don't use them */
	cx25840_write4(client, CX25840_VID_INT_STAT_REG, 0xffffffff);

	/* Disable and clear audio interrupts - we don't use them */
	cx25840_write(client, CX25840_AUD_INT_CTRL_REG, 0xff);
	cx25840_write(client, CX25840_AUD_INT_STAT_REG, 0xff);

	/* CC raw enable */

	/*
	 * - VIP 1.1 control codes - 10bit, blue field enable.
	 * - enable raw data during vertical blanking.
	 * - enable ancillary Data insertion for 656 or VIP.
	 */
	cx25840_write4(client, 0x404, 0x0010253e);

	/* CC on - VBI_LINE_CTRL3, FLD_VBI_MD_LINE12 */
	cx25840_write(client, state->vbi_regs_offset + 0x42f, 0x66);

	/* HVR-1250 / HVR1850 DIF related */
	/* Power everything up */
	cx25840_write4(client, 0x130, 0x0);

	/* SRC_COMB_CFG */
	if (is_cx23888(state))
		cx25840_write4(client, 0x454, 0x6628021F);
	else
		cx25840_write4(client, 0x478, 0x6628021F);

	/* AFE_CLK_OUT_CTRL - Select the clock output source as output */
	cx25840_write4(client, 0x144, 0x5);

	/*
	 * I2S_OUT_CTL - I2S output configuration as
	 * Master, Sony, Left justified, left sample on WS=1
	 */
	cx25840_write4(client, 0x918, 0x1a0);

	/* AFE_DIAG_CTRL1 */
	cx25840_write4(client, 0x134, 0x000a1800);

	/* AFE_DIAG_CTRL3 - Inverted Polarity for Audio and Video */
	cx25840_write4(client, 0x13c, 0x00310000);
}

/* ----------------------------------------------------------------------- */

static void cx231xx_initialize(struct i2c_client *client)
{
	DEFINE_WAIT(wait);
	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
	struct workqueue_struct *q;

	/* Internal Reset */
	cx25840_and_or(client, 0x102, ~0x01, 0x01);
	cx25840_and_or(client, 0x102, ~0x01, 0x00);

	/* Stop microcontroller */
	cx25840_and_or(client, 0x803, ~0x10, 0x00);

	/* DIF in reset?
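	 * (On the cx2388x AV cores set_input() later reprograms the DIF
	 * when a tuner/DIF input is selected; here it simply stays in
	 * reset.)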
	 */
	cx25840_write(client, 0x398, 0);

	/* Trust the default xtal, no division */
	/* This changes for the cx23888 products */
	cx25840_write(client, 0x2, 0x76);

	/* Bring down the regulator for AUX clk */
	cx25840_write(client, 0x1, 0x40);

	/* Disable DIF bypass */
	cx25840_write4(client, 0x33c, 0x00000001);

	/* DIF Src phase inc */
	cx25840_write4(client, 0x340, 0x0df7df83);

	/* Luma */
	cx25840_write4(client, 0x414, 0x00107d12);

	/* Chroma */
	cx25840_write4(client, 0x420, 0x3d008282);

	/* ADC2 input select */
	cx25840_write(client, 0x102, 0x10);

	/* VIN1 & VIN5 */
	cx25840_write(client, 0x103, 0x11);

	/* Enable format auto detect */
	cx25840_write(client, 0x400, 0);

	/* Fast subchroma lock */
	/* White crush, Chroma AGC & Chroma Killer enabled */
	cx25840_write(client, 0x401, 0xe8);

	/*
	 * Do the firmware load in a work handler so it does not block here;
	 * otherwise the kernel is stuck waiting for the bit-banging i2c
	 * interface to finish uploading the firmware.
	 */
	INIT_WORK(&state->fw_work, cx25840_work_handler);
	init_waitqueue_head(&state->fw_wait);
	q = create_singlethread_workqueue("cx25840_fw");
	if (q) {
		prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
		queue_work(q, &state->fw_work);
		schedule();
		finish_wait(&state->fw_wait, &wait);
		destroy_workqueue(q);
	}

	cx25840_std_setup(client);

	/* (re)set input */
	set_input(client, state->vid_input, state->aud_input);

	/* start microcontroller */
	cx25840_and_or(client, 0x803, ~0x10, 0x10);

	/* CC raw enable */
	cx25840_write(client, 0x404, 0x0b);

	/* CC on */
	cx25840_write(client, 0x42f, 0x66);
	cx25840_write4(client, 0x474, 0x1e1e601a);
}

/* ----------------------------------------------------------------------- */

void cx25840_std_setup(struct i2c_client *client)
{
	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
	v4l2_std_id std = state->std;
	int hblank, hactive, burst, vblank, vactive, sc;
	int vblank656, src_decimation;
	int luma_lpf, uv_lpf, comb;
	u32 pll_int, pll_frac, pll_post;

	/* datasheet startup, step 8d */
	if (std & ~V4L2_STD_NTSC)
		cx25840_write(client, 0x49f, 0x11);
	else
		cx25840_write(client, 0x49f, 0x14);

	/* generic mode uses the values that the chip autoconfig would set */
	if (std & V4L2_STD_625_50) {
		hblank = 132;
		hactive = 720;
		burst = 93;
		if (state->generic_mode) {
			vblank = 34;
			vactive = 576;
			vblank656 = 38;
		} else {
			vblank = 36;
			vactive = 580;
			vblank656 = 40;
		}
		src_decimation = 0x21f;
		luma_lpf = 2;
		if (std & V4L2_STD_SECAM) {
			uv_lpf = 0;
			comb = 0;
			sc = 0x0a425f;
		} else if (std == V4L2_STD_PAL_Nc) {
			if (state->generic_mode) {
				burst = 95;
				luma_lpf = 1;
			}
			uv_lpf = 1;
			comb = 0x20;
			sc = 556453;
		} else {
			uv_lpf = 1;
			comb = 0x20;
			sc = 688739;
		}
	} else {
		hactive = 720;
		hblank = 122;
		vactive = 487;
		luma_lpf = 1;
		uv_lpf = 1;
		if (state->generic_mode) {
			vblank = 20;
			vblank656 = 24;
		}
		src_decimation = 0x21f;
		if (std == V4L2_STD_PAL_60) {
			if (!state->generic_mode) {
				vblank = 26;
				vblank656 = 26;
				burst = 0x5b;
			} else {
				burst = 0x59;
			}
			luma_lpf = 2;
			comb = 0x20;
			sc = 688739;
		} else if (std == V4L2_STD_PAL_M) {
			vblank = 20;
			vblank656 = 24;
			burst = 0x61;
			comb = 0x20;
			sc = 555452;
		} else {
			if (!state->generic_mode) {
				vblank = 26;
				vblank656 = 26;
			}
			burst = 0x5b;
			comb = 0x66;
			sc = 556063;
		}
	}

	/* DEBUG: Displays configured PLL frequency */
	if (!is_cx231xx(state)) {
		pll_int = cx25840_read(client, 0x108);
		pll_frac = cx25840_read4(client, 0x10c) & 0x1ffffff;
		pll_post = cx25840_read(client, 0x109);
		v4l_dbg(1, cx25840_debug, client,
			"PLL regs = int: %u, frac: %u, post: %u\n",
			pll_int, pll_frac, pll_post);

		if (pll_post) {
			int fin, fsc;
			int pll = (28636363L *
((((u64)pll_int) << 25L) + pll_frac)) >> 25L; pll /= pll_post; v4l_dbg(1, cx25840_debug, client, "PLL = %d.%06d MHz\n", pll / 1000000, pll % 1000000); v4l_dbg(1, cx25840_debug, client, "PLL/8 = %d.%06d MHz\n", pll / 8000000, (pll / 8) % 1000000); fin = ((u64)src_decimation * pll) >> 12; v4l_dbg(1, cx25840_debug, client, "ADC Sampling freq = %d.%06d MHz\n", fin / 1000000, fin % 1000000); fsc = (((u64)sc) * pll) >> 24L; v4l_dbg(1, cx25840_debug, client, "Chroma sub-carrier freq = %d.%06d MHz\n", fsc / 1000000, fsc % 1000000); v4l_dbg(1, cx25840_debug, client, "hblank %i, hactive %i, vblank %i, vactive %i, vblank656 %i, src_dec %i, burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, sc 0x%06x\n", hblank, hactive, vblank, vactive, vblank656, src_decimation, burst, luma_lpf, uv_lpf, comb, sc); } } /* Sets horizontal blanking delay and active lines */ cx25840_write(client, 0x470, hblank); cx25840_write(client, 0x471, (((hblank >> 8) & 0x3) | (hactive << 4)) & 0xff); cx25840_write(client, 0x472, hactive >> 4); /* Sets burst gate delay */ cx25840_write(client, 0x473, burst); /* Sets vertical blanking delay and active duration */ cx25840_write(client, 0x474, vblank); cx25840_write(client, 0x475, (((vblank >> 8) & 0x3) | (vactive << 4)) & 0xff); cx25840_write(client, 0x476, vactive >> 4); cx25840_write(client, 0x477, vblank656); /* Sets src decimation rate */ cx25840_write(client, 0x478, src_decimation & 0xff); cx25840_write(client, 0x479, (src_decimation >> 8) & 0xff); /* Sets Luma and UV Low pass filters */ cx25840_write(client, 0x47a, luma_lpf << 6 | ((uv_lpf << 4) & 0x30)); /* Enables comb filters */ cx25840_write(client, 0x47b, comb); /* Sets SC Step*/ cx25840_write(client, 0x47c, sc); cx25840_write(client, 0x47d, (sc >> 8) & 0xff); cx25840_write(client, 0x47e, (sc >> 16) & 0xff); /* Sets VBI parameters */ if (std & V4L2_STD_625_50) { cx25840_write(client, 0x47f, 0x01); state->vbi_line_offset = 5; } else { cx25840_write(client, 0x47f, 0x00); state->vbi_line_offset = 8; } } /* ----------------------------------------------------------------------- */ static void input_change(struct i2c_client *client) { struct cx25840_state *state = to_state(i2c_get_clientdata(client)); v4l2_std_id std = state->std; /* Follow step 8c and 8d of section 3.16 in the cx25840 datasheet */ if (std & V4L2_STD_SECAM) { cx25840_write(client, 0x402, 0); } else { cx25840_write(client, 0x402, 0x04); cx25840_write(client, 0x49f, (std & V4L2_STD_NTSC) ? 0x14 : 0x11); } cx25840_and_or(client, 0x401, ~0x60, 0); cx25840_and_or(client, 0x401, ~0x60, 0x60); /* Don't write into audio registers on cx2583x chips */ if (is_cx2583x(state)) return; cx25840_and_or(client, 0x810, ~0x01, 1); if (state->radio) { cx25840_write(client, 0x808, 0xf9); cx25840_write(client, 0x80b, 0x00); } else if (std & V4L2_STD_525_60) { /* * Certain Hauppauge PVR150 models have a hardware bug * that causes audio to drop out. For these models the * audio standard must be set explicitly. * To be precise: it affects cards with tuner models * 85, 99 and 112 (model numbers from tveeprom). */ int hw_fix = state->pvr150_workaround; if (std == V4L2_STD_NTSC_M_JP) { /* Japan uses EIAJ audio standard */ cx25840_write(client, 0x808, hw_fix ? 0x2f : 0xf7); } else if (std == V4L2_STD_NTSC_M_KR) { /* South Korea uses A2 audio standard */ cx25840_write(client, 0x808, hw_fix ? 0x3f : 0xf8); } else { /* Others use the BTSC audio standard */ cx25840_write(client, 0x808, hw_fix ? 
0x1f : 0xf6); } cx25840_write(client, 0x80b, 0x00); } else if (std & V4L2_STD_PAL) { /* Autodetect audio standard and audio system */ cx25840_write(client, 0x808, 0xff); /* * Since system PAL-L is pretty much non-existent and * not used by any public broadcast network, force * 6.5 MHz carrier to be interpreted as System DK, * this avoids DK audio detection instability */ cx25840_write(client, 0x80b, 0x00); } else if (std & V4L2_STD_SECAM) { /* Autodetect audio standard and audio system */ cx25840_write(client, 0x808, 0xff); /* * If only one of SECAM-DK / SECAM-L is required, then force * 6.5MHz carrier, else autodetect it */ if ((std & V4L2_STD_SECAM_DK) && !(std & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC))) { /* 6.5 MHz carrier to be interpreted as System DK */ cx25840_write(client, 0x80b, 0x00); } else if (!(std & V4L2_STD_SECAM_DK) && (std & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC))) { /* 6.5 MHz carrier to be interpreted as System L */ cx25840_write(client, 0x80b, 0x08); } else { /* 6.5 MHz carrier to be autodetected */ cx25840_write(client, 0x80b, 0x10); } } cx25840_and_or(client, 0x810, ~0x01, 0); } static int set_input(struct i2c_client *client, enum cx25840_video_input vid_input, enum cx25840_audio_input aud_input) { struct cx25840_state *state = to_state(i2c_get_clientdata(client)); u8 is_composite = (vid_input >= CX25840_COMPOSITE1 && vid_input <= CX25840_COMPOSITE8); u8 is_component = (vid_input & CX25840_COMPONENT_ON) == CX25840_COMPONENT_ON; u8 is_dif = (vid_input & CX25840_DIF_ON) == CX25840_DIF_ON; u8 is_svideo = (vid_input & CX25840_SVIDEO_ON) == CX25840_SVIDEO_ON; int luma = vid_input & 0xf0; int chroma = vid_input & 0xf00; u8 reg; u32 val; v4l_dbg(1, cx25840_debug, client, "decoder set video input %d, audio input %d\n", vid_input, aud_input); if (vid_input >= CX25840_VIN1_CH1) { v4l_dbg(1, cx25840_debug, client, "vid_input 0x%x\n", vid_input); reg = vid_input & 0xff; is_composite = !is_component && ((vid_input & CX25840_SVIDEO_ON) != CX25840_SVIDEO_ON); v4l_dbg(1, cx25840_debug, client, "mux cfg 0x%x comp=%d\n", reg, is_composite); } else if (is_composite) { reg = 0xf0 + (vid_input - CX25840_COMPOSITE1); } else { if ((vid_input & ~0xff0) || luma < CX25840_SVIDEO_LUMA1 || luma > CX25840_SVIDEO_LUMA8 || chroma < CX25840_SVIDEO_CHROMA4 || chroma > CX25840_SVIDEO_CHROMA8) { v4l_err(client, "0x%04x is not a valid video input!\n", vid_input); return -EINVAL; } reg = 0xf0 + ((luma - CX25840_SVIDEO_LUMA1) >> 4); if (chroma >= CX25840_SVIDEO_CHROMA7) { reg &= 0x3f; reg |= (chroma - CX25840_SVIDEO_CHROMA7) >> 2; } else { reg &= 0xcf; reg |= (chroma - CX25840_SVIDEO_CHROMA4) >> 4; } } /* The caller has previously prepared the correct routing * configuration in reg (for the cx23885) so we have no * need to attempt to flip bits for earlier av decoders. */ if (!is_cx2388x(state) && !is_cx231xx(state)) { switch (aud_input) { case CX25840_AUDIO_SERIAL: /* do nothing, use serial audio input */ break; case CX25840_AUDIO4: reg &= ~0x30; break; case CX25840_AUDIO5: reg &= ~0x30; reg |= 0x10; break; case CX25840_AUDIO6: reg &= ~0x30; reg |= 0x20; break; case CX25840_AUDIO7: reg &= ~0xc0; break; case CX25840_AUDIO8: reg &= ~0xc0; reg |= 0x40; break; default: v4l_err(client, "0x%04x is not a valid audio input!\n", aud_input); return -EINVAL; } } cx25840_write(client, 0x103, reg); /* Set INPUT_MODE to Composite, S-Video or Component */ if (is_component) cx25840_and_or(client, 0x401, ~0x6, 0x6); else cx25840_and_or(client, 0x401, ~0x6, is_composite ? 
0 : 0x02); if (is_cx2388x(state)) { /* Enable or disable the DIF for tuner use */ if (is_dif) { cx25840_and_or(client, 0x102, ~0x80, 0x80); /* Set of defaults for NTSC and PAL */ cx25840_write4(client, 0x31c, 0xc2262600); cx25840_write4(client, 0x320, 0xc2262600); /* 18271 IF - Nobody else yet uses a different * tuner with the DIF, so these are reasonable * assumptions (HVR1250 and HVR1850 specific). */ cx25840_write4(client, 0x318, 0xda262600); cx25840_write4(client, 0x33c, 0x2a24c800); cx25840_write4(client, 0x104, 0x0704dd00); } else { cx25840_write4(client, 0x300, 0x015c28f5); cx25840_and_or(client, 0x102, ~0x80, 0); cx25840_write4(client, 0x340, 0xdf7df83); cx25840_write4(client, 0x104, 0x0704dd80); cx25840_write4(client, 0x314, 0x22400600); cx25840_write4(client, 0x318, 0x40002600); cx25840_write4(client, 0x324, 0x40002600); cx25840_write4(client, 0x32c, 0x0250e620); cx25840_write4(client, 0x39c, 0x01FF0B00); cx25840_write4(client, 0x410, 0xffff0dbf); cx25840_write4(client, 0x414, 0x00137d03); if (is_cx23888(state)) { /* 888 MISC_TIM_CTRL */ cx25840_write4(client, 0x42c, 0x42600000); /* 888 FIELD_COUNT */ cx25840_write4(client, 0x430, 0x0000039b); /* 888 VSCALE_CTRL */ cx25840_write4(client, 0x438, 0x00000000); /* 888 DFE_CTRL1 */ cx25840_write4(client, 0x440, 0xF8E3E824); /* 888 DFE_CTRL2 */ cx25840_write4(client, 0x444, 0x401040dc); /* 888 DFE_CTRL3 */ cx25840_write4(client, 0x448, 0xcd3f02a0); /* 888 PLL_CTRL */ cx25840_write4(client, 0x44c, 0x161f1000); /* 888 HTL_CTRL */ cx25840_write4(client, 0x450, 0x00000802); } cx25840_write4(client, 0x91c, 0x01000000); cx25840_write4(client, 0x8e0, 0x03063870); cx25840_write4(client, 0x8d4, 0x7FFF0024); cx25840_write4(client, 0x8d0, 0x00063073); cx25840_write4(client, 0x8c8, 0x00010000); cx25840_write4(client, 0x8cc, 0x00080023); /* DIF BYPASS */ cx25840_write4(client, 0x33c, 0x2a04c800); } /* Reset the DIF */ cx25840_write4(client, 0x398, 0); } if (!is_cx2388x(state) && !is_cx231xx(state)) { /* Set CH_SEL_ADC2 to 1 if input comes from CH3 */ cx25840_and_or(client, 0x102, ~0x2, (reg & 0x80) == 0 ? 2 : 0); /* Set DUAL_MODE_ADC2 to 1 if input comes from both CH2&CH3 */ if ((reg & 0xc0) != 0xc0 && (reg & 0x30) != 0x30) cx25840_and_or(client, 0x102, ~0x4, 4); else cx25840_and_or(client, 0x102, ~0x4, 0); } else { /* Set DUAL_MODE_ADC2 to 1 if component*/ cx25840_and_or(client, 0x102, ~0x4, is_component ? 0x4 : 0x0); if (is_composite) { /* ADC2 input select channel 2 */ cx25840_and_or(client, 0x102, ~0x2, 0); } else if (!is_component) { /* S-Video */ if (chroma >= CX25840_SVIDEO_CHROMA7) { /* ADC2 input select channel 3 */ cx25840_and_or(client, 0x102, ~0x2, 2); } else { /* ADC2 input select channel 2 */ cx25840_and_or(client, 0x102, ~0x2, 0); } } /* cx23885 / SVIDEO */ if (is_cx2388x(state) && is_svideo) { #define AFE_CTRL (0x104) #define MODE_CTRL (0x400) cx25840_and_or(client, 0x102, ~0x2, 0x2); val = cx25840_read4(client, MODE_CTRL); val &= 0xFFFFF9FF; /* YC */ val |= 0x00000200; val &= ~0x2000; cx25840_write4(client, MODE_CTRL, val); val = cx25840_read4(client, AFE_CTRL); /* Chroma in select */ val |= 0x00001000; val &= 0xfffffe7f; /* Clear VGA_SEL_CH2 and VGA_SEL_CH3 (bits 7 and 8). * This sets them to use video rather than audio. * Only one of the two will be in use. 
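			 * Which of the two carries video follows the
			 * chroma routing chosen above: CH3 when chroma
			 * arrives on CHROMA7/CHROMA8, CH2 otherwise.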
*/ cx25840_write4(client, AFE_CTRL, val); } else { cx25840_and_or(client, 0x102, ~0x2, 0); } } state->vid_input = vid_input; state->aud_input = aud_input; cx25840_audio_set_path(client); input_change(client); if (is_cx2388x(state)) { /* Audio channel 1 src : Parallel 1 */ cx25840_write(client, 0x124, 0x03); /* Select AFE clock pad output source */ cx25840_write(client, 0x144, 0x05); /* I2S_IN_CTL: I2S_IN_SONY_MODE, LEFT SAMPLE on WS=1 */ cx25840_write(client, 0x914, 0xa0); /* I2S_OUT_CTL: * I2S_IN_SONY_MODE, LEFT SAMPLE on WS=1 * I2S_OUT_MASTER_MODE = Master */ cx25840_write(client, 0x918, 0xa0); cx25840_write(client, 0x919, 0x01); } else if (is_cx231xx(state)) { /* Audio channel 1 src : Parallel 1 */ cx25840_write(client, 0x124, 0x03); /* I2S_IN_CTL: I2S_IN_SONY_MODE, LEFT SAMPLE on WS=1 */ cx25840_write(client, 0x914, 0xa0); /* I2S_OUT_CTL: * I2S_IN_SONY_MODE, LEFT SAMPLE on WS=1 * I2S_OUT_MASTER_MODE = Master */ cx25840_write(client, 0x918, 0xa0); cx25840_write(client, 0x919, 0x01); } if (is_cx2388x(state) && ((aud_input == CX25840_AUDIO7) || (aud_input == CX25840_AUDIO6))) { /* Configure audio from LR1 or LR2 input */ cx25840_write4(client, 0x910, 0); cx25840_write4(client, 0x8d0, 0x63073); } else if (is_cx2388x(state) && (aud_input == CX25840_AUDIO8)) { /* Configure audio from tuner/sif input */ cx25840_write4(client, 0x910, 0x12b000c9); cx25840_write4(client, 0x8d0, 0x1f063870); } if (is_cx23888(state)) { /* * HVR1850 * * AUD_IO_CTRL - I2S Input, Parallel1 * - Channel 1 src - Parallel1 (Merlin out) * - Channel 2 src - Parallel2 (Merlin out) * - Channel 3 src - Parallel3 (Merlin AC97 out) * - I2S source and dir - Merlin, output */ cx25840_write4(client, 0x124, 0x100); if (!is_dif) { /* * Stop microcontroller if we don't need it * to avoid audio popping on svideo/composite use. */ cx25840_and_or(client, 0x803, ~0x10, 0x00); } } return 0; } /* ----------------------------------------------------------------------- */ static int set_v4lstd(struct i2c_client *client) { struct cx25840_state *state = to_state(i2c_get_clientdata(client)); u8 fmt = 0; /* zero is autodetect */ u8 pal_m = 0; /* First tests should be against specific std */ if (state->std == V4L2_STD_NTSC_M_JP) { fmt = 0x2; } else if (state->std == V4L2_STD_NTSC_443) { fmt = 0x3; } else if (state->std == V4L2_STD_PAL_M) { pal_m = 1; fmt = 0x5; } else if (state->std == V4L2_STD_PAL_N) { fmt = 0x6; } else if (state->std == V4L2_STD_PAL_Nc) { fmt = 0x7; } else if (state->std == V4L2_STD_PAL_60) { fmt = 0x8; } else { /* Then, test against generic ones */ if (state->std & V4L2_STD_NTSC) fmt = 0x1; else if (state->std & V4L2_STD_PAL) fmt = 0x4; else if (state->std & V4L2_STD_SECAM) fmt = 0xc; } v4l_dbg(1, cx25840_debug, client, "changing video std to fmt %i\n", fmt); /* * Follow step 9 of section 3.16 in the cx25840 datasheet. * Without this PAL may display a vertical ghosting effect. * This happens for example with the Yuan MPC622. 
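	 * The workaround below briefly forces NTSC-M and turns the luma
	 * comb filter off before the real PAL format is programmed.
	 */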
*/ if (fmt >= 4 && fmt < 8) { /* Set format to NTSC-M */ cx25840_and_or(client, 0x400, ~0xf, 1); /* Turn off LCOMB */ cx25840_and_or(client, 0x47b, ~6, 0); } cx25840_and_or(client, 0x400, ~0xf, fmt); cx25840_and_or(client, 0x403, ~0x3, pal_m); if (is_cx23888(state)) cx23888_std_setup(client); else cx25840_std_setup(client); if (!is_cx2583x(state)) input_change(client); return 0; } /* ----------------------------------------------------------------------- */ static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct cx25840_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: cx25840_write(client, 0x414, ctrl->val - 128); break; case V4L2_CID_CONTRAST: cx25840_write(client, 0x415, ctrl->val << 1); break; case V4L2_CID_SATURATION: if (is_cx23888(state)) { cx25840_write(client, 0x418, ctrl->val << 1); cx25840_write(client, 0x419, ctrl->val << 1); } else { cx25840_write(client, 0x420, ctrl->val << 1); cx25840_write(client, 0x421, ctrl->val << 1); } break; case V4L2_CID_HUE: if (is_cx23888(state)) cx25840_write(client, 0x41a, ctrl->val); else cx25840_write(client, 0x422, ctrl->val); break; default: return -EINVAL; } return 0; } /* ----------------------------------------------------------------------- */ static int cx25840_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct v4l2_mbus_framefmt *fmt = &format->format; struct cx25840_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); u32 hsc, vsc, v_src, h_src, v_add; int filter; int is_50hz = !(state->std & V4L2_STD_525_60); if (format->pad || fmt->code != MEDIA_BUS_FMT_FIXED) return -EINVAL; fmt->field = V4L2_FIELD_INTERLACED; fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; if (is_cx23888(state)) { v_src = (cx25840_read(client, 0x42a) & 0x3f) << 4; v_src |= (cx25840_read(client, 0x429) & 0xf0) >> 4; } else { v_src = (cx25840_read(client, 0x476) & 0x3f) << 4; v_src |= (cx25840_read(client, 0x475) & 0xf0) >> 4; } if (is_cx23888(state)) { h_src = (cx25840_read(client, 0x426) & 0x3f) << 4; h_src |= (cx25840_read(client, 0x425) & 0xf0) >> 4; } else { h_src = (cx25840_read(client, 0x472) & 0x3f) << 4; h_src |= (cx25840_read(client, 0x471) & 0xf0) >> 4; } if (!state->generic_mode) { v_add = is_50hz ? 4 : 7; /* * cx23888 in 525-line mode is programmed for 486 active lines * while other chips use 487 active lines. * * See reg 0x428 bits [21:12] in cx23888_std_setup() vs * vactive in cx25840_std_setup(). 
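		 * The one-line difference is absorbed by the v_add
		 * decrement right below; v_add later feeds the vertical
		 * scale term (fmt->height + v_add).
		 */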
*/ if (is_cx23888(state) && !is_50hz) v_add--; } else { v_add = 0; } if (h_src == 0 || v_src <= v_add) { v4l_err(client, "chip reported picture size (%u x %u) is far too small\n", (unsigned int)h_src, (unsigned int)v_src); /* * that's the best we can do since the output picture * size is completely unknown in this case */ return -EINVAL; } fmt->width = clamp(fmt->width, (h_src + 15) / 16, h_src); if (v_add * 8 >= v_src) fmt->height = clamp(fmt->height, (u32)1, v_src - v_add); else fmt->height = clamp(fmt->height, (v_src - v_add * 8 + 7) / 8, v_src - v_add); if (format->which == V4L2_SUBDEV_FORMAT_TRY) return 0; hsc = (h_src * (1 << 20)) / fmt->width - (1 << 20); vsc = (1 << 16) - (v_src * (1 << 9) / (fmt->height + v_add) - (1 << 9)); vsc &= 0x1fff; if (fmt->width >= 385) filter = 0; else if (fmt->width > 192) filter = 1; else if (fmt->width > 96) filter = 2; else filter = 3; v4l_dbg(1, cx25840_debug, client, "decoder set size %u x %u with scale %x x %x\n", (unsigned int)fmt->width, (unsigned int)fmt->height, (unsigned int)hsc, (unsigned int)vsc); /* HSCALE=hsc */ if (is_cx23888(state)) { cx25840_write4(client, 0x434, hsc | (1 << 24)); /* VSCALE=vsc VS_INTRLACE=1 VFILT=filter */ cx25840_write4(client, 0x438, vsc | (1 << 19) | (filter << 16)); } else { cx25840_write(client, 0x418, hsc & 0xff); cx25840_write(client, 0x419, (hsc >> 8) & 0xff); cx25840_write(client, 0x41a, hsc >> 16); /* VSCALE=vsc */ cx25840_write(client, 0x41c, vsc & 0xff); cx25840_write(client, 0x41d, vsc >> 8); /* VS_INTRLACE=1 VFILT=filter */ cx25840_write(client, 0x41e, 0x8 | filter); } return 0; } /* ----------------------------------------------------------------------- */ static void log_video_status(struct i2c_client *client) { static const char *const fmt_strs[] = { "0x0", "NTSC-M", "NTSC-J", "NTSC-4.43", "PAL-BDGHI", "PAL-M", "PAL-N", "PAL-Nc", "PAL-60", "0x9", "0xA", "0xB", "SECAM", "0xD", "0xE", "0xF" }; struct cx25840_state *state = to_state(i2c_get_clientdata(client)); u8 vidfmt_sel = cx25840_read(client, 0x400) & 0xf; u8 gen_stat1 = cx25840_read(client, 0x40d); u8 gen_stat2 = cx25840_read(client, 0x40e); int vid_input = state->vid_input; v4l_info(client, "Video signal: %spresent\n", (gen_stat2 & 0x20) ? "" : "not "); v4l_info(client, "Detected format: %s\n", fmt_strs[gen_stat1 & 0xf]); v4l_info(client, "Specified standard: %s\n", vidfmt_sel ? 
fmt_strs[vidfmt_sel] : "automatic detection"); if (vid_input >= CX25840_COMPOSITE1 && vid_input <= CX25840_COMPOSITE8) { v4l_info(client, "Specified video input: Composite %d\n", vid_input - CX25840_COMPOSITE1 + 1); } else { v4l_info(client, "Specified video input: S-Video (Luma In%d, Chroma In%d)\n", (vid_input & 0xf0) >> 4, (vid_input & 0xf00) >> 8); } v4l_info(client, "Specified audioclock freq: %d Hz\n", state->audclk_freq); } /* ----------------------------------------------------------------------- */ static void log_audio_status(struct i2c_client *client) { struct cx25840_state *state = to_state(i2c_get_clientdata(client)); u8 download_ctl = cx25840_read(client, 0x803); u8 mod_det_stat0 = cx25840_read(client, 0x804); u8 mod_det_stat1 = cx25840_read(client, 0x805); u8 audio_config = cx25840_read(client, 0x808); u8 pref_mode = cx25840_read(client, 0x809); u8 afc0 = cx25840_read(client, 0x80b); u8 mute_ctl = cx25840_read(client, 0x8d3); int aud_input = state->aud_input; char *p; switch (mod_det_stat0) { case 0x00: p = "mono"; break; case 0x01: p = "stereo"; break; case 0x02: p = "dual"; break; case 0x04: p = "tri"; break; case 0x10: p = "mono with SAP"; break; case 0x11: p = "stereo with SAP"; break; case 0x12: p = "dual with SAP"; break; case 0x14: p = "tri with SAP"; break; case 0xfe: p = "forced mode"; break; default: p = "not defined"; } v4l_info(client, "Detected audio mode: %s\n", p); switch (mod_det_stat1) { case 0x00: p = "not defined"; break; case 0x01: p = "EIAJ"; break; case 0x02: p = "A2-M"; break; case 0x03: p = "A2-BG"; break; case 0x04: p = "A2-DK1"; break; case 0x05: p = "A2-DK2"; break; case 0x06: p = "A2-DK3"; break; case 0x07: p = "A1 (6.0 MHz FM Mono)"; break; case 0x08: p = "AM-L"; break; case 0x09: p = "NICAM-BG"; break; case 0x0a: p = "NICAM-DK"; break; case 0x0b: p = "NICAM-I"; break; case 0x0c: p = "NICAM-L"; break; case 0x0d: p = "BTSC/EIAJ/A2-M Mono (4.5 MHz FMMono)"; break; case 0x0e: p = "IF FM Radio"; break; case 0x0f: p = "BTSC"; break; case 0x10: p = "high-deviation FM"; break; case 0x11: p = "very high-deviation FM"; break; case 0xfd: p = "unknown audio standard"; break; case 0xfe: p = "forced audio standard"; break; case 0xff: p = "no detected audio standard"; break; default: p = "not defined"; } v4l_info(client, "Detected audio standard: %s\n", p); v4l_info(client, "Audio microcontroller: %s\n", (download_ctl & 0x10) ? ((mute_ctl & 0x2) ? 
"detecting" : "running") : "stopped"); switch (audio_config >> 4) { case 0x00: p = "undefined"; break; case 0x01: p = "BTSC"; break; case 0x02: p = "EIAJ"; break; case 0x03: p = "A2-M"; break; case 0x04: p = "A2-BG"; break; case 0x05: p = "A2-DK1"; break; case 0x06: p = "A2-DK2"; break; case 0x07: p = "A2-DK3"; break; case 0x08: p = "A1 (6.0 MHz FM Mono)"; break; case 0x09: p = "AM-L"; break; case 0x0a: p = "NICAM-BG"; break; case 0x0b: p = "NICAM-DK"; break; case 0x0c: p = "NICAM-I"; break; case 0x0d: p = "NICAM-L"; break; case 0x0e: p = "FM radio"; break; case 0x0f: p = "automatic detection"; break; default: p = "undefined"; } v4l_info(client, "Configured audio standard: %s\n", p); if ((audio_config >> 4) < 0xF) { switch (audio_config & 0xF) { case 0x00: p = "MONO1 (LANGUAGE A/Mono L+R channel for BTSC, EIAJ, A2)"; break; case 0x01: p = "MONO2 (LANGUAGE B)"; break; case 0x02: p = "MONO3 (STEREO forced MONO)"; break; case 0x03: p = "MONO4 (NICAM ANALOG-Language C/Analog Fallback)"; break; case 0x04: p = "STEREO"; break; case 0x05: p = "DUAL1 (AB)"; break; case 0x06: p = "DUAL2 (AC) (FM)"; break; case 0x07: p = "DUAL3 (BC) (FM)"; break; case 0x08: p = "DUAL4 (AC) (AM)"; break; case 0x09: p = "DUAL5 (BC) (AM)"; break; case 0x0a: p = "SAP"; break; default: p = "undefined"; } v4l_info(client, "Configured audio mode: %s\n", p); } else { switch (audio_config & 0xF) { case 0x00: p = "BG"; break; case 0x01: p = "DK1"; break; case 0x02: p = "DK2"; break; case 0x03: p = "DK3"; break; case 0x04: p = "I"; break; case 0x05: p = "L"; break; case 0x06: p = "BTSC"; break; case 0x07: p = "EIAJ"; break; case 0x08: p = "A2-M"; break; case 0x09: p = "FM Radio"; break; case 0x0f: p = "automatic standard and mode detection"; break; default: p = "undefined"; } v4l_info(client, "Configured audio system: %s\n", p); } if (aud_input) { v4l_info(client, "Specified audio input: Tuner (In%d)\n", aud_input); } else { v4l_info(client, "Specified audio input: External\n"); } switch (pref_mode & 0xf) { case 0: p = "mono/language A"; break; case 1: p = "language B"; break; case 2: p = "language C"; break; case 3: p = "analog fallback"; break; case 4: p = "stereo"; break; case 5: p = "language AC"; break; case 6: p = "language BC"; break; case 7: p = "language AB"; break; default: p = "undefined"; } v4l_info(client, "Preferred audio mode: %s\n", p); if ((audio_config & 0xf) == 0xf) { switch ((afc0 >> 3) & 0x3) { case 0: p = "system DK"; break; case 1: p = "system L"; break; case 2: p = "autodetect"; break; default: p = "undefined"; } v4l_info(client, "Selected 65 MHz format: %s\n", p); switch (afc0 & 0x7) { case 0: p = "chroma"; break; case 1: p = "BTSC"; break; case 2: p = "EIAJ"; break; case 3: p = "A2-M"; break; case 4: p = "autodetect"; break; default: p = "undefined"; } v4l_info(client, "Selected 45 MHz format: %s\n", p); } } #define CX25840_VCONFIG_OPTION(state, cfg_in, opt_msk) \ do { \ if ((cfg_in) & (opt_msk)) { \ (state)->vid_config &= ~(opt_msk); \ (state)->vid_config |= (cfg_in) & (opt_msk); \ } \ } while (0) /* apply incoming options to the current vconfig */ static void cx25840_vconfig_add(struct cx25840_state *state, u32 cfg_in) { CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_FMT_MASK); CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_RES_MASK); CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_VBIRAW_MASK); CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_ANCDATA_MASK); CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_TASKBIT_MASK); CX25840_VCONFIG_OPTION(state, cfg_in, 
CX25840_VCONFIG_ACTIVE_MASK); CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_VALID_MASK); CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_HRESETW_MASK); CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_CLKGATE_MASK); CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_DCMODE_MASK); CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_IDID0S_MASK); CX25840_VCONFIG_OPTION(state, cfg_in, CX25840_VCONFIG_VIPCLAMP_MASK); } /* ----------------------------------------------------------------------- */ /* * Initializes the device in the generic mode. * For cx2584x chips also adds additional video output settings provided * in @val parameter (CX25840_VCONFIG_*). * * The generic mode disables some of the ivtv-related hacks in this driver. * For cx2584x chips it also enables setting video output configuration while * setting it according to datasheet defaults by default. */ static int cx25840_init(struct v4l2_subdev *sd, u32 val) { struct cx25840_state *state = to_state(sd); state->generic_mode = true; if (is_cx2584x(state)) { /* set datasheet video output defaults */ state->vid_config = CX25840_VCONFIG_FMT_BT656 | CX25840_VCONFIG_RES_8BIT | CX25840_VCONFIG_VBIRAW_DISABLED | CX25840_VCONFIG_ANCDATA_ENABLED | CX25840_VCONFIG_TASKBIT_ONE | CX25840_VCONFIG_ACTIVE_HORIZONTAL | CX25840_VCONFIG_VALID_NORMAL | CX25840_VCONFIG_HRESETW_NORMAL | CX25840_VCONFIG_CLKGATE_NONE | CX25840_VCONFIG_DCMODE_DWORDS | CX25840_VCONFIG_IDID0S_NORMAL | CX25840_VCONFIG_VIPCLAMP_DISABLED; /* add additional settings */ cx25840_vconfig_add(state, val); } else { /* TODO: generic mode needs to be developed for other chips */ WARN_ON(1); } return 0; } static int cx25840_reset(struct v4l2_subdev *sd, u32 val) { struct cx25840_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); if (is_cx2583x(state)) cx25836_initialize(client); else if (is_cx2388x(state)) cx23885_initialize(client); else if (is_cx231xx(state)) cx231xx_initialize(client); else cx25840_initialize(client); state->is_initialized = 1; return 0; } /* * This load_fw operation must be called to load the driver's firmware. * This will load the firmware on the first invocation (further ones are NOP). * Without this the audio standard detection will fail and you will * only get mono. * Alternatively, you can call the reset operation instead of this one. * * Since loading the firmware is often problematic when the driver is * compiled into the kernel I recommend postponing calling this function * until the first open of the video device. Another reason for * postponing it is that loading this firmware takes a long time (seconds) * due to the slow i2c bus speed. So it will speed up the boot process if * you can avoid loading the fw as long as the video device isn't used. 
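 * As a rough illustration of the cost: a 100 kHz i2c bus carries at most
 * about 12.5 kbytes/s before protocol overhead, so an image of a few tens
 * of kilobytes keeps the bus occupied for several seconds.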
*/ static int cx25840_load_fw(struct v4l2_subdev *sd) { struct cx25840_state *state = to_state(sd); if (!state->is_initialized) { /* initialize and load firmware */ cx25840_reset(sd, 0); } return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int cx25840_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); reg->size = 1; reg->val = cx25840_read(client, reg->reg & 0x0fff); return 0; } static int cx25840_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); cx25840_write(client, reg->reg & 0x0fff, reg->val & 0xff); return 0; } #endif static int cx25840_s_audio_stream(struct v4l2_subdev *sd, int enable) { struct cx25840_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); u8 v; if (is_cx2583x(state) || is_cx2388x(state) || is_cx231xx(state)) return 0; v4l_dbg(1, cx25840_debug, client, "%s audio output\n", enable ? "enable" : "disable"); if (enable) { v = cx25840_read(client, 0x115) | 0x80; cx25840_write(client, 0x115, v); v = cx25840_read(client, 0x116) | 0x03; cx25840_write(client, 0x116, v); } else { v = cx25840_read(client, 0x115) & ~(0x80); cx25840_write(client, 0x115, v); v = cx25840_read(client, 0x116) & ~(0x03); cx25840_write(client, 0x116, v); } return 0; } static int cx25840_s_stream(struct v4l2_subdev *sd, int enable) { struct cx25840_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); u8 v; v4l_dbg(1, cx25840_debug, client, "%s video output\n", enable ? "enable" : "disable"); /* * It's not clear what should be done for these devices. * The original code used the same addresses as for the cx25840, but * those addresses do something else entirely on the cx2388x and * cx231xx. Since it never did anything in the first place, just do * nothing. */ if (is_cx2388x(state) || is_cx231xx(state)) return 0; if (enable) { v = cx25840_read(client, 0x115) | 0x0c; cx25840_write(client, 0x115, v); v = cx25840_read(client, 0x116) | 0x04; cx25840_write(client, 0x116, v); } else { v = cx25840_read(client, 0x115) & ~(0x0c); cx25840_write(client, 0x115, v); v = cx25840_read(client, 0x116) & ~(0x04); cx25840_write(client, 0x116, v); } return 0; } /* Query the current detected video format */ static int cx25840_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) { struct i2c_client *client = v4l2_get_subdevdata(sd); static const v4l2_std_id stds[] = { /* 0000 */ V4L2_STD_UNKNOWN, /* 0001 */ V4L2_STD_NTSC_M, /* 0010 */ V4L2_STD_NTSC_M_JP, /* 0011 */ V4L2_STD_NTSC_443, /* 0100 */ V4L2_STD_PAL, /* 0101 */ V4L2_STD_PAL_M, /* 0110 */ V4L2_STD_PAL_N, /* 0111 */ V4L2_STD_PAL_Nc, /* 1000 */ V4L2_STD_PAL_60, /* 1001 */ V4L2_STD_UNKNOWN, /* 1010 */ V4L2_STD_UNKNOWN, /* 1011 */ V4L2_STD_UNKNOWN, /* 1100 */ V4L2_STD_SECAM, /* 1101 */ V4L2_STD_UNKNOWN, /* 1110 */ V4L2_STD_UNKNOWN, /* 1111 */ V4L2_STD_UNKNOWN }; u32 fmt = (cx25840_read4(client, 0x40c) >> 8) & 0xf; *std = stds[fmt]; v4l_dbg(1, cx25840_debug, client, "querystd fmt = %x, v4l2_std_id = 0x%x\n", fmt, (unsigned int)stds[fmt]); return 0; } static int cx25840_g_input_status(struct v4l2_subdev *sd, u32 *status) { struct i2c_client *client = v4l2_get_subdevdata(sd); /* * A limited function that checks for signal status and returns * the state. 
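	 * Only the horizontal-lock bit of the general status register is
	 * checked below; V4L2_IN_ST_NO_SIGNAL is reported when it is clear.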
*/ /* Check for status of Horizontal lock (SRC lock isn't reliable) */ if ((cx25840_read4(client, 0x40c) & 0x00010000) == 0) *status |= V4L2_IN_ST_NO_SIGNAL; return 0; } static int cx25840_g_std(struct v4l2_subdev *sd, v4l2_std_id *std) { struct cx25840_state *state = to_state(sd); *std = state->std; return 0; } static int cx25840_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct cx25840_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); if (state->radio == 0 && state->std == std) return 0; state->radio = 0; state->std = std; return set_v4lstd(client); } static int cx25840_s_radio(struct v4l2_subdev *sd) { struct cx25840_state *state = to_state(sd); state->radio = 1; return 0; } static int cx25840_s_video_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct cx25840_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); if (is_cx23888(state)) cx23888_std_setup(client); if (is_cx2584x(state) && state->generic_mode && config) { cx25840_vconfig_add(state, config); cx25840_vconfig_apply(client); } return set_input(client, input, state->aud_input); } static int cx25840_s_audio_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct cx25840_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); if (is_cx23888(state)) cx23888_std_setup(client); return set_input(client, state->vid_input, input); } static int cx25840_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequency *freq) { struct i2c_client *client = v4l2_get_subdevdata(sd); input_change(client); return 0; } static int cx25840_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct cx25840_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); u8 vpres = cx25840_read(client, 0x40e) & 0x20; u8 mode; int val = 0; if (state->radio) return 0; vt->signal = vpres ? 
0xffff : 0x0; if (is_cx2583x(state)) return 0; vt->capability |= V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP; mode = cx25840_read(client, 0x804); /* get rxsubchans and audmode */ if ((mode & 0xf) == 1) val |= V4L2_TUNER_SUB_STEREO; else val |= V4L2_TUNER_SUB_MONO; if (mode == 2 || mode == 4) val = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; if (mode & 0x10) val |= V4L2_TUNER_SUB_SAP; vt->rxsubchans = val; vt->audmode = state->audmode; return 0; } static int cx25840_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt) { struct cx25840_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); if (state->radio || is_cx2583x(state)) return 0; switch (vt->audmode) { case V4L2_TUNER_MODE_MONO: /* * mono -> mono * stereo -> mono * bilingual -> lang1 */ cx25840_and_or(client, 0x809, ~0xf, 0x00); break; case V4L2_TUNER_MODE_STEREO: case V4L2_TUNER_MODE_LANG1: /* * mono -> mono * stereo -> stereo * bilingual -> lang1 */ cx25840_and_or(client, 0x809, ~0xf, 0x04); break; case V4L2_TUNER_MODE_LANG1_LANG2: /* * mono -> mono * stereo -> stereo * bilingual -> lang1/lang2 */ cx25840_and_or(client, 0x809, ~0xf, 0x07); break; case V4L2_TUNER_MODE_LANG2: /* * mono -> mono * stereo -> stereo * bilingual -> lang2 */ cx25840_and_or(client, 0x809, ~0xf, 0x01); break; default: return -EINVAL; } state->audmode = vt->audmode; return 0; } static int cx25840_log_status(struct v4l2_subdev *sd) { struct cx25840_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); log_video_status(client); if (!is_cx2583x(state)) log_audio_status(client); cx25840_ir_log_status(sd); v4l2_ctrl_handler_log_status(&state->hdl, sd->name); return 0; } static int cx23885_irq_handler(struct v4l2_subdev *sd, u32 status, bool *handled) { struct cx25840_state *state = to_state(sd); struct i2c_client *c = v4l2_get_subdevdata(sd); u8 irq_stat, aud_stat, aud_en, ir_stat, ir_en; u32 vid_stat, aud_mc_stat; bool block_handled; int ret = 0; irq_stat = cx25840_read(c, CX23885_PIN_CTRL_IRQ_REG); v4l_dbg(2, cx25840_debug, c, "AV Core IRQ status (entry): %s %s %s\n", irq_stat & CX23885_PIN_CTRL_IRQ_IR_STAT ? "ir" : " ", irq_stat & CX23885_PIN_CTRL_IRQ_AUD_STAT ? "aud" : " ", irq_stat & CX23885_PIN_CTRL_IRQ_VID_STAT ? 
"vid" : " "); if ((is_cx23885(state) || is_cx23887(state))) { ir_stat = cx25840_read(c, CX25840_IR_STATS_REG); ir_en = cx25840_read(c, CX25840_IR_IRQEN_REG); v4l_dbg(2, cx25840_debug, c, "AV Core ir IRQ status: %#04x disables: %#04x\n", ir_stat, ir_en); if (irq_stat & CX23885_PIN_CTRL_IRQ_IR_STAT) { block_handled = false; ret = cx25840_ir_irq_handler(sd, status, &block_handled); if (block_handled) *handled = true; } } aud_stat = cx25840_read(c, CX25840_AUD_INT_STAT_REG); aud_en = cx25840_read(c, CX25840_AUD_INT_CTRL_REG); v4l_dbg(2, cx25840_debug, c, "AV Core audio IRQ status: %#04x disables: %#04x\n", aud_stat, aud_en); aud_mc_stat = cx25840_read4(c, CX23885_AUD_MC_INT_MASK_REG); v4l_dbg(2, cx25840_debug, c, "AV Core audio MC IRQ status: %#06x enables: %#06x\n", aud_mc_stat >> CX23885_AUD_MC_INT_STAT_SHFT, aud_mc_stat & CX23885_AUD_MC_INT_CTRL_BITS); if (irq_stat & CX23885_PIN_CTRL_IRQ_AUD_STAT) { if (aud_stat) { cx25840_write(c, CX25840_AUD_INT_STAT_REG, aud_stat); *handled = true; } } vid_stat = cx25840_read4(c, CX25840_VID_INT_STAT_REG); v4l_dbg(2, cx25840_debug, c, "AV Core video IRQ status: %#06x disables: %#06x\n", vid_stat & CX25840_VID_INT_STAT_BITS, vid_stat >> CX25840_VID_INT_MASK_SHFT); if (irq_stat & CX23885_PIN_CTRL_IRQ_VID_STAT) { if (vid_stat & CX25840_VID_INT_STAT_BITS) { cx25840_write4(c, CX25840_VID_INT_STAT_REG, vid_stat); *handled = true; } } irq_stat = cx25840_read(c, CX23885_PIN_CTRL_IRQ_REG); v4l_dbg(2, cx25840_debug, c, "AV Core IRQ status (exit): %s %s %s\n", irq_stat & CX23885_PIN_CTRL_IRQ_IR_STAT ? "ir" : " ", irq_stat & CX23885_PIN_CTRL_IRQ_AUD_STAT ? "aud" : " ", irq_stat & CX23885_PIN_CTRL_IRQ_VID_STAT ? "vid" : " "); return ret; } static int cx25840_irq_handler(struct v4l2_subdev *sd, u32 status, bool *handled) { struct cx25840_state *state = to_state(sd); *handled = false; /* Only support the CX2388[578] AV Core for now */ if (is_cx2388x(state)) return cx23885_irq_handler(sd, status, handled); return -ENODEV; } /* ----------------------------------------------------------------------- */ #define DIF_PLL_FREQ_WORD (0x300) #define DIF_BPF_COEFF01 (0x348) #define DIF_BPF_COEFF23 (0x34c) #define DIF_BPF_COEFF45 (0x350) #define DIF_BPF_COEFF67 (0x354) #define DIF_BPF_COEFF89 (0x358) #define DIF_BPF_COEFF1011 (0x35c) #define DIF_BPF_COEFF1213 (0x360) #define DIF_BPF_COEFF1415 (0x364) #define DIF_BPF_COEFF1617 (0x368) #define DIF_BPF_COEFF1819 (0x36c) #define DIF_BPF_COEFF2021 (0x370) #define DIF_BPF_COEFF2223 (0x374) #define DIF_BPF_COEFF2425 (0x378) #define DIF_BPF_COEFF2627 (0x37c) #define DIF_BPF_COEFF2829 (0x380) #define DIF_BPF_COEFF3031 (0x384) #define DIF_BPF_COEFF3233 (0x388) #define DIF_BPF_COEFF3435 (0x38c) #define DIF_BPF_COEFF36 (0x390) static const u32 ifhz_coeffs[][19] = { { // 3.0 MHz 0x00000002, 0x00080012, 0x001e0024, 0x001bfff8, 0xffb4ff50, 0xfed8fe68, 0xfe24fe34, 0xfebaffc7, 0x014d031f, 0x04f0065d, 0x07010688, 0x04c901d6, 0xfe00f9d3, 0xf600f342, 0xf235f337, 0xf64efb22, 0x0105070f, 0x0c460fce, 0x110d0000, }, { // 3.1 MHz 0x00000001, 0x00070012, 0x00220032, 0x00370026, 0xfff0ff91, 0xff0efe7c, 0xfe01fdcc, 0xfe0afedb, 0x00440224, 0x0434060c, 0x0738074e, 0x06090361, 0xff99fb39, 0xf6fef3b6, 0xf21af2a5, 0xf573fa33, 0x0034067d, 0x0bfb0fb9, 0x110d0000, }, { // 3.2 MHz 0x00000000, 0x0004000e, 0x00200038, 0x004c004f, 0x002fffdf, 0xff5cfeb6, 0xfe0dfd92, 0xfd7ffe03, 0xff36010a, 0x03410575, 0x072607d2, 0x071804d5, 0x0134fcb7, 0xf81ff451, 0xf223f22e, 0xf4a7f94b, 0xff6405e8, 0x0bae0fa4, 0x110d0000, }, { // 3.3 MHz 0x0000ffff, 0x00000008, 0x001a0036, 
0x0056006d, 0x00670030, 0xffbdff10, 0xfe46fd8d, 0xfd25fd4f, 0xfe35ffe0, 0x0224049f, 0x06c9080e, 0x07ef0627, 0x02c9fe45, 0xf961f513, 0xf250f1d2, 0xf3ecf869, 0xfe930552, 0x0b5f0f8f, 0x110d0000, }, { // 3.4 MHz 0xfffffffe, 0xfffd0001, 0x000f002c, 0x0054007d, 0x0093007c, 0x0024ff82, 0xfea6fdbb, 0xfd03fcca, 0xfd51feb9, 0x00eb0392, 0x06270802, 0x08880750, 0x044dffdb, 0xfabdf5f8, 0xf2a0f193, 0xf342f78f, 0xfdc404b9, 0x0b0e0f78, 0x110d0000, }, { // 3.5 MHz 0xfffffffd, 0xfffafff9, 0x0002001b, 0x0046007d, 0x00ad00ba, 0x00870000, 0xff26fe1a, 0xfd1bfc7e, 0xfc99fda4, 0xffa5025c, 0x054507ad, 0x08dd0847, 0x05b80172, 0xfc2ef6ff, 0xf313f170, 0xf2abf6bd, 0xfcf6041f, 0x0abc0f61, 0x110d0000, }, { // 3.6 MHz 0xfffffffd, 0xfff8fff3, 0xfff50006, 0x002f006c, 0x00b200e3, 0x00dc007e, 0xffb9fea0, 0xfd6bfc71, 0xfc17fcb1, 0xfe65010b, 0x042d0713, 0x08ec0906, 0x07020302, 0xfdaff823, 0xf3a7f16a, 0xf228f5f5, 0xfc2a0384, 0x0a670f4a, 0x110d0000, }, { // 3.7 MHz 0x0000fffd, 0xfff7ffef, 0xffe9fff1, 0x0010004d, 0x00a100f2, 0x011a00f0, 0x0053ff44, 0xfdedfca2, 0xfbd3fbef, 0xfd39ffae, 0x02ea0638, 0x08b50987, 0x08230483, 0xff39f960, 0xf45bf180, 0xf1b8f537, 0xfb6102e7, 0x0a110f32, 0x110d0000, }, { // 3.8 MHz 0x0000fffe, 0xfff9ffee, 0xffe1ffdd, 0xfff00024, 0x007c00e5, 0x013a014a, 0x00e6fff8, 0xfe98fd0f, 0xfbd3fb67, 0xfc32fe54, 0x01880525, 0x083909c7, 0x091505ee, 0x00c7fab3, 0xf52df1b4, 0xf15df484, 0xfa9b0249, 0x09ba0f19, 0x110d0000, }, { // 3.9 MHz 0x00000000, 0xfffbfff0, 0xffdeffcf, 0xffd1fff6, 0x004800be, 0x01390184, 0x016300ac, 0xff5efdb1, 0xfc17fb23, 0xfb5cfd0d, 0x001703e4, 0x077b09c4, 0x09d2073c, 0x0251fc18, 0xf61cf203, 0xf118f3dc, 0xf9d801aa, 0x09600eff, 0x110d0000, }, { // 4.0 MHz 0x00000001, 0xfffefff4, 0xffe1ffc8, 0xffbaffca, 0x000b0082, 0x01170198, 0x01c10152, 0x0030fe7b, 0xfc99fb24, 0xfac3fbe9, 0xfea5027f, 0x0683097f, 0x0a560867, 0x03d2fd89, 0xf723f26f, 0xf0e8f341, 0xf919010a, 0x09060ee5, 0x110d0000, }, { // 4.1 MHz 0x00010002, 0x0002fffb, 0xffe8ffca, 0xffacffa4, 0xffcd0036, 0x00d70184, 0x01f601dc, 0x00ffff60, 0xfd51fb6d, 0xfa6efaf5, 0xfd410103, 0x055708f9, 0x0a9e0969, 0x0543ff02, 0xf842f2f5, 0xf0cef2b2, 0xf85e006b, 0x08aa0ecb, 0x110d0000, }, { // 4.2 MHz 0x00010003, 0x00050003, 0xfff3ffd3, 0xffaaff8b, 0xff95ffe5, 0x0080014a, 0x01fe023f, 0x01ba0050, 0xfe35fbf8, 0xfa62fa3b, 0xfbf9ff7e, 0x04010836, 0x0aa90a3d, 0x069f007f, 0xf975f395, 0xf0cbf231, 0xf7a9ffcb, 0x084c0eaf, 0x110d0000, }, { // 4.3 MHz 0x00010003, 0x0008000a, 0x0000ffe4, 0xffb4ff81, 0xff6aff96, 0x001c00f0, 0x01d70271, 0x0254013b, 0xff36fcbd, 0xfa9ff9c5, 0xfadbfdfe, 0x028c073b, 0x0a750adf, 0x07e101fa, 0xfab8f44e, 0xf0ddf1be, 0xf6f9ff2b, 0x07ed0e94, 0x110d0000, }, { // 4.4 MHz 0x00000003, 0x0009000f, 0x000efff8, 0xffc9ff87, 0xff52ff54, 0xffb5007e, 0x01860270, 0x02c00210, 0x0044fdb2, 0xfb22f997, 0xf9f2fc90, 0x0102060f, 0x0a050b4c, 0x0902036e, 0xfc0af51e, 0xf106f15a, 0xf64efe8b, 0x078d0e77, 0x110d0000, }, { // 4.5 MHz 0x00000002, 0x00080012, 0x0019000e, 0xffe5ff9e, 0xff4fff25, 0xff560000, 0x0112023b, 0x02f702c0, 0x014dfec8, 0xfbe5f9b3, 0xf947fb41, 0xff7004b9, 0x095a0b81, 0x0a0004d8, 0xfd65f603, 0xf144f104, 0xf5aafdec, 0x072b0e5a, 0x110d0000, }, { // 4.6 MHz 0x00000001, 0x00060012, 0x00200022, 0x0005ffc1, 0xff61ff10, 0xff09ff82, 0x008601d7, 0x02f50340, 0x0241fff0, 0xfcddfa19, 0xf8e2fa1e, 0xfde30343, 0x08790b7f, 0x0ad50631, 0xfec7f6fc, 0xf198f0bd, 0xf50dfd4e, 0x06c90e3d, 0x110d0000, }, { // 4.7 MHz 0x0000ffff, 0x0003000f, 0x00220030, 0x0025ffed, 0xff87ff15, 0xfed6ff10, 0xffed014c, 0x02b90386, 0x03110119, 0xfdfefac4, 0xf8c6f92f, 0xfc6701b7, 0x07670b44, 0x0b7e0776, 
0x002df807, 0xf200f086, 0xf477fcb1, 0x06650e1e, 0x110d0000, }, { // 4.8 MHz 0xfffffffe, 0xffff0009, 0x001e0038, 0x003f001b, 0xffbcff36, 0xfec2feb6, 0xff5600a5, 0x0248038d, 0x03b00232, 0xff39fbab, 0xf8f4f87f, 0xfb060020, 0x062a0ad2, 0x0bf908a3, 0x0192f922, 0xf27df05e, 0xf3e8fc14, 0x06000e00, 0x110d0000, }, { // 4.9 MHz 0xfffffffd, 0xfffc0002, 0x00160037, 0x00510046, 0xfff9ff6d, 0xfed0fe7c, 0xfecefff0, 0x01aa0356, 0x0413032b, 0x007ffcc5, 0xf96cf812, 0xf9cefe87, 0x04c90a2c, 0x0c4309b4, 0x02f3fa4a, 0xf30ef046, 0xf361fb7a, 0x059b0de0, 0x110d0000, }, { // 5.0 MHz 0xfffffffd, 0xfff9fffa, 0x000a002d, 0x00570067, 0x0037ffb5, 0xfefffe68, 0xfe62ff3d, 0x00ec02e3, 0x043503f6, 0x01befe05, 0xfa27f7ee, 0xf8c6fcf8, 0x034c0954, 0x0c5c0aa4, 0x044cfb7e, 0xf3b1f03f, 0xf2e2fae1, 0x05340dc0, 0x110d0000, }, { // 5.1 MHz 0x0000fffd, 0xfff8fff4, 0xfffd001e, 0x0051007b, 0x006e0006, 0xff48fe7c, 0xfe1bfe9a, 0x001d023e, 0x04130488, 0x02e6ff5b, 0xfb1ef812, 0xf7f7fb7f, 0x01bc084e, 0x0c430b72, 0x059afcba, 0xf467f046, 0xf26cfa4a, 0x04cd0da0, 0x110d0000, }, { // 5.2 MHz 0x0000fffe, 0xfff8ffef, 0xfff00009, 0x003f007f, 0x00980056, 0xffa5feb6, 0xfe00fe15, 0xff4b0170, 0x03b004d7, 0x03e800b9, 0xfc48f87f, 0xf768fa23, 0x0022071f, 0x0bf90c1b, 0x06dafdfd, 0xf52df05e, 0xf1fef9b5, 0x04640d7f, 0x110d0000, }, { // 5.3 MHz 0x0000ffff, 0xfff9ffee, 0xffe6fff3, 0x00250072, 0x00af009c, 0x000cff10, 0xfe13fdb8, 0xfe870089, 0x031104e1, 0x04b8020f, 0xfd98f92f, 0xf71df8f0, 0xfe8805ce, 0x0b7e0c9c, 0x0808ff44, 0xf603f086, 0xf19af922, 0x03fb0d5e, 0x110d0000, }, { // 5.4 MHz 0x00000001, 0xfffcffef, 0xffe0ffe0, 0x00050056, 0x00b000d1, 0x0071ff82, 0xfe53fd8c, 0xfddfff99, 0x024104a3, 0x054a034d, 0xff01fa1e, 0xf717f7ed, 0xfcf50461, 0x0ad50cf4, 0x0921008d, 0xf6e7f0bd, 0xf13ff891, 0x03920d3b, 0x110d0000, }, { // 5.5 MHz 0x00010002, 0xfffffff3, 0xffdeffd1, 0xffe5002f, 0x009c00ed, 0x00cb0000, 0xfebafd94, 0xfd61feb0, 0x014d0422, 0x05970464, 0x0074fb41, 0xf759f721, 0xfb7502de, 0x0a000d21, 0x0a2201d4, 0xf7d9f104, 0xf0edf804, 0x03280d19, 0x110d0000, }, { // 5.6 MHz 0x00010003, 0x0003fffa, 0xffe3ffc9, 0xffc90002, 0x007500ef, 0x010e007e, 0xff3dfdcf, 0xfd16fddd, 0x00440365, 0x059b0548, 0x01e3fc90, 0xf7dff691, 0xfa0f014d, 0x09020d23, 0x0b0a0318, 0xf8d7f15a, 0xf0a5f779, 0x02bd0cf6, 0x110d0000, }, { // 5.7 MHz 0x00010003, 0x00060001, 0xffecffc9, 0xffb4ffd4, 0x004000d5, 0x013600f0, 0xffd3fe39, 0xfd04fd31, 0xff360277, 0x055605ef, 0x033efdfe, 0xf8a5f642, 0xf8cbffb6, 0x07e10cfb, 0x0bd50456, 0xf9dff1be, 0xf067f6f2, 0x02520cd2, 0x110d0000, }, { // 5.8 MHz 0x00000003, 0x00080009, 0xfff8ffd2, 0xffaaffac, 0x000200a3, 0x013c014a, 0x006dfec9, 0xfd2bfcb7, 0xfe350165, 0x04cb0651, 0x0477ff7e, 0xf9a5f635, 0xf7b1fe20, 0x069f0ca8, 0x0c81058b, 0xfaf0f231, 0xf033f66d, 0x01e60cae, 0x110d0000, }, { // 5.9 MHz 0x00000002, 0x0009000e, 0x0005ffe1, 0xffacff90, 0xffc5005f, 0x01210184, 0x00fcff72, 0xfd8afc77, 0xfd51003f, 0x04020669, 0x05830103, 0xfad7f66b, 0xf6c8fc93, 0x05430c2b, 0x0d0d06b5, 0xfc08f2b2, 0xf00af5ec, 0x017b0c89, 0x110d0000, }, { // 6.0 MHz 0x00000001, 0x00070012, 0x0012fff5, 0xffbaff82, 0xff8e000f, 0x00e80198, 0x01750028, 0xfe18fc75, 0xfc99ff15, 0x03050636, 0x0656027f, 0xfc32f6e2, 0xf614fb17, 0x03d20b87, 0x0d7707d2, 0xfd26f341, 0xefeaf56f, 0x010f0c64, 0x110d0000, }, { // 6.1 MHz 0xffff0000, 0x00050012, 0x001c000b, 0xffd1ff84, 0xff66ffbe, 0x00960184, 0x01cd00da, 0xfeccfcb2, 0xfc17fdf9, 0x01e005bc, 0x06e703e4, 0xfdabf798, 0xf599f9b3, 0x02510abd, 0x0dbf08df, 0xfe48f3dc, 0xefd5f4f6, 0x00a20c3e, 0x110d0000, }, { // 6.2 MHz 0xfffffffe, 0x0002000f, 0x0021001f, 0xfff0ff97, 0xff50ff74, 
0x0034014a, 0x01fa0179, 0xff97fd2a, 0xfbd3fcfa, 0x00a304fe, 0x07310525, 0xff37f886, 0xf55cf86e, 0x00c709d0, 0x0de209db, 0xff6df484, 0xefcbf481, 0x00360c18, 0x110d0000, }, { // 6.3 MHz 0xfffffffd, 0xfffe000a, 0x0021002f, 0x0010ffb8, 0xff50ff3b, 0xffcc00f0, 0x01fa01fa, 0x0069fdd4, 0xfbd3fc26, 0xff5d0407, 0x07310638, 0x00c9f9a8, 0xf55cf74e, 0xff3908c3, 0x0de20ac3, 0x0093f537, 0xefcbf410, 0xffca0bf2, 0x110d0000, }, { // 6.4 MHz 0xfffffffd, 0xfffb0003, 0x001c0037, 0x002fffe2, 0xff66ff17, 0xff6a007e, 0x01cd0251, 0x0134fea5, 0xfc17fb8b, 0xfe2002e0, 0x06e70713, 0x0255faf5, 0xf599f658, 0xfdaf0799, 0x0dbf0b96, 0x01b8f5f5, 0xefd5f3a3, 0xff5e0bca, 0x110d0000, }, { // 6.5 MHz 0x0000fffd, 0xfff9fffb, 0x00120037, 0x00460010, 0xff8eff0f, 0xff180000, 0x01750276, 0x01e8ff8d, 0xfc99fb31, 0xfcfb0198, 0x065607ad, 0x03cefc64, 0xf614f592, 0xfc2e0656, 0x0d770c52, 0x02daf6bd, 0xefeaf33b, 0xfef10ba3, 0x110d0000, }, { // 6.6 MHz 0x0000fffe, 0xfff7fff5, 0x0005002f, 0x0054003c, 0xffc5ff22, 0xfedfff82, 0x00fc0267, 0x0276007e, 0xfd51fb1c, 0xfbfe003e, 0x05830802, 0x0529fdec, 0xf6c8f4fe, 0xfabd04ff, 0x0d0d0cf6, 0x03f8f78f, 0xf00af2d7, 0xfe850b7b, 0x110d0000, }, { // 6.7 MHz 0x0000ffff, 0xfff8fff0, 0xfff80020, 0x00560060, 0x0002ff4e, 0xfec4ff10, 0x006d0225, 0x02d50166, 0xfe35fb4e, 0xfb35fee1, 0x0477080e, 0x065bff82, 0xf7b1f4a0, 0xf9610397, 0x0c810d80, 0x0510f869, 0xf033f278, 0xfe1a0b52, 0x110d0000, }, { // 6.8 MHz 0x00010000, 0xfffaffee, 0xffec000c, 0x004c0078, 0x0040ff8e, 0xfecafeb6, 0xffd301b6, 0x02fc0235, 0xff36fbc5, 0xfaaafd90, 0x033e07d2, 0x075b011b, 0xf8cbf47a, 0xf81f0224, 0x0bd50def, 0x0621f94b, 0xf067f21e, 0xfdae0b29, 0x110d0000, }, { // 6.9 MHz 0x00010001, 0xfffdffef, 0xffe3fff6, 0x0037007f, 0x0075ffdc, 0xfef2fe7c, 0xff3d0122, 0x02ea02dd, 0x0044fc79, 0xfa65fc5d, 0x01e3074e, 0x082102ad, 0xfa0ff48c, 0xf6fe00a9, 0x0b0a0e43, 0x0729fa33, 0xf0a5f1c9, 0xfd430b00, 0x110d0000, }, { // 7.0 MHz 0x00010002, 0x0001fff3, 0xffdeffe2, 0x001b0076, 0x009c002d, 0xff35fe68, 0xfeba0076, 0x029f0352, 0x014dfd60, 0xfa69fb53, 0x00740688, 0x08a7042d, 0xfb75f4d6, 0xf600ff2d, 0x0a220e7a, 0x0827fb22, 0xf0edf17a, 0xfcd80ad6, 0x110d0000, }, { // 7.1 MHz 0x00000003, 0x0004fff9, 0xffe0ffd2, 0xfffb005e, 0x00b0007a, 0xff8ffe7c, 0xfe53ffc1, 0x0221038c, 0x0241fe6e, 0xfab6fa80, 0xff010587, 0x08e90590, 0xfcf5f556, 0xf52bfdb3, 0x09210e95, 0x0919fc15, 0xf13ff12f, 0xfc6e0aab, 0x110d0000, }, { // 7.2 MHz 0x00000003, 0x00070000, 0xffe6ffc9, 0xffdb0039, 0x00af00b8, 0xfff4feb6, 0xfe13ff10, 0x01790388, 0x0311ff92, 0xfb48f9ed, 0xfd980453, 0x08e306cd, 0xfe88f60a, 0xf482fc40, 0x08080e93, 0x09fdfd0c, 0xf19af0ea, 0xfc050a81, 0x110d0000, }, { // 7.3 MHz 0x00000002, 0x00080008, 0xfff0ffc9, 0xffc1000d, 0x009800e2, 0x005bff10, 0xfe00fe74, 0x00b50345, 0x03b000bc, 0xfc18f9a1, 0xfc4802f9, 0x089807dc, 0x0022f6f0, 0xf407fada, 0x06da0e74, 0x0ad3fe06, 0xf1fef0ab, 0xfb9c0a55, 0x110d0000, }, { // 7.4 MHz 0x00000001, 0x0008000e, 0xfffdffd0, 0xffafffdf, 0x006e00f2, 0x00b8ff82, 0xfe1bfdf8, 0xffe302c8, 0x041301dc, 0xfd1af99e, 0xfb1e0183, 0x080908b5, 0x01bcf801, 0xf3bdf985, 0x059a0e38, 0x0b99ff03, 0xf26cf071, 0xfb330a2a, 0x110d0000, }, { // 7.5 MHz 0xffff0000, 0x00070011, 0x000affdf, 0xffa9ffb5, 0x003700e6, 0x01010000, 0xfe62fda8, 0xff140219, 0x043502e1, 0xfe42f9e6, 0xfa270000, 0x073a0953, 0x034cf939, 0xf3a4f845, 0x044c0de1, 0x0c4f0000, 0xf2e2f03c, 0xfacc09fe, 0x110d0000, }, { // 7.6 MHz 0xffffffff, 0x00040012, 0x0016fff3, 0xffafff95, 0xfff900c0, 0x0130007e, 0xfecefd89, 0xfe560146, 0x041303bc, 0xff81fa76, 0xf96cfe7d, 0x063209b1, 0x04c9fa93, 0xf3bdf71e, 0x02f30d6e, 0x0cf200fd, 
0xf361f00e, 0xfa6509d1, 0x110d0000, }, { // 7.7 MHz 0xfffffffe, 0x00010010, 0x001e0008, 0xffc1ff84, 0xffbc0084, 0x013e00f0, 0xff56fd9f, 0xfdb8005c, 0x03b00460, 0x00c7fb45, 0xf8f4fd07, 0x04fa09ce, 0x062afc07, 0xf407f614, 0x01920ce0, 0x0d8301fa, 0xf3e8efe5, 0xfa0009a4, 0x110d0000, }, { // 7.8 MHz 0x0000fffd, 0xfffd000b, 0x0022001d, 0xffdbff82, 0xff870039, 0x012a014a, 0xffedfde7, 0xfd47ff6b, 0x031104c6, 0x0202fc4c, 0xf8c6fbad, 0x039909a7, 0x0767fd8e, 0xf482f52b, 0x002d0c39, 0x0e0002f4, 0xf477efc2, 0xf99b0977, 0x110d0000, }, { // 7.9 MHz 0x0000fffd, 0xfffa0004, 0x0020002d, 0xfffbff91, 0xff61ffe8, 0x00f70184, 0x0086fe5c, 0xfd0bfe85, 0x024104e5, 0x0323fd7d, 0xf8e2fa79, 0x021d093f, 0x0879ff22, 0xf52bf465, 0xfec70b79, 0x0e6803eb, 0xf50defa5, 0xf937094a, 0x110d0000, }, { // 8.0 MHz 0x0000fffe, 0xfff8fffd, 0x00190036, 0x001bffaf, 0xff4fff99, 0x00aa0198, 0x0112fef3, 0xfd09fdb9, 0x014d04be, 0x041bfecc, 0xf947f978, 0x00900897, 0x095a00b9, 0xf600f3c5, 0xfd650aa3, 0x0ebc04de, 0xf5aaef8e, 0xf8d5091c, 0x110d0000, }, { // 8.1 MHz 0x0000ffff, 0xfff7fff6, 0x000e0038, 0x0037ffd7, 0xff52ff56, 0x004b0184, 0x0186ffa1, 0xfd40fd16, 0x00440452, 0x04de0029, 0xf9f2f8b2, 0xfefe07b5, 0x0a05024d, 0xf6fef34d, 0xfc0a09b8, 0x0efa05cd, 0xf64eef7d, 0xf87308ed, 0x110d0000, }, { // 8.2 MHz 0x00010000, 0xfff8fff0, 0x00000031, 0x004c0005, 0xff6aff27, 0xffe4014a, 0x01d70057, 0xfdacfca6, 0xff3603a7, 0x05610184, 0xfadbf82e, 0xfd74069f, 0x0a7503d6, 0xf81ff2ff, 0xfab808b9, 0x0f2306b5, 0xf6f9ef72, 0xf81308bf, 0x110d0000, }, { // 8.3 MHz 0x00010001, 0xfffbffee, 0xfff30022, 0x00560032, 0xff95ff10, 0xff8000f0, 0x01fe0106, 0xfe46fc71, 0xfe3502c7, 0x059e02ce, 0xfbf9f7f2, 0xfbff055b, 0x0aa9054c, 0xf961f2db, 0xf97507aa, 0x0f350797, 0xf7a9ef6d, 0xf7b40890, 0x110d0000, }, { // 8.4 MHz 0x00010002, 0xfffeffee, 0xffe8000f, 0x00540058, 0xffcdff14, 0xff29007e, 0x01f6019e, 0xff01fc7c, 0xfd5101bf, 0x059203f6, 0xfd41f7fe, 0xfaa903f3, 0x0a9e06a9, 0xfabdf2e2, 0xf842068b, 0x0f320871, 0xf85eef6e, 0xf7560860, 0x110d0000, }, { // 8.5 MHz 0x00000003, 0x0002fff2, 0xffe1fff9, 0x00460073, 0x000bff34, 0xfee90000, 0x01c10215, 0xffd0fcc5, 0xfc99009d, 0x053d04f1, 0xfea5f853, 0xf97d0270, 0x0a5607e4, 0xfc2ef314, 0xf723055f, 0x0f180943, 0xf919ef75, 0xf6fa0830, 0x110d0000, }, { // 8.6 MHz 0x00000003, 0x0005fff8, 0xffdeffe4, 0x002f007f, 0x0048ff6b, 0xfec7ff82, 0x0163025f, 0x00a2fd47, 0xfc17ff73, 0x04a405b2, 0x0017f8ed, 0xf88500dc, 0x09d208f9, 0xfdaff370, 0xf61c0429, 0x0ee80a0b, 0xf9d8ef82, 0xf6a00800, 0x110d0000, }, { // 8.7 MHz 0x00000003, 0x0007ffff, 0xffe1ffd4, 0x0010007a, 0x007cffb2, 0xfec6ff10, 0x00e60277, 0x0168fdf9, 0xfbd3fe50, 0x03ce0631, 0x0188f9c8, 0xf7c7ff43, 0x091509e3, 0xff39f3f6, 0xf52d02ea, 0x0ea30ac9, 0xfa9bef95, 0xf64607d0, 0x110d0000, }, { // 8.8 MHz 0x00000002, 0x00090007, 0xffe9ffca, 0xfff00065, 0x00a10003, 0xfee6feb6, 0x0053025b, 0x0213fed0, 0xfbd3fd46, 0x02c70668, 0x02eafadb, 0xf74bfdae, 0x08230a9c, 0x00c7f4a3, 0xf45b01a6, 0x0e480b7c, 0xfb61efae, 0xf5ef079f, 0x110d0000, }, { // 8.9 MHz 0xffff0000, 0x0008000d, 0xfff5ffc8, 0xffd10043, 0x00b20053, 0xff24fe7c, 0xffb9020c, 0x0295ffbb, 0xfc17fc64, 0x019b0654, 0x042dfc1c, 0xf714fc2a, 0x07020b21, 0x0251f575, 0xf3a7005e, 0x0dd80c24, 0xfc2aefcd, 0xf599076e, 0x110d0000, }, { // 9.0 MHz 0xffffffff, 0x00060011, 0x0002ffcf, 0xffba0018, 0x00ad009a, 0xff79fe68, 0xff260192, 0x02e500ab, 0xfc99fbb6, 0x005b05f7, 0x0545fd81, 0xf723fabf, 0x05b80b70, 0x03d2f669, 0xf313ff15, 0x0d550cbf, 0xfcf6eff2, 0xf544073d, 0x110d0000, }, { // 9.1 MHz 0xfffffffe, 0x00030012, 0x000fffdd, 0xffacffea, 0x009300cf, 0xffdcfe7c, 0xfea600f7, 
0x02fd0190, 0xfd51fb46, 0xff150554, 0x0627fefd, 0xf778f978, 0x044d0b87, 0x0543f77d, 0xf2a0fdcf, 0x0cbe0d4e, 0xfdc4f01d, 0xf4f2070b, 0x110d0000, }, { // 9.2 MHz 0x0000fffd, 0x00000010, 0x001afff0, 0xffaaffbf, 0x006700ed, 0x0043feb6, 0xfe460047, 0x02db0258, 0xfe35fb1b, 0xfddc0473, 0x06c90082, 0xf811f85e, 0x02c90b66, 0x069ff8ad, 0xf250fc8d, 0x0c140dcf, 0xfe93f04d, 0xf4a106d9, 0x110d0000, }, { // 9.3 MHz 0x0000fffd, 0xfffc000c, 0x00200006, 0xffb4ff9c, 0x002f00ef, 0x00a4ff10, 0xfe0dff92, 0x028102f7, 0xff36fb37, 0xfcbf035e, 0x07260202, 0xf8e8f778, 0x01340b0d, 0x07e1f9f4, 0xf223fb51, 0x0b590e42, 0xff64f083, 0xf45206a7, 0x110d0000, }, { // 9.4 MHz 0x0000fffd, 0xfff90005, 0x0022001a, 0xffc9ff86, 0xfff000d7, 0x00f2ff82, 0xfe01fee5, 0x01f60362, 0x0044fb99, 0xfbcc0222, 0x07380370, 0xf9f7f6cc, 0xff990a7e, 0x0902fb50, 0xf21afa1f, 0x0a8d0ea6, 0x0034f0bf, 0xf4050675, 0x110d0000, }, { // 9.5 MHz 0x0000fffe, 0xfff8fffe, 0x001e002b, 0xffe5ff81, 0xffb400a5, 0x01280000, 0xfe24fe50, 0x01460390, 0x014dfc3a, 0xfb1000ce, 0x070104bf, 0xfb37f65f, 0xfe0009bc, 0x0a00fcbb, 0xf235f8f8, 0x09b20efc, 0x0105f101, 0xf3ba0642, 0x110d0000, }, { // 9.6 MHz 0x0001ffff, 0xfff8fff7, 0x00150036, 0x0005ff8c, 0xff810061, 0x013d007e, 0xfe71fddf, 0x007c0380, 0x0241fd13, 0xfa94ff70, 0x068005e2, 0xfc9bf633, 0xfc7308ca, 0x0ad5fe30, 0xf274f7e0, 0x08c90f43, 0x01d4f147, 0xf371060f, 0x110d0000, }, { // 9.7 MHz 0x00010001, 0xfff9fff1, 0x00090038, 0x0025ffa7, 0xff5e0012, 0x013200f0, 0xfee3fd9b, 0xffaa0331, 0x0311fe15, 0xfa60fe18, 0x05bd06d1, 0xfe1bf64a, 0xfafa07ae, 0x0b7effab, 0xf2d5f6d7, 0x07d30f7a, 0x02a3f194, 0xf32905dc, 0x110d0000, }, { // 9.8 MHz 0x00010002, 0xfffcffee, 0xfffb0032, 0x003fffcd, 0xff4effc1, 0x0106014a, 0xff6efd8a, 0xfedd02aa, 0x03b0ff34, 0xfa74fcd7, 0x04bf0781, 0xffaaf6a3, 0xf99e066b, 0x0bf90128, 0xf359f5e1, 0x06d20fa2, 0x0370f1e5, 0xf2e405a8, 0x110d0000, }, { // 9.9 MHz 0x00000003, 0xffffffee, 0xffef0024, 0x0051fffa, 0xff54ff77, 0x00be0184, 0x0006fdad, 0xfe2701f3, 0x0413005e, 0xfad1fbba, 0x039007ee, 0x013bf73d, 0xf868050a, 0x0c4302a1, 0xf3fdf4fe, 0x05c70fba, 0x043bf23c, 0xf2a10575, 0x110d0000, }, { // 10.0 MHz 0x00000003, 0x0003fff1, 0xffe50011, 0x00570027, 0xff70ff3c, 0x00620198, 0x009efe01, 0xfd95011a, 0x04350183, 0xfb71fad0, 0x023c0812, 0x02c3f811, 0xf75e0390, 0x0c5c0411, 0xf4c1f432, 0x04b30fc1, 0x0503f297, 0xf2610541, 0x110d0000, }, { // 10.1 MHz 0x00000003, 0x0006fff7, 0xffdffffc, 0x00510050, 0xff9dff18, 0xfffc0184, 0x0128fe80, 0xfd32002e, 0x04130292, 0xfc4dfa21, 0x00d107ee, 0x0435f91c, 0xf6850205, 0x0c430573, 0xf5a1f37d, 0x03990fba, 0x05c7f2f8, 0xf222050d, 0x110d0000, }, { // 10.2 MHz 0x00000002, 0x0008fffe, 0xffdfffe7, 0x003f006e, 0xffd6ff0f, 0xff96014a, 0x0197ff1f, 0xfd05ff3e, 0x03b0037c, 0xfd59f9b7, 0xff5d0781, 0x0585fa56, 0xf5e4006f, 0x0bf906c4, 0xf69df2e0, 0x02790fa2, 0x0688f35d, 0xf1e604d8, 0x110d0000, }, { // 10.3 MHz 0xffff0001, 0x00090005, 0xffe4ffd6, 0x0025007e, 0x0014ff20, 0xff3c00f0, 0x01e1ffd0, 0xfd12fe5c, 0x03110433, 0xfe88f996, 0xfdf106d1, 0x06aafbb7, 0xf57efed8, 0x0b7e07ff, 0xf7b0f25e, 0x01560f7a, 0x0745f3c7, 0xf1ac04a4, 0x110d0000, }, { // 10.4 MHz 0xffffffff, 0x0008000c, 0xffedffcb, 0x0005007d, 0x0050ff4c, 0xfef6007e, 0x01ff0086, 0xfd58fd97, 0x024104ad, 0xffcaf9c0, 0xfc9905e2, 0x079afd35, 0xf555fd46, 0x0ad50920, 0xf8d9f1f6, 0x00310f43, 0x07fdf435, 0xf174046f, 0x110d0000, }, { // 10.5 MHz 0xfffffffe, 0x00050011, 0xfffaffc8, 0xffe5006b, 0x0082ff8c, 0xfecc0000, 0x01f00130, 0xfdd2fcfc, 0x014d04e3, 0x010efa32, 0xfb6404bf, 0x084efec5, 0xf569fbc2, 0x0a000a23, 0xfa15f1ab, 0xff0b0efc, 0x08b0f4a7, 0xf13f043a, 
0x110d0000, }, { // 10.6 MHz 0x0000fffd, 0x00020012, 0x0007ffcd, 0xffc9004c, 0x00a4ffd9, 0xfec3ff82, 0x01b401c1, 0xfe76fc97, 0x004404d2, 0x0245fae8, 0xfa5f0370, 0x08c1005f, 0xf5bcfa52, 0x09020b04, 0xfb60f17b, 0xfde70ea6, 0x095df51e, 0xf10c0405, 0x110d0000, }, { // 10.7 MHz 0x0000fffd, 0xffff0011, 0x0014ffdb, 0xffb40023, 0x00b2002a, 0xfedbff10, 0x0150022d, 0xff38fc6f, 0xff36047b, 0x035efbda, 0xf9940202, 0x08ee01f5, 0xf649f8fe, 0x07e10bc2, 0xfcb6f169, 0xfcc60e42, 0x0a04f599, 0xf0db03d0, 0x110d0000, }, { // 10.8 MHz 0x0000fffd, 0xfffb000d, 0x001dffed, 0xffaafff5, 0x00aa0077, 0xff13feb6, 0x00ce026b, 0x000afc85, 0xfe3503e3, 0x044cfcfb, 0xf90c0082, 0x08d5037f, 0xf710f7cc, 0x069f0c59, 0xfe16f173, 0xfbaa0dcf, 0x0aa5f617, 0xf0ad039b, 0x110d0000, }, { // 10.9 MHz 0x0000fffe, 0xfff90006, 0x00210003, 0xffacffc8, 0x008e00b6, 0xff63fe7c, 0x003a0275, 0x00dafcda, 0xfd510313, 0x0501fe40, 0xf8cbfefd, 0x087604f0, 0xf80af6c2, 0x05430cc8, 0xff7af19a, 0xfa940d4e, 0x0b3ff699, 0xf0810365, 0x110d0000, }, { // 11.0 MHz 0x0001ffff, 0xfff8ffff, 0x00210018, 0xffbaffa3, 0x006000e1, 0xffc4fe68, 0xffa0024b, 0x019afd66, 0xfc990216, 0x0575ff99, 0xf8d4fd81, 0x07d40640, 0xf932f5e6, 0x03d20d0d, 0x00dff1de, 0xf9860cbf, 0x0bd1f71e, 0xf058032f, 0x110d0000, }, { // 11.1 MHz 0x00010000, 0xfff8fff8, 0x001b0029, 0xffd1ff8a, 0x002600f2, 0x002cfe7c, 0xff0f01f0, 0x023bfe20, 0xfc1700fa, 0x05a200f7, 0xf927fc1c, 0x06f40765, 0xfa82f53b, 0x02510d27, 0x0243f23d, 0xf8810c24, 0x0c5cf7a7, 0xf03102fa, 0x110d0000, }, { // 11.2 MHz 0x00010002, 0xfffafff2, 0x00110035, 0xfff0ff81, 0xffe700e7, 0x008ffeb6, 0xfe94016d, 0x02b0fefb, 0xfbd3ffd1, 0x05850249, 0xf9c1fadb, 0x05de0858, 0xfbf2f4c4, 0x00c70d17, 0x03a0f2b8, 0xf7870b7c, 0x0cdff833, 0xf00d02c4, 0x110d0000, }, { // 11.3 MHz 0x00000003, 0xfffdffee, 0x00040038, 0x0010ff88, 0xffac00c2, 0x00e2ff10, 0xfe3900cb, 0x02f1ffe9, 0xfbd3feaa, 0x05210381, 0xfa9cf9c8, 0x04990912, 0xfd7af484, 0xff390cdb, 0x04f4f34d, 0xf69a0ac9, 0x0d5af8c1, 0xefec028e, 0x110d0000, }, { // 11.4 MHz 0x00000003, 0x0000ffee, 0xfff60033, 0x002fff9f, 0xff7b0087, 0x011eff82, 0xfe080018, 0x02f900d8, 0xfc17fd96, 0x04790490, 0xfbadf8ed, 0x032f098e, 0xff10f47d, 0xfdaf0c75, 0x063cf3fc, 0xf5ba0a0b, 0x0dccf952, 0xefcd0258, 0x110d0000, }, { // 11.5 MHz 0x00000003, 0x0004fff1, 0xffea0026, 0x0046ffc3, 0xff5a003c, 0x013b0000, 0xfe04ff63, 0x02c801b8, 0xfc99fca6, 0x0397056a, 0xfcecf853, 0x01ad09c9, 0x00acf4ad, 0xfc2e0be7, 0x0773f4c2, 0xf4e90943, 0x0e35f9e6, 0xefb10221, 0x110d0000, }, { // 11.6 MHz 0x00000002, 0x0007fff6, 0xffe20014, 0x0054ffee, 0xff4effeb, 0x0137007e, 0xfe2efebb, 0x0260027a, 0xfd51fbe6, 0x02870605, 0xfe4af7fe, 0x001d09c1, 0x0243f515, 0xfabd0b32, 0x0897f59e, 0xf4280871, 0x0e95fa7c, 0xef9701eb, 0x110d0000, }, { // 11.7 MHz 0xffff0001, 0x0008fffd, 0xffdeffff, 0x0056001d, 0xff57ff9c, 0x011300f0, 0xfe82fe2e, 0x01ca0310, 0xfe35fb62, 0x0155065a, 0xffbaf7f2, 0xfe8c0977, 0x03cef5b2, 0xf9610a58, 0x09a5f68f, 0xf3790797, 0x0eebfb14, 0xef8001b5, 0x110d0000, }, { // 11.8 MHz 0xffff0000, 0x00080004, 0xffe0ffe9, 0x004c0047, 0xff75ff58, 0x00d1014a, 0xfef9fdc8, 0x0111036f, 0xff36fb21, 0x00120665, 0x012df82e, 0xfd0708ec, 0x0542f682, 0xf81f095c, 0x0a9af792, 0xf2db06b5, 0x0f38fbad, 0xef6c017e, 0x110d0000, }, { // 11.9 MHz 0xffffffff, 0x0007000b, 0xffe7ffd8, 0x00370068, 0xffa4ff28, 0x00790184, 0xff87fd91, 0x00430392, 0x0044fb26, 0xfece0626, 0x0294f8b2, 0xfb990825, 0x0698f77f, 0xf6fe0842, 0x0b73f8a7, 0xf25105cd, 0x0f7bfc48, 0xef5a0148, 0x110d0000, }, { // 12.0 MHz 0x0000fffe, 0x00050010, 0xfff2ffcc, 0x001b007b, 0xffdfff10, 0x00140198, 0x0020fd8e, 0xff710375, 
0x014dfb73, 0xfd9a059f, 0x03e0f978, 0xfa4e0726, 0x07c8f8a7, 0xf600070c, 0x0c2ff9c9, 0xf1db04de, 0x0fb4fce5, 0xef4b0111, 0x110d0000, }, { // 12.1 MHz 0x0000fffd, 0x00010012, 0xffffffc8, 0xfffb007e, 0x001dff14, 0xffad0184, 0x00b7fdbe, 0xfea9031b, 0x0241fc01, 0xfc8504d6, 0x0504fa79, 0xf93005f6, 0x08caf9f2, 0xf52b05c0, 0x0ccbfaf9, 0xf17903eb, 0x0fe3fd83, 0xef3f00db, 0x110d0000, }, { // 12.2 MHz 0x0000fffd, 0xfffe0011, 0x000cffcc, 0xffdb0071, 0x0058ff32, 0xff4f014a, 0x013cfe1f, 0xfdfb028a, 0x0311fcc9, 0xfb9d03d6, 0x05f4fbad, 0xf848049d, 0x0999fb5b, 0xf4820461, 0x0d46fc32, 0xf12d02f4, 0x1007fe21, 0xef3600a4, 0x110d0000, }, { // 12.3 MHz 0x0000fffe, 0xfffa000e, 0x0017ffd9, 0xffc10055, 0x0088ff68, 0xff0400f0, 0x01a6fea7, 0xfd7501cc, 0x03b0fdc0, 0xfaef02a8, 0x06a7fd07, 0xf79d0326, 0x0a31fcda, 0xf40702f3, 0x0d9ffd72, 0xf0f601fa, 0x1021fec0, 0xef2f006d, 0x110d0000, }, { // 12.4 MHz 0x0001ffff, 0xfff80007, 0x001fffeb, 0xffaf002d, 0x00a8ffb0, 0xfed3007e, 0x01e9ff4c, 0xfd2000ee, 0x0413fed8, 0xfa82015c, 0x0715fe7d, 0xf7340198, 0x0a8dfe69, 0xf3bd017c, 0x0dd5feb8, 0xf0d500fd, 0x1031ff60, 0xef2b0037, 0x110d0000, }, { // 12.5 MHz 0x00010000, 0xfff70000, 0x00220000, 0xffa90000, 0x00b30000, 0xfec20000, 0x02000000, 0xfd030000, 0x04350000, 0xfa5e0000, 0x073b0000, 0xf7110000, 0x0aac0000, 0xf3a40000, 0x0de70000, 0xf0c90000, 0x10360000, 0xef290000, 0x110d0000, }, { // 12.6 MHz 0x00010001, 0xfff8fff9, 0x001f0015, 0xffafffd3, 0x00a80050, 0xfed3ff82, 0x01e900b4, 0xfd20ff12, 0x04130128, 0xfa82fea4, 0x07150183, 0xf734fe68, 0x0a8d0197, 0xf3bdfe84, 0x0dd50148, 0xf0d5ff03, 0x103100a0, 0xef2bffc9, 0x110d0000, }, { // 12.7 MHz 0x00000002, 0xfffafff2, 0x00170027, 0xffc1ffab, 0x00880098, 0xff04ff10, 0x01a60159, 0xfd75fe34, 0x03b00240, 0xfaeffd58, 0x06a702f9, 0xf79dfcda, 0x0a310326, 0xf407fd0d, 0x0d9f028e, 0xf0f6fe06, 0x10210140, 0xef2fff93, 0x110d0000, }, { // 12.8 MHz 0x00000003, 0xfffeffef, 0x000c0034, 0xffdbff8f, 0x005800ce, 0xff4ffeb6, 0x013c01e1, 0xfdfbfd76, 0x03110337, 0xfb9dfc2a, 0x05f40453, 0xf848fb63, 0x099904a5, 0xf482fb9f, 0x0d4603ce, 0xf12dfd0c, 0x100701df, 0xef36ff5c, 0x110d0000, }, { // 12.9 MHz 0x00000003, 0x0001ffee, 0xffff0038, 0xfffbff82, 0x001d00ec, 0xffadfe7c, 0x00b70242, 0xfea9fce5, 0x024103ff, 0xfc85fb2a, 0x05040587, 0xf930fa0a, 0x08ca060e, 0xf52bfa40, 0x0ccb0507, 0xf179fc15, 0x0fe3027d, 0xef3fff25, 0x110d0000, }, { // 13.0 MHz 0x00000002, 0x0005fff0, 0xfff20034, 0x001bff85, 0xffdf00f0, 0x0014fe68, 0x00200272, 0xff71fc8b, 0x014d048d, 0xfd9afa61, 0x03e00688, 0xfa4ef8da, 0x07c80759, 0xf600f8f4, 0x0c2f0637, 0xf1dbfb22, 0x0fb4031b, 0xef4bfeef, 0x110d0000, }, { // 13.1 MHz 0xffff0001, 0x0007fff5, 0xffe70028, 0x0037ff98, 0xffa400d8, 0x0079fe7c, 0xff87026f, 0x0043fc6e, 0x004404da, 0xfecef9da, 0x0294074e, 0xfb99f7db, 0x06980881, 0xf6fef7be, 0x0b730759, 0xf251fa33, 0x0f7b03b8, 0xef5afeb8, 0x110d0000, }, { // 13.2 MHz 0xffff0000, 0x0008fffc, 0xffe00017, 0x004cffb9, 0xff7500a8, 0x00d1feb6, 0xfef90238, 0x0111fc91, 0xff3604df, 0x0012f99b, 0x012d07d2, 0xfd07f714, 0x0542097e, 0xf81ff6a4, 0x0a9a086e, 0xf2dbf94b, 0x0f380453, 0xef6cfe82, 0x110d0000, }, { // 13.3 MHz 0xffffffff, 0x00080003, 0xffde0001, 0x0056ffe3, 0xff570064, 0x0113ff10, 0xfe8201d2, 0x01cafcf0, 0xfe35049e, 0x0155f9a6, 0xffba080e, 0xfe8cf689, 0x03ce0a4e, 0xf961f5a8, 0x09a50971, 0xf379f869, 0x0eeb04ec, 0xef80fe4b, 0x110d0000, }, { // 13.4 MHz 0x0000fffe, 0x0007000a, 0xffe2ffec, 0x00540012, 0xff4e0015, 0x0137ff82, 0xfe2e0145, 0x0260fd86, 0xfd51041a, 0x0287f9fb, 0xfe4a0802, 0x001df63f, 0x02430aeb, 0xfabdf4ce, 0x08970a62, 0xf428f78f, 0x0e950584, 0xef97fe15, 
0x110d0000, }, { // 13.5 MHz 0x0000fffd, 0x0004000f, 0xffeaffda, 0x0046003d, 0xff5affc4, 0x013b0000, 0xfe04009d, 0x02c8fe48, 0xfc99035a, 0x0397fa96, 0xfcec07ad, 0x01adf637, 0x00ac0b53, 0xfc2ef419, 0x07730b3e, 0xf4e9f6bd, 0x0e35061a, 0xefb1fddf, 0x110d0000, }, { // 13.6 MHz 0x0000fffd, 0x00000012, 0xfff6ffcd, 0x002f0061, 0xff7bff79, 0x011e007e, 0xfe08ffe8, 0x02f9ff28, 0xfc17026a, 0x0479fb70, 0xfbad0713, 0x032ff672, 0xff100b83, 0xfdaff38b, 0x063c0c04, 0xf5baf5f5, 0x0dcc06ae, 0xefcdfda8, 0x110d0000, }, { // 13.7 MHz 0x0000fffd, 0xfffd0012, 0x0004ffc8, 0x00100078, 0xffacff3e, 0x00e200f0, 0xfe39ff35, 0x02f10017, 0xfbd30156, 0x0521fc7f, 0xfa9c0638, 0x0499f6ee, 0xfd7a0b7c, 0xff39f325, 0x04f40cb3, 0xf69af537, 0x0d5a073f, 0xefecfd72, 0x110d0000, }, { // 13.8 MHz 0x0001fffe, 0xfffa000e, 0x0011ffcb, 0xfff0007f, 0xffe7ff19, 0x008f014a, 0xfe94fe93, 0x02b00105, 0xfbd3002f, 0x0585fdb7, 0xf9c10525, 0x05def7a8, 0xfbf20b3c, 0x00c7f2e9, 0x03a00d48, 0xf787f484, 0x0cdf07cd, 0xf00dfd3c, 0x110d0000, }, { // 13.9 MHz 0x00010000, 0xfff80008, 0x001bffd7, 0xffd10076, 0x0026ff0e, 0x002c0184, 0xff0ffe10, 0x023b01e0, 0xfc17ff06, 0x05a2ff09, 0xf92703e4, 0x06f4f89b, 0xfa820ac5, 0x0251f2d9, 0x02430dc3, 0xf881f3dc, 0x0c5c0859, 0xf031fd06, 0x110d0000, }, { // 14.0 MHz 0x00010001, 0xfff80001, 0x0021ffe8, 0xffba005d, 0x0060ff1f, 0xffc40198, 0xffa0fdb5, 0x019a029a, 0xfc99fdea, 0x05750067, 0xf8d4027f, 0x07d4f9c0, 0xf9320a1a, 0x03d2f2f3, 0x00df0e22, 0xf986f341, 0x0bd108e2, 0xf058fcd1, 0x110d0000, }, { // 14.1 MHz 0x00000002, 0xfff9fffa, 0x0021fffd, 0xffac0038, 0x008eff4a, 0xff630184, 0x003afd8b, 0x00da0326, 0xfd51fced, 0x050101c0, 0xf8cb0103, 0x0876fb10, 0xf80a093e, 0x0543f338, 0xff7a0e66, 0xfa94f2b2, 0x0b3f0967, 0xf081fc9b, 0x110d0000, }, { // 14.2 MHz 0x00000003, 0xfffbfff3, 0x001d0013, 0xffaa000b, 0x00aaff89, 0xff13014a, 0x00cefd95, 0x000a037b, 0xfe35fc1d, 0x044c0305, 0xf90cff7e, 0x08d5fc81, 0xf7100834, 0x069ff3a7, 0xfe160e8d, 0xfbaaf231, 0x0aa509e9, 0xf0adfc65, 0x110d0000, }, { // 14.3 MHz 0x00000003, 0xffffffef, 0x00140025, 0xffb4ffdd, 0x00b2ffd6, 0xfedb00f0, 0x0150fdd3, 0xff380391, 0xff36fb85, 0x035e0426, 0xf994fdfe, 0x08eefe0b, 0xf6490702, 0x07e1f43e, 0xfcb60e97, 0xfcc6f1be, 0x0a040a67, 0xf0dbfc30, 0x110d0000, }, { // 14.4 MHz 0x00000003, 0x0002ffee, 0x00070033, 0xffc9ffb4, 0x00a40027, 0xfec3007e, 0x01b4fe3f, 0xfe760369, 0x0044fb2e, 0x02450518, 0xfa5ffc90, 0x08c1ffa1, 0xf5bc05ae, 0x0902f4fc, 0xfb600e85, 0xfde7f15a, 0x095d0ae2, 0xf10cfbfb, 0x110d0000, }, { // 14.5 MHz 0xffff0002, 0x0005ffef, 0xfffa0038, 0xffe5ff95, 0x00820074, 0xfecc0000, 0x01f0fed0, 0xfdd20304, 0x014dfb1d, 0x010e05ce, 0xfb64fb41, 0x084e013b, 0xf569043e, 0x0a00f5dd, 0xfa150e55, 0xff0bf104, 0x08b00b59, 0xf13ffbc6, 0x110d0000, }, { // 14.6 MHz 0xffff0001, 0x0008fff4, 0xffed0035, 0x0005ff83, 0x005000b4, 0xfef6ff82, 0x01ffff7a, 0xfd580269, 0x0241fb53, 0xffca0640, 0xfc99fa1e, 0x079a02cb, 0xf55502ba, 0x0ad5f6e0, 0xf8d90e0a, 0x0031f0bd, 0x07fd0bcb, 0xf174fb91, 0x110d0000, }, { // 14.7 MHz 0xffffffff, 0x0009fffb, 0xffe4002a, 0x0025ff82, 0x001400e0, 0xff3cff10, 0x01e10030, 0xfd1201a4, 0x0311fbcd, 0xfe88066a, 0xfdf1f92f, 0x06aa0449, 0xf57e0128, 0x0b7ef801, 0xf7b00da2, 0x0156f086, 0x07450c39, 0xf1acfb5c, 0x110d0000, }, { // 14.8 MHz 0x0000fffe, 0x00080002, 0xffdf0019, 0x003fff92, 0xffd600f1, 0xff96feb6, 0x019700e1, 0xfd0500c2, 0x03b0fc84, 0xfd590649, 0xff5df87f, 0x058505aa, 0xf5e4ff91, 0x0bf9f93c, 0xf69d0d20, 0x0279f05e, 0x06880ca3, 0xf1e6fb28, 0x110d0000, }, { // 14.9 MHz 0x0000fffd, 0x00060009, 0xffdf0004, 0x0051ffb0, 0xff9d00e8, 0xfffcfe7c, 0x01280180, 0xfd32ffd2, 
	0x0413fd6e, 0xfc4d05df, 0x00d1f812, 0x043506e4, 0xf685fdfb, 0x0c43fa8d, 0xf5a10c83, 0x0399f046, 0x05c70d08, 0xf222faf3, 0x110d0000,
}, { // 15.0 MHz
	0x0000fffd, 0x0003000f, 0xffe5ffef, 0x0057ffd9, 0xff7000c4, 0x0062fe68, 0x009e01ff, 0xfd95fee6, 0x0435fe7d, 0xfb710530, 0x023cf7ee, 0x02c307ef, 0xf75efc70, 0x0c5cfbef, 0xf4c10bce, 0x04b3f03f, 0x05030d69, 0xf261fabf, 0x110d0000,
}, { // 15.1 MHz
	0x0000fffd, 0xffff0012, 0xffefffdc, 0x00510006, 0xff540089, 0x00befe7c, 0x00060253, 0xfe27fe0d, 0x0413ffa2, 0xfad10446, 0x0390f812, 0x013b08c3, 0xf868faf6, 0x0c43fd5f, 0xf3fd0b02, 0x05c7f046, 0x043b0dc4, 0xf2a1fa8b, 0x110d0000,
}, { // 15.2 MHz
	0x0001fffe, 0xfffc0012, 0xfffbffce, 0x003f0033, 0xff4e003f, 0x0106feb6, 0xff6e0276, 0xfeddfd56, 0x03b000cc, 0xfa740329, 0x04bff87f, 0xffaa095d, 0xf99ef995, 0x0bf9fed8, 0xf3590a1f, 0x06d2f05e, 0x03700e1b, 0xf2e4fa58, 0x110d0000,
}, { // 15.3 MHz
	0x0001ffff, 0xfff9000f, 0x0009ffc8, 0x00250059, 0xff5effee, 0x0132ff10, 0xfee30265, 0xffaafccf, 0x031101eb, 0xfa6001e8, 0x05bdf92f, 0xfe1b09b6, 0xfafaf852, 0x0b7e0055, 0xf2d50929, 0x07d3f086, 0x02a30e6c, 0xf329fa24, 0x110d0000,
}, { // 15.4 MHz
	0x00010001, 0xfff80009, 0x0015ffca, 0x00050074, 0xff81ff9f, 0x013dff82, 0xfe710221, 0x007cfc80, 0x024102ed, 0xfa940090, 0x0680fa1e, 0xfc9b09cd, 0xfc73f736, 0x0ad501d0, 0xf2740820, 0x08c9f0bd, 0x01d40eb9, 0xf371f9f1, 0x110d0000,
}, { // 15.5 MHz
	0x00000002, 0xfff80002, 0x001effd5, 0xffe5007f, 0xffb4ff5b, 0x01280000, 0xfe2401b0, 0x0146fc70, 0x014d03c6, 0xfb10ff32, 0x0701fb41, 0xfb3709a1, 0xfe00f644, 0x0a000345, 0xf2350708, 0x09b2f104, 0x01050eff, 0xf3baf9be, 0x110d0000,
}, { // 15.6 MHz
	0x00000003, 0xfff9fffb, 0x0022ffe6, 0xffc9007a, 0xfff0ff29, 0x00f2007e, 0xfe01011b, 0x01f6fc9e, 0x00440467, 0xfbccfdde, 0x0738fc90, 0xf9f70934, 0xff99f582, 0x090204b0, 0xf21a05e1, 0x0a8df15a, 0x00340f41, 0xf405f98b, 0x110d0000,
}, { // 15.7 MHz
	0x00000003, 0xfffcfff4, 0x0020fffa, 0xffb40064, 0x002fff11, 0x00a400f0, 0xfe0d006e, 0x0281fd09, 0xff3604c9, 0xfcbffca2, 0x0726fdfe, 0xf8e80888, 0x0134f4f3, 0x07e1060c, 0xf22304af, 0x0b59f1be, 0xff640f7d, 0xf452f959, 0x110d0000,
}, { // 15.8 MHz
	0x00000003, 0x0000fff0, 0x001a0010, 0xffaa0041, 0x0067ff13, 0x0043014a, 0xfe46ffb9, 0x02dbfda8, 0xfe3504e5, 0xfddcfb8d, 0x06c9ff7e, 0xf81107a2, 0x02c9f49a, 0x069f0753, 0xf2500373, 0x0c14f231, 0xfe930fb3, 0xf4a1f927, 0x110d0000,
}, { // 15.9 MHz
	0xffff0002, 0x0003ffee, 0x000f0023, 0xffac0016, 0x0093ff31, 0xffdc0184, 0xfea6ff09, 0x02fdfe70, 0xfd5104ba, 0xff15faac, 0x06270103, 0xf7780688, 0x044df479, 0x05430883, 0xf2a00231, 0x0cbef2b2, 0xfdc40fe3, 0xf4f2f8f5, 0x110d0000,
}, { // 16.0 MHz
	0xffff0001, 0x0006ffef, 0x00020031, 0xffbaffe8, 0x00adff66, 0xff790198, 0xff26fe6e, 0x02e5ff55, 0xfc99044a, 0x005bfa09, 0x0545027f, 0xf7230541, 0x05b8f490, 0x03d20997, 0xf31300eb, 0x0d55f341, 0xfcf6100e, 0xf544f8c3, 0x110d0000,
}
};

static void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
{
	u64 pll_freq;
	u32 pll_freq_word;
	const u32 *coeffs;

	v4l_dbg(1, cx25840_debug, client, "%s(%d)\n", __func__, ifHz);

	/* Assuming TV */
	/* Calculate the PLL frequency word based on the adjusted ifHz */
	pll_freq = div_u64((u64)ifHz * 268435456, 50000000);
	pll_freq_word = (u32)pll_freq;

	cx25840_write4(client, DIF_PLL_FREQ_WORD, pll_freq_word);

	/* Round down to the nearest 100KHz */
	ifHz = (ifHz / 100000) * 100000;

	if (ifHz < 3000000)
		ifHz = 3000000;
	if (ifHz > 16000000)
		ifHz = 16000000;

	v4l_dbg(1, cx25840_debug, client, "%s(%d) again\n", __func__, ifHz);

	coeffs = ifhz_coeffs[(ifHz - 3000000) / 100000];
	cx25840_write4(client, DIF_BPF_COEFF01, coeffs[0]);
	cx25840_write4(client, DIF_BPF_COEFF23, coeffs[1]);
	cx25840_write4(client, DIF_BPF_COEFF45, coeffs[2]);
	cx25840_write4(client, DIF_BPF_COEFF67, coeffs[3]);
	cx25840_write4(client, DIF_BPF_COEFF89, coeffs[4]);
	cx25840_write4(client, DIF_BPF_COEFF1011, coeffs[5]);
	cx25840_write4(client, DIF_BPF_COEFF1213, coeffs[6]);
	cx25840_write4(client, DIF_BPF_COEFF1415, coeffs[7]);
	cx25840_write4(client, DIF_BPF_COEFF1617, coeffs[8]);
	cx25840_write4(client, DIF_BPF_COEFF1819, coeffs[9]);
	cx25840_write4(client, DIF_BPF_COEFF2021, coeffs[10]);
	cx25840_write4(client, DIF_BPF_COEFF2223, coeffs[11]);
	cx25840_write4(client, DIF_BPF_COEFF2425, coeffs[12]);
	cx25840_write4(client, DIF_BPF_COEFF2627, coeffs[13]);
	cx25840_write4(client, DIF_BPF_COEFF2829, coeffs[14]);
	cx25840_write4(client, DIF_BPF_COEFF3031, coeffs[15]);
	cx25840_write4(client, DIF_BPF_COEFF3233, coeffs[16]);
	cx25840_write4(client, DIF_BPF_COEFF3435, coeffs[17]);
	cx25840_write4(client, DIF_BPF_COEFF36, coeffs[18]);
}
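/*
 * Worked example of the arithmetic above, for the 5.4 MHz NTSC IF that
 * cx23888_std_setup() below programs:
 *
 *	pll_freq_word = 5400000 * 2^28 / 50000000
 *	              = 28991029 (truncated) = 0x01ba5e35
 *	table index   = (5400000 - 3000000) / 100000 = 24
 *
 * so ifhz_coeffs[24], the "5.4 MHz" row, is loaded into the DIF
 * band-pass filter coefficient registers.
 */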
static void cx23888_std_setup(struct i2c_client *client)
{
	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
	v4l2_std_id std = state->std;
	u32 ifHz;

	cx25840_write4(client, 0x478, 0x6628021F);
	cx25840_write4(client, 0x400, 0x0);
	cx25840_write4(client, 0x4b4, 0x20524030);
	cx25840_write4(client, 0x47c, 0x010a8263);

	if (std & V4L2_STD_525_60) {
		v4l_dbg(1, cx25840_debug, client, "%s() Selecting NTSC",
			__func__);

		/* Horiz / vert timing */
		cx25840_write4(client, 0x428, 0x1e1e601a);
		cx25840_write4(client, 0x424, 0x5b2d007a);

		/* DIF NTSC */
		cx25840_write4(client, 0x304, 0x6503bc0c);
		cx25840_write4(client, 0x308, 0xbd038c85);
		cx25840_write4(client, 0x30c, 0x1db4640a);
		cx25840_write4(client, 0x310, 0x00008800);
		cx25840_write4(client, 0x314, 0x44400400);
		cx25840_write4(client, 0x32c, 0x0c800800);
		cx25840_write4(client, 0x330, 0x27000100);
		cx25840_write4(client, 0x334, 0x1f296e1f);
		cx25840_write4(client, 0x338, 0x009f50c1);
		cx25840_write4(client, 0x340, 0x1befbf06);
		cx25840_write4(client, 0x344, 0x000035e8);

		/* DIF I/F */
		ifHz = 5400000;
	} else {
		v4l_dbg(1, cx25840_debug, client, "%s() Selecting PAL-BG",
			__func__);

		/* Horiz / vert timing */
		cx25840_write4(client, 0x428, 0x28244024);
		cx25840_write4(client, 0x424, 0x5d2d0084);

		/* DIF */
		cx25840_write4(client, 0x304, 0x6503bc0c);
		cx25840_write4(client, 0x308, 0xbd038c85);
		cx25840_write4(client, 0x30c, 0x1db4640a);
		cx25840_write4(client, 0x310, 0x00008800);
		cx25840_write4(client, 0x314, 0x44400600);
		cx25840_write4(client, 0x32c, 0x0c800800);
		cx25840_write4(client, 0x330, 0x27000100);
		cx25840_write4(client, 0x334, 0x213530ec);
		cx25840_write4(client, 0x338, 0x00a65ba8);
		cx25840_write4(client, 0x340, 0x1befbf06);
		cx25840_write4(client, 0x344, 0x000035e8);

		/* DIF I/F */
		ifHz = 6000000;
	}

	cx23885_dif_setup(client, ifHz);

	/*
	 * Explicitly ensure the inputs are reconfigured after
	 * a standard change.
	 */
	set_input(client, state->vid_input, state->aud_input);
}

/* ----------------------------------------------------------------------- */

static const struct v4l2_ctrl_ops cx25840_ctrl_ops = {
	.s_ctrl = cx25840_s_ctrl,
};

static const struct v4l2_subdev_core_ops cx25840_core_ops = {
	.log_status = cx25840_log_status,
	.reset = cx25840_reset,
	/* calling the (optional) init op will turn on the generic mode */
	.init = cx25840_init,
	.load_fw = cx25840_load_fw,
	.s_io_pin_config = common_s_io_pin_config,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = cx25840_g_register,
	.s_register = cx25840_s_register,
#endif
	.interrupt_service_routine = cx25840_irq_handler,
};

static const struct v4l2_subdev_tuner_ops cx25840_tuner_ops = {
	.s_frequency = cx25840_s_frequency,
	.s_radio = cx25840_s_radio,
	.g_tuner = cx25840_g_tuner,
	.s_tuner = cx25840_s_tuner,
};

static const struct v4l2_subdev_audio_ops cx25840_audio_ops = {
	.s_clock_freq = cx25840_s_clock_freq,
	.s_routing = cx25840_s_audio_routing,
	.s_stream = cx25840_s_audio_stream,
};

static const struct v4l2_subdev_video_ops cx25840_video_ops = {
	.g_std = cx25840_g_std,
	.s_std = cx25840_s_std,
	.querystd = cx25840_querystd,
	.s_routing = cx25840_s_video_routing,
	.s_stream = cx25840_s_stream,
	.g_input_status = cx25840_g_input_status,
};

static const struct v4l2_subdev_vbi_ops cx25840_vbi_ops = {
	.decode_vbi_line = cx25840_decode_vbi_line,
	.s_raw_fmt = cx25840_s_raw_fmt,
	.s_sliced_fmt = cx25840_s_sliced_fmt,
	.g_sliced_fmt = cx25840_g_sliced_fmt,
};

static const struct v4l2_subdev_pad_ops cx25840_pad_ops = {
	.set_fmt = cx25840_set_fmt,
};

static const struct v4l2_subdev_ops cx25840_ops = {
	.core = &cx25840_core_ops,
	.tuner = &cx25840_tuner_ops,
	.audio = &cx25840_audio_ops,
	.video = &cx25840_video_ops,
	.vbi = &cx25840_vbi_ops,
	.pad = &cx25840_pad_ops,
	.ir = &cx25840_ir_ops,
};
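/*
 * A bridge driver never calls the handlers above directly; it goes
 * through the v4l2 sub-device API. A minimal sketch, assuming "sd" is
 * the struct v4l2_subdev registered by this driver and "freq" is a
 * caller-provided struct v4l2_frequency:
 *
 *	#include <media/v4l2-device.h>
 *
 *	v4l2_subdev_call(sd, video, s_std, V4L2_STD_NTSC_M);
 *	v4l2_subdev_call(sd, tuner, s_frequency, &freq);
 *
 * which dispatches to cx25840_s_std() and cx25840_s_frequency() via
 * cx25840_video_ops and cx25840_tuner_ops.
 */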
/* ----------------------------------------------------------------------- */

static u32 get_cx2388x_ident(struct i2c_client *client)
{
	u32 ret;

	/* Come out of digital power down */
	cx25840_write(client, 0x000, 0);

	/*
	 * Detecting whether the part is cx23885/7/8 is more
	 * difficult than it needs to be. No ID register. Instead we
	 * probe certain registers indicated in the datasheets to look
	 * for specific defaults that differ between the silicon designs.
	 */

	/* It's either 885/7 if the IR Tx Clk Divider register exists */
	if (cx25840_read4(client, 0x204) & 0xffff) {
		/*
		 * CX23885 returns bogus repetitive byte values for the DIF,
		 * which doesn't exist for it. (Ex. 8a8a8a8a or 31313131)
		 */
		ret = cx25840_read4(client, 0x300);
		if (((ret & 0xffff0000) >> 16) == (ret & 0xffff)) {
			/* No DIF */
			ret = CX23885_AV;
		} else {
			/*
			 * CX23887 has a broken DIF, but the registers
			 * appear valid (but unused), good enough to detect.
			 */
			ret = CX23887_AV;
		}
	} else if (cx25840_read4(client, 0x300) & 0x0fffffff) {
		/* DIF PLL Freq Word reg exists; chip must be a CX23888 */
		ret = CX23888_AV;
	} else {
		v4l_err(client, "Unable to detect h/w, assuming cx23887\n");
		ret = CX23887_AV;
	}

	/* Back into digital power down */
	cx25840_write(client, 0x000, 2);
	return ret;
}
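/*
 * Illustration of the ID decoding in cx25840_probe() below (example
 * values, not exhaustive): a device_id with high byte 0x84 selects the
 * cx2584x branch, so id = CX25840 + ((device_id >> 4) & 0xf), e.g.
 * 0x8410 -> CX25841. A device_id of 0x0000 means one of the cx2388x
 * parts, whose exact flavour is resolved by get_cx2388x_ident() above.
 */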
static int cx25840_probe(struct i2c_client *client)
{
	struct cx25840_state *state;
	struct v4l2_subdev *sd;
	int default_volume;
	u32 id;
	u16 device_id;
#if defined(CONFIG_MEDIA_CONTROLLER)
	int ret;
#endif

	/* Check if the adapter supports the needed features */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -EIO;

	v4l_dbg(1, cx25840_debug, client,
		"detecting cx25840 client on address 0x%x\n",
		client->addr << 1);

	device_id = cx25840_read(client, 0x101) << 8;
	device_id |= cx25840_read(client, 0x100);
	v4l_dbg(1, cx25840_debug, client, "device_id = 0x%04x\n", device_id);

	/*
	 * The high byte of the device ID should be
	 * 0x83 for the cx2583x and 0x84 for the cx2584x
	 */
	if ((device_id & 0xff00) == 0x8300) {
		id = CX25836 + ((device_id >> 4) & 0xf) - 6;
	} else if ((device_id & 0xff00) == 0x8400) {
		id = CX25840 + ((device_id >> 4) & 0xf);
	} else if (device_id == 0x0000) {
		id = get_cx2388x_ident(client);
	} else if ((device_id & 0xfff0) == 0x5A30) {
		/* The CX23100 (0x5A3C = 23100) doesn't have an A/V decoder */
		id = CX2310X_AV;
	} else if ((device_id & 0xff) == (device_id >> 8)) {
		v4l_err(client,
			"likely a confused/unresponsive cx2388[578] A/V decoder found @ 0x%x (%s)\n",
			client->addr << 1, client->adapter->name);
		v4l_err(client,
			"A method to reset it from the cx25840 driver software is not known at this time\n");
		return -ENODEV;
	} else {
		v4l_dbg(1, cx25840_debug, client, "cx25840 not found\n");
		return -ENODEV;
	}

	state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	sd = &state->sd;
	v4l2_i2c_subdev_init(sd, client, &cx25840_ops);
#if defined(CONFIG_MEDIA_CONTROLLER)
	/*
	 * TODO: add media controller support for analog video inputs like
	 * composite, svideo, etc.
	 * A real input pad for this analog demod would be like:
	 *                 ___________
	 * TUNER --------> |         |
	 *                 |         |
	 * SVIDEO .......> | cx25840 |
	 *                 |         |
	 * COMPOSITE1 ...> |_________|
	 *
	 * However, at least for now, there's not much gain in modelling
	 * those extra inputs. So, let's add them only when needed.
	 */
	state->pads[CX25840_PAD_INPUT].flags = MEDIA_PAD_FL_SINK;
	state->pads[CX25840_PAD_INPUT].sig_type = PAD_SIGNAL_ANALOG;
	state->pads[CX25840_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
	state->pads[CX25840_PAD_VID_OUT].sig_type = PAD_SIGNAL_DV;
	sd->entity.function = MEDIA_ENT_F_ATV_DECODER;

	ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(state->pads),
				     state->pads);
	if (ret < 0) {
		v4l_info(client, "failed to initialize media entity!\n");
		return ret;
	}
#endif

	switch (id) {
	case CX23885_AV:
		v4l_info(client, "cx23885 A/V decoder found @ 0x%x (%s)\n",
			 client->addr << 1, client->adapter->name);
		break;
	case CX23887_AV:
		v4l_info(client, "cx23887 A/V decoder found @ 0x%x (%s)\n",
			 client->addr << 1, client->adapter->name);
		break;
	case CX23888_AV:
		v4l_info(client, "cx23888 A/V decoder found @ 0x%x (%s)\n",
			 client->addr << 1, client->adapter->name);
		break;
	case CX2310X_AV:
		v4l_info(client, "cx%d A/V decoder found @ 0x%x (%s)\n",
			 device_id, client->addr << 1, client->adapter->name);
		break;
	case CX25840:
	case CX25841:
	case CX25842:
	case CX25843:
		/*
		 * Note: revision '(device_id & 0x0f) == 2' was never built.
		 * The marking skips from 0x1 == 22 to 0x3 == 23.
		 */
		v4l_info(client, "cx25%3x-2%x found @ 0x%x (%s)\n",
			 (device_id & 0xfff0) >> 4,
			 (device_id & 0x0f) < 3 ? (device_id & 0x0f) + 1
						: (device_id & 0x0f),
			 client->addr << 1, client->adapter->name);
		break;
	case CX25836:
	case CX25837:
	default:
		v4l_info(client, "cx25%3x-%x found @ 0x%x (%s)\n",
			 (device_id & 0xfff0) >> 4, device_id & 0x0f,
			 client->addr << 1, client->adapter->name);
		break;
	}

	state->c = client;
	state->vid_input = CX25840_COMPOSITE7;
	state->aud_input = CX25840_AUDIO8;
	state->audclk_freq = 48000;
	state->audmode = V4L2_TUNER_MODE_LANG1;
	state->vbi_line_offset = 8;
	state->id = id;
	state->rev = device_id;
	state->vbi_regs_offset = id == CX23888_AV ? 0x500 - 0x424 : 0;
	state->std = V4L2_STD_NTSC_M;
	v4l2_ctrl_handler_init(&state->hdl, 9);
	v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
			  V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
	v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
			  V4L2_CID_CONTRAST, 0, 127, 1, 64);
	v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
			  V4L2_CID_SATURATION, 0, 127, 1, 64);
	v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
			  V4L2_CID_HUE, -128, 127, 1, 0);
	if (!is_cx2583x(state)) {
		default_volume = cx25840_read(client, 0x8d4);
		/*
		 * Enforce the legacy PVR-350/MSP3400 to PVR-150/CX25843 volume
		 * scale mapping limits to avoid -ERANGE errors when
		 * initializing the volume control
		 */
		if (default_volume > 228) {
			/* Bottom out at -96 dB, v4l2 vol range 0x2e00-0x2fff */
			default_volume = 228;
			cx25840_write(client, 0x8d4, 228);
		} else if (default_volume < 20) {
			/* Top out at + 8 dB, v4l2 vol range 0xfe00-0xffff */
			default_volume = 20;
			cx25840_write(client, 0x8d4, 20);
		}
		default_volume = (((228 - default_volume) >> 1) + 23) << 9;

		state->volume = v4l2_ctrl_new_std(&state->hdl,
						  &cx25840_audio_ctrl_ops,
						  V4L2_CID_AUDIO_VOLUME,
						  0, 65535, 65535 / 100,
						  default_volume);
		state->mute = v4l2_ctrl_new_std(&state->hdl,
						&cx25840_audio_ctrl_ops,
						V4L2_CID_AUDIO_MUTE,
						0, 1, 1, 0);
		v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops,
				  V4L2_CID_AUDIO_BALANCE,
				  0, 65535, 65535 / 100, 32768);
		v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops,
				  V4L2_CID_AUDIO_BASS,
				  0, 65535, 65535 / 100, 32768);
		v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops,
				  V4L2_CID_AUDIO_TREBLE,
				  0, 65535, 65535 / 100, 32768);
	}
	sd->ctrl_handler = &state->hdl;
	if (state->hdl.error) {
		int err = state->hdl.error;

		v4l2_ctrl_handler_free(&state->hdl);
		return err;
	}
	if (!is_cx2583x(state))
		v4l2_ctrl_cluster(2, &state->volume);
	v4l2_ctrl_handler_setup(&state->hdl);

	if (client->dev.platform_data) {
		struct cx25840_platform_data *pdata = client->dev.platform_data;

		state->pvr150_workaround = pdata->pvr150_workaround;
	}

	cx25840_ir_probe(sd);
	return 0;
}

static void cx25840_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct cx25840_state *state = to_state(sd);

	cx25840_ir_remove(sd);
	v4l2_device_unregister_subdev(sd);
	v4l2_ctrl_handler_free(&state->hdl);
}

static const struct i2c_device_id cx25840_id[] = {
	{ "cx25840" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, cx25840_id);

static struct i2c_driver cx25840_driver = {
	.driver = {
		.name = "cx25840",
	},
	.probe = cx25840_probe,
	.remove = cx25840_remove,
	.id_table = cx25840_id,
};

module_i2c_driver(cx25840_driver);
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HID driver for Gembird Joypad, "PC Game Controller"
 *
 * Copyright (c) 2015 Red Hat, Inc
 * Copyright (c) 2015 Benjamin Tissoires
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

#define GEMBIRD_START_FAULTY_RDESC	8

static const __u8 gembird_jpd_faulty_rdesc[] = {
	0x75, 0x08,		/* Report Size (8) */
	0x95, 0x05,		/* Report Count (5) */
	0x15, 0x00,		/* Logical Minimum (0) */
	0x26, 0xff, 0x00,	/* Logical Maximum (255) */
	0x35, 0x00,		/* Physical Minimum (0) */
	0x46, 0xff, 0x00,	/* Physical Maximum (255) */
	0x09, 0x30,		/* Usage (X) */
	0x09, 0x31,		/* Usage (Y) */
	0x09, 0x32,		/* Usage (Z) */
	0x09, 0x32,		/* Usage (Z) */
	0x09, 0x35,		/* Usage (Rz) */
	0x81, 0x02,		/* Input (Data,Var,Abs) */
};

/*
 * we fix the report descriptor by:
 * - marking the first Z axis as constant (so it is ignored by HID)
 * - assigning the original second Z to Rx
 * - assigning the original Rz to Ry
 */
static const __u8 gembird_jpd_fixed_rdesc[] = {
	0x75, 0x08,		/* Report Size (8) */
	0x95, 0x02,		/* Report Count (2) */
	0x15, 0x00,		/* Logical Minimum (0) */
	0x26, 0xff, 0x00,	/* Logical Maximum (255) */
	0x35, 0x00,		/* Physical Minimum (0) */
	0x46, 0xff, 0x00,	/* Physical Maximum (255) */
	0x09, 0x30,		/* Usage (X) */
	0x09, 0x31,		/* Usage (Y) */
	0x81, 0x02,		/* Input (Data,Var,Abs) */
	0x95, 0x01,		/* Report Count (1) */
	0x09, 0x32,		/* Usage (Z) */
	0x81, 0x01,		/* Input (Cnst,Arr,Abs) */
	0x95, 0x02,		/* Report Count (2) */
	0x09, 0x33,		/* Usage (Rx) */
	0x09, 0x34,		/* Usage (Ry) */
	0x81, 0x02,		/* Input (Data,Var,Abs) */
};

static const __u8 *gembird_report_fixup(struct hid_device *hdev, __u8 *rdesc,
					unsigned int *rsize)
{
	__u8 *new_rdesc;
	/* delta_size is > 0 */
	size_t delta_size = sizeof(gembird_jpd_fixed_rdesc) -
			    sizeof(gembird_jpd_faulty_rdesc);
	size_t new_size = *rsize + delta_size;

	if (*rsize >= 31 &&
	    !memcmp(&rdesc[GEMBIRD_START_FAULTY_RDESC],
		    gembird_jpd_faulty_rdesc,
		    sizeof(gembird_jpd_faulty_rdesc))) {
		new_rdesc = devm_kzalloc(&hdev->dev, new_size, GFP_KERNEL);
		if (new_rdesc == NULL)
			return rdesc;

		dev_info(&hdev->dev,
			 "fixing Gembird JPD-DualForce 2 report descriptor.\n");

		/* start by copying the end of the rdesc */
		memcpy(new_rdesc + delta_size, rdesc, *rsize);

		/* add the correct beginning */
		memcpy(new_rdesc, rdesc, GEMBIRD_START_FAULTY_RDESC);

		/* replace the faulty part with the fixed one */
		memcpy(new_rdesc + GEMBIRD_START_FAULTY_RDESC,
		       gembird_jpd_fixed_rdesc,
		       sizeof(gembird_jpd_fixed_rdesc));

		*rsize = new_size;
		rdesc = new_rdesc;
	}

	return rdesc;
}

static const struct hid_device_id gembird_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD,
			 USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
	{ }
};
MODULE_DEVICE_TABLE(hid, gembird_devices);

static struct hid_driver gembird_driver = {
	.name = "gembird",
	.id_table = gembird_devices,
	.report_fixup = gembird_report_fixup,
};
module_hid_driver(gembird_driver);

MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
MODULE_DESCRIPTION("HID Gembird joypad driver");
MODULE_LICENSE("GPL");
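/*
 * Size arithmetic behind gembird_report_fixup() above, spelled out for
 * illustration: sizeof(gembird_jpd_faulty_rdesc) is 26 bytes and
 * sizeof(gembird_jpd_fixed_rdesc) is 34 bytes, so delta_size is 8 and a
 * matching descriptor grows from *rsize to *rsize + 8 bytes.
 */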
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Christian Brauner <brauner@kernel.org> */

#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/mnt_idmapping.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
#include <linux/seq_file.h>

#include "internal.h"

/*
 * Outside of this file vfs{g,u}id_t are always created from k{g,u}id_t,
 * never from raw values. These are just internal helpers.
 */
#define VFSUIDT_INIT_RAW(val) (vfsuid_t){ val }
#define VFSGIDT_INIT_RAW(val) (vfsgid_t){ val }

struct mnt_idmap {
	struct uid_gid_map uid_map;
	struct uid_gid_map gid_map;
	refcount_t count;
};

/*
 * Carries the initial idmapping of 0:0:4294967295 which is an identity
 * mapping. This means that {g,u}id 0 is mapped to {g,u}id 0, {g,u}id 1 is
 * mapped to {g,u}id 1, [...], {g,u}id 1000 to {g,u}id 1000, [...].
 */
struct mnt_idmap nop_mnt_idmap = {
	.count = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL_GPL(nop_mnt_idmap);

/*
 * Carries the invalid idmapping of a full 0-4294967295 {g,u}id range.
 * This means that all {g,u}ids are mapped to INVALID_VFS{G,U}ID.
 */
struct mnt_idmap invalid_mnt_idmap = {
	.count = REFCOUNT_INIT(1),
};
EXPORT_SYMBOL_GPL(invalid_mnt_idmap);

/**
 * initial_idmapping - check whether this is the initial mapping
 * @ns: idmapping to check
 *
 * Check whether this is the initial mapping, mapping 0 to 0, 1 to 1,
 * [...], 1000 to 1000 [...].
 *
 * Return: true if this is the initial mapping, false if not.
 */
static inline bool initial_idmapping(const struct user_namespace *ns)
{
	return ns == &init_user_ns;
}
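/*
 * Illustration (example extent, not taken from the code): an idmapped
 * mount created from a user namespace with the mapping "1000 0 1" has
 * a uid_map extent of { .first = 1000, .lower_first = 0, .count = 1 }.
 * map_id_down(&idmap->uid_map, 1000) then returns 0, so make_vfsuid()
 * below reports filesystem kuid 1000 to userspace as vfsuid 0.
 */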
/**
 * make_vfsuid - map a filesystem kuid according to an idmapping
 * @idmap: the mount's idmapping
 * @fs_userns: the filesystem's idmapping
 * @kuid : kuid to be mapped
 *
 * Take a @kuid and remap it from @fs_userns into @idmap. Use this
 * function when preparing a @kuid to be reported to userspace.
 *
 * If initial_idmapping() determines that this is not an idmapped mount
 * we can simply return @kuid unchanged.
 * If initial_idmapping() tells us that the filesystem is not mounted with an
 * idmapping we know the value of @kuid won't change when calling
 * from_kuid() so we can simply retrieve the value via __kuid_val()
 * directly.
 *
 * Return: @kuid mapped according to @idmap.
 * If @kuid has no mapping in either @idmap or @fs_userns INVALID_UID is
 * returned.
 */
vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
		     struct user_namespace *fs_userns,
		     kuid_t kuid)
{
	uid_t uid;

	if (idmap == &nop_mnt_idmap)
		return VFSUIDT_INIT(kuid);
	if (idmap == &invalid_mnt_idmap)
		return INVALID_VFSUID;
	if (initial_idmapping(fs_userns))
		uid = __kuid_val(kuid);
	else
		uid = from_kuid(fs_userns, kuid);
	if (uid == (uid_t)-1)
		return INVALID_VFSUID;
	return VFSUIDT_INIT_RAW(map_id_down(&idmap->uid_map, uid));
}
EXPORT_SYMBOL_GPL(make_vfsuid);

/**
 * make_vfsgid - map a filesystem kgid according to an idmapping
 * @idmap: the mount's idmapping
 * @fs_userns: the filesystem's idmapping
 * @kgid : kgid to be mapped
 *
 * Take a @kgid and remap it from @fs_userns into @idmap. Use this
 * function when preparing a @kgid to be reported to userspace.
 *
 * If initial_idmapping() determines that this is not an idmapped mount
 * we can simply return @kgid unchanged.
 * If initial_idmapping() tells us that the filesystem is not mounted with an
 * idmapping we know the value of @kgid won't change when calling
 * from_kgid() so we can simply retrieve the value via __kgid_val()
 * directly.
 *
 * Return: @kgid mapped according to @idmap.
 * If @kgid has no mapping in either @idmap or @fs_userns INVALID_GID is
 * returned.
 */
vfsgid_t make_vfsgid(struct mnt_idmap *idmap,
		     struct user_namespace *fs_userns,
		     kgid_t kgid)
{
	gid_t gid;

	if (idmap == &nop_mnt_idmap)
		return VFSGIDT_INIT(kgid);
	if (idmap == &invalid_mnt_idmap)
		return INVALID_VFSGID;
	if (initial_idmapping(fs_userns))
		gid = __kgid_val(kgid);
	else
		gid = from_kgid(fs_userns, kgid);
	if (gid == (gid_t)-1)
		return INVALID_VFSGID;
	return VFSGIDT_INIT_RAW(map_id_down(&idmap->gid_map, gid));
}
EXPORT_SYMBOL_GPL(make_vfsgid);

/**
 * from_vfsuid - map a vfsuid into the filesystem idmapping
 * @idmap: the mount's idmapping
 * @fs_userns: the filesystem's idmapping
 * @vfsuid : vfsuid to be mapped
 *
 * Map @vfsuid into the filesystem idmapping. This function has to be used in
 * order to e.g. write @vfsuid to inode->i_uid.
 *
 * Return: @vfsuid mapped into the filesystem idmapping
 */
kuid_t from_vfsuid(struct mnt_idmap *idmap,
		   struct user_namespace *fs_userns,
		   vfsuid_t vfsuid)
{
	uid_t uid;

	if (idmap == &nop_mnt_idmap)
		return AS_KUIDT(vfsuid);
	if (idmap == &invalid_mnt_idmap)
		return INVALID_UID;
	uid = map_id_up(&idmap->uid_map, __vfsuid_val(vfsuid));
	if (uid == (uid_t)-1)
		return INVALID_UID;
	if (initial_idmapping(fs_userns))
		return KUIDT_INIT(uid);
	return make_kuid(fs_userns, uid);
}
EXPORT_SYMBOL_GPL(from_vfsuid);
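/*
 * Round-trip property worth noting (follows from map_id_up() and
 * map_id_down() being inverses on mapped ranges): for any kuid that has
 * a mapping, from_vfsuid(idmap, fs_userns, make_vfsuid(idmap, fs_userns,
 * kuid)) returns the original kuid. With the example extent given
 * earlier: kuid 1000 -> vfsuid 0 -> kuid 1000.
 */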
/**
 * from_vfsgid - map a vfsgid into the filesystem idmapping
 * @idmap: the mount's idmapping
 * @fs_userns: the filesystem's idmapping
 * @vfsgid : vfsgid to be mapped
 *
 * Map @vfsgid into the filesystem idmapping. This function has to be used in
 * order to e.g. write @vfsgid to inode->i_gid.
 *
 * Return: @vfsgid mapped into the filesystem idmapping
 */
kgid_t from_vfsgid(struct mnt_idmap *idmap,
		   struct user_namespace *fs_userns,
		   vfsgid_t vfsgid)
{
	gid_t gid;

	if (idmap == &nop_mnt_idmap)
		return AS_KGIDT(vfsgid);
	if (idmap == &invalid_mnt_idmap)
		return INVALID_GID;
	gid = map_id_up(&idmap->gid_map, __vfsgid_val(vfsgid));
	if (gid == (gid_t)-1)
		return INVALID_GID;
	if (initial_idmapping(fs_userns))
		return KGIDT_INIT(gid);
	return make_kgid(fs_userns, gid);
}
EXPORT_SYMBOL_GPL(from_vfsgid);

#ifdef CONFIG_MULTIUSER
/**
 * vfsgid_in_group_p() - check whether a vfsgid matches the caller's groups
 * @vfsgid: the mnt gid to match
 *
 * This function can be used to determine whether @vfsgid matches any of the
 * caller's groups.
 *
 * Return: 1 if @vfsgid matches caller's groups, 0 if not.
 */
int vfsgid_in_group_p(vfsgid_t vfsgid)
{
	return in_group_p(AS_KGIDT(vfsgid));
}
#else
int vfsgid_in_group_p(vfsgid_t vfsgid)
{
	return 1;
}
#endif
EXPORT_SYMBOL_GPL(vfsgid_in_group_p);

static int copy_mnt_idmap(struct uid_gid_map *map_from,
			  struct uid_gid_map *map_to)
{
	struct uid_gid_extent *forward, *reverse;
	u32 nr_extents = READ_ONCE(map_from->nr_extents);

	/* Pairs with smp_wmb() when writing the idmapping. */
	smp_rmb();

	/*
	 * Don't blindly copy @map_from into @map_to if nr_extents is
	 * smaller or equal to UID_GID_MAP_MAX_BASE_EXTENTS. Since we
	 * read @nr_extents someone could have written an idmapping and
	 * then we might end up with inconsistent data. So just don't do
	 * anything at all.
	 */
	if (nr_extents == 0)
		return -EINVAL;

	/*
	 * Here we know that nr_extents is greater than zero which means
	 * a map has been written. Since idmappings can't be changed
	 * once they have been written we know that we can safely copy
	 * from @map_from into @map_to.
	 */
	if (nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
		*map_to = *map_from;
		return 0;
	}

	forward = kmemdup_array(map_from->forward, nr_extents,
				sizeof(struct uid_gid_extent),
				GFP_KERNEL_ACCOUNT);
	if (!forward)
		return -ENOMEM;

	reverse = kmemdup_array(map_from->reverse, nr_extents,
				sizeof(struct uid_gid_extent),
				GFP_KERNEL_ACCOUNT);
	if (!reverse) {
		kfree(forward);
		return -ENOMEM;
	}

	/*
	 * The idmapping isn't exposed anywhere so we don't need to care
	 * about ordering between extent pointers and @nr_extents
	 * initialization.
	 */
	map_to->forward = forward;
	map_to->reverse = reverse;
	map_to->nr_extents = nr_extents;
	return 0;
}

static void free_mnt_idmap(struct mnt_idmap *idmap)
{
	if (idmap->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
		kfree(idmap->uid_map.forward);
		kfree(idmap->uid_map.reverse);
	}
	if (idmap->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
		kfree(idmap->gid_map.forward);
		kfree(idmap->gid_map.reverse);
	}
	kfree(idmap);
}

struct mnt_idmap *alloc_mnt_idmap(struct user_namespace *mnt_userns)
{
	struct mnt_idmap *idmap;
	int ret;

	idmap = kzalloc(sizeof(struct mnt_idmap), GFP_KERNEL_ACCOUNT);
	if (!idmap)
		return ERR_PTR(-ENOMEM);

	refcount_set(&idmap->count, 1);
	ret = copy_mnt_idmap(&mnt_userns->uid_map, &idmap->uid_map);
	if (!ret)
		ret = copy_mnt_idmap(&mnt_userns->gid_map, &idmap->gid_map);
	if (ret) {
		free_mnt_idmap(idmap);
		idmap = ERR_PTR(ret);
	}

	return idmap;
}
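/*
 * Typical lifecycle of a struct mnt_idmap, sketched under the
 * assumption that @mnt_userns already carries a written idmapping:
 *
 *	struct mnt_idmap *idmap = alloc_mnt_idmap(mnt_userns);
 *
 *	if (IS_ERR(idmap))
 *		return PTR_ERR(idmap);
 *	idmap = mnt_idmap_get(idmap);	(e.g. when cloning a mount)
 *	...
 *	mnt_idmap_put(idmap);		(once per reference)
 *	mnt_idmap_put(idmap);
 */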
/**
 * mnt_idmap_get - get a reference to an idmapping
 * @idmap: the idmap to bump the reference on
 *
 * If @idmap is not the @nop_mnt_idmap bump the reference count.
 *
 * Return: @idmap with reference count bumped if @nop_mnt_idmap isn't passed.
 */
struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap)
{
	if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap)
		refcount_inc(&idmap->count);

	return idmap;
}
EXPORT_SYMBOL_GPL(mnt_idmap_get);

/**
 * mnt_idmap_put - put a reference to an idmapping
 * @idmap: the idmap to put the reference on
 *
 * If this is a non-initial idmapping, put the reference count when a mount is
 * released and free it if we're the last user.
 */
void mnt_idmap_put(struct mnt_idmap *idmap)
{
	if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap &&
	    refcount_dec_and_test(&idmap->count))
		free_mnt_idmap(idmap);
}
EXPORT_SYMBOL_GPL(mnt_idmap_put);

int statmount_mnt_idmap(struct mnt_idmap *idmap, struct seq_file *seq, bool uid_map)
{
	struct uid_gid_map *map, *map_up;
	u32 idx, nr_mappings;

	if (!is_valid_mnt_idmap(idmap))
		return 0;

	/*
	 * Idmappings are shown relative to the caller's idmapping.
	 * This is both the most intuitive and most useful solution.
	 */
	if (uid_map) {
		map = &idmap->uid_map;
		map_up = &current_user_ns()->uid_map;
	} else {
		map = &idmap->gid_map;
		map_up = &current_user_ns()->gid_map;
	}

	for (idx = 0, nr_mappings = 0; idx < map->nr_extents; idx++) {
		uid_t lower;
		struct uid_gid_extent *extent;

		if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
			extent = &map->extent[idx];
		else
			extent = &map->forward[idx];

		/*
		 * Verify that the whole range of the mapping can be
		 * resolved in the caller's idmapping. If it cannot be
		 * resolved skip the mapping.
		 */
		lower = map_id_range_up(map_up, extent->lower_first,
					extent->count);
		if (lower == (uid_t) -1)
			continue;

		seq_printf(seq, "%u %u %u", extent->first, lower,
			   extent->count);

		seq->count++; /* mappings are separated by \0 */
		if (seq_has_overflowed(seq))
			return -EAGAIN;

		nr_mappings++;
	}

	return nr_mappings;
}
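/*
 * Example of the output produced above (illustrative idmapping): a
 * single uid extent { .first = 1000, .lower_first = 0, .count = 1 },
 * read by a caller in the initial user namespace, is emitted as the
 * NUL-terminated string "1000 0 1", the same "first lower count"
 * triple format used by /proc/<pid>/uid_map.
 */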
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_GENERIC_NETLINK_H
#define __NET_GENERIC_NETLINK_H

#include <linux/net.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <uapi/linux/genetlink.h>

#define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN)

/* Non-parallel generic netlink requests are serialized by a global lock.
*/ void genl_lock(void); void genl_unlock(void); #define MODULE_ALIAS_GENL_FAMILY(family) \ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family) /* Binding to multicast group requires %CAP_NET_ADMIN */ #define GENL_MCAST_CAP_NET_ADMIN BIT(0) /* Binding to multicast group requires %CAP_SYS_ADMIN */ #define GENL_MCAST_CAP_SYS_ADMIN BIT(1) /** * struct genl_multicast_group - generic netlink multicast group * @name: name of the multicast group, names are per-family * @flags: GENL_MCAST_* flags */ struct genl_multicast_group { char name[GENL_NAMSIZ]; u8 flags; }; struct genl_split_ops; struct genl_info; /** * struct genl_family - generic netlink family * @hdrsize: length of user specific header in bytes * @name: name of family * @version: protocol version * @maxattr: maximum number of attributes supported * @policy: netlink policy * @netnsok: set to true if the family can handle network * namespaces and should be presented in all of them * @parallel_ops: operations can be called in parallel and aren't * synchronized by the core genetlink code * @pre_doit: called before an operation's doit callback, it may * do additional, common, filtering and return an error * @post_doit: called after an operation's doit callback, it may * undo operations done by pre_doit, for example release locks * @bind: called when family multicast group is added to a netlink socket * @unbind: called when family multicast group is removed from a netlink socket * @module: pointer to the owning module (set to THIS_MODULE) * @mcgrps: multicast groups used by this family * @n_mcgrps: number of multicast groups * @resv_start_op: first operation for which reserved fields of the header * can be validated and policies are required (see below); * new families should leave this field at zero * @ops: the operations supported by this family * @n_ops: number of operations supported by this family * @small_ops: the small-struct operations supported by this family * @n_small_ops: number of small-struct operations supported by this family * @split_ops: the split do/dump form of operation definition * @n_split_ops: number of entries in @split_ops, not that with split do/dump * ops the number of entries is not the same as number of commands * @sock_priv_size: the size of per-socket private memory * @sock_priv_init: the per-socket private memory initializer * @sock_priv_destroy: the per-socket private memory destructor * * Attribute policies (the combination of @policy and @maxattr fields) * can be attached at the family level or at the operation level. * If both are present the per-operation policy takes precedence. * For operations before @resv_start_op lack of policy means that the core * will perform no attribute parsing or validation. For newer operations * if policy is not provided core will reject all TLV attributes. 
*/ struct genl_family { unsigned int hdrsize; char name[GENL_NAMSIZ]; unsigned int version; unsigned int maxattr; u8 netnsok:1; u8 parallel_ops:1; u8 n_ops; u8 n_small_ops; u8 n_split_ops; u8 n_mcgrps; u8 resv_start_op; const struct nla_policy *policy; int (*pre_doit)(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info); void (*post_doit)(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info); int (*bind)(int mcgrp); void (*unbind)(int mcgrp); const struct genl_ops * ops; const struct genl_small_ops *small_ops; const struct genl_split_ops *split_ops; const struct genl_multicast_group *mcgrps; struct module *module; size_t sock_priv_size; void (*sock_priv_init)(void *priv); void (*sock_priv_destroy)(void *priv); /* private: internal use only */ /* protocol family identifier */ int id; /* starting number of multicast group IDs in this family */ unsigned int mcgrp_offset; /* list of per-socket privs */ struct xarray *sock_privs; }; /** * struct genl_info - receiving information * @snd_seq: sending sequence number * @snd_portid: netlink portid of sender * @family: generic netlink family * @nlhdr: netlink message header * @genlhdr: generic netlink message header * @attrs: netlink attributes * @_net: network namespace * @ctx: storage space for the use by the family * @user_ptr: user pointers (deprecated, use ctx instead) * @extack: extended ACK report struct */ struct genl_info { u32 snd_seq; u32 snd_portid; const struct genl_family *family; const struct nlmsghdr * nlhdr; struct genlmsghdr * genlhdr; struct nlattr ** attrs; possible_net_t _net; union { u8 ctx[NETLINK_CTX_SIZE]; void * user_ptr[2]; }; struct netlink_ext_ack *extack; }; static inline struct net *genl_info_net(const struct genl_info *info) { return read_pnet(&info->_net); } static inline void genl_info_net_set(struct genl_info *info, struct net *net) { write_pnet(&info->_net, net); } static inline void *genl_info_userhdr(const struct genl_info *info) { return (u8 *)info->genlhdr + GENL_HDRLEN; } #define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg) #define GENL_SET_ERR_MSG_FMT(info, msg, args...) \ NL_SET_ERR_MSG_FMT((info)->extack, msg, ##args) /* Report that a root attribute is missing */ #define GENL_REQ_ATTR_CHECK(info, attr) ({ \ const struct genl_info *__info = (info); \ \ NL_REQ_ATTR_CHECK(__info->extack, NULL, __info->attrs, (attr)); \ }) enum genl_validate_flags { GENL_DONT_VALIDATE_STRICT = BIT(0), GENL_DONT_VALIDATE_DUMP = BIT(1), GENL_DONT_VALIDATE_DUMP_STRICT = BIT(2), }; /** * struct genl_small_ops - generic netlink operations (small version) * @cmd: command identifier * @internal_flags: flags used by the family * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM) * @validate: validation flags from enum genl_validate_flags * @doit: standard command callback * @dumpit: callback for dumpers * * This is a cut-down version of struct genl_ops for users who don't need * most of the ancillary infra and want to save space. 
*/ struct genl_small_ops { int (*doit)(struct sk_buff *skb, struct genl_info *info); int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb); u8 cmd; u8 internal_flags; u8 flags; u8 validate; }; /** * struct genl_ops - generic netlink operations * @cmd: command identifier * @internal_flags: flags used by the family * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM) * @maxattr: maximum number of attributes supported * @policy: netlink policy (takes precedence over family policy) * @validate: validation flags from enum genl_validate_flags * @doit: standard command callback * @start: start callback for dumps * @dumpit: callback for dumpers * @done: completion callback for dumps */ struct genl_ops { int (*doit)(struct sk_buff *skb, struct genl_info *info); int (*start)(struct netlink_callback *cb); int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); const struct nla_policy *policy; unsigned int maxattr; u8 cmd; u8 internal_flags; u8 flags; u8 validate; }; /** * struct genl_split_ops - generic netlink operations (do/dump split version) * @cmd: command identifier * @internal_flags: flags used by the family * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM) * @validate: validation flags from enum genl_validate_flags * @policy: netlink policy (takes precedence over family policy) * @maxattr: maximum number of attributes supported * * Do callbacks: * @pre_doit: called before an operation's @doit callback, it may * do additional, common, filtering and return an error * @doit: standard command callback * @post_doit: called after an operation's @doit callback, it may * undo operations done by pre_doit, for example release locks * * Dump callbacks: * @start: start callback for dumps * @dumpit: callback for dumpers * @done: completion callback for dumps * * Do callbacks can be used if %GENL_CMD_CAP_DO is set in @flags. * Dump callbacks can be used if %GENL_CMD_CAP_DUMP is set in @flags. * Exactly one of those flags must be set. */ struct genl_split_ops { union { struct { int (*pre_doit)(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info); int (*doit)(struct sk_buff *skb, struct genl_info *info); void (*post_doit)(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info); }; struct { int (*start)(struct netlink_callback *cb); int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); }; }; const struct nla_policy *policy; unsigned int maxattr; u8 cmd; u8 internal_flags; u8 flags; u8 validate; }; /** * struct genl_dumpit_info - info that is available during dumpit op call * @op: generic netlink ops - for internal genl code usage * @attrs: netlink attributes * @info: struct genl_info describing the request */ struct genl_dumpit_info { struct genl_split_ops op; struct genl_info info; }; static inline const struct genl_dumpit_info * genl_dumpit_info(struct netlink_callback *cb) { return cb->data; } static inline const struct genl_info * genl_info_dump(struct netlink_callback *cb) { return &genl_dumpit_info(cb)->info; } /** * genl_info_init_ntf() - initialize genl_info for notifications * @info: genl_info struct to set up * @family: pointer to the genetlink family * @cmd: command to be used in the notification * * Initialize a locally declared struct genl_info to pass to various APIs. * Intended to be used when creating notifications. 
*/ static inline void genl_info_init_ntf(struct genl_info *info, const struct genl_family *family, u8 cmd) { struct genlmsghdr *hdr = (void *) &info->user_ptr[0]; memset(info, 0, sizeof(*info)); info->family = family; info->genlhdr = hdr; hdr->cmd = cmd; } static inline bool genl_info_is_ntf(const struct genl_info *info) { return !info->nlhdr; } void *__genl_sk_priv_get(struct genl_family *family, struct sock *sk); void *genl_sk_priv_get(struct genl_family *family, struct sock *sk); int genl_register_family(struct genl_family *family); int genl_unregister_family(const struct genl_family *family); void genl_notify(const struct genl_family *family, struct sk_buff *skb, struct genl_info *info, u32 group, gfp_t flags); void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, const struct genl_family *family, int flags, u8 cmd); static inline void * __genlmsg_iput(struct sk_buff *skb, const struct genl_info *info, int flags) { return genlmsg_put(skb, info->snd_portid, info->snd_seq, info->family, flags, info->genlhdr->cmd); } /** * genlmsg_iput - start genetlink message based on genl_info * @skb: skb in which message header will be placed * @info: genl_info as provided to do/dump handlers * * Convenience wrapper which starts a genetlink message based on * information in user request. @info should be either the struct passed * by genetlink core to do/dump handlers (when constructing replies to * such requests) or a struct initialized by genl_info_init_ntf() * when constructing notifications. * * Returns: pointer to new genetlink header. */ static inline void * genlmsg_iput(struct sk_buff *skb, const struct genl_info *info) { return __genlmsg_iput(skb, info, 0); } /** * genlmsg_nlhdr - Obtain netlink header from user specified header * @user_hdr: user header as returned from genlmsg_put() * * Returns: pointer to netlink header. */ static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr) { return (struct nlmsghdr *)((char *)user_hdr - GENL_HDRLEN - NLMSG_HDRLEN); } /** * genlmsg_parse_deprecated - parse attributes of a genetlink message * @nlh: netlink message header * @family: genetlink message family * @tb: destination array with maxtype+1 elements * @maxtype: maximum attribute type to be expected * @policy: validation policy * @extack: extended ACK report struct */ static inline int genlmsg_parse_deprecated(const struct nlmsghdr *nlh, const struct genl_family *family, struct nlattr *tb[], int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack) { return __nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype, policy, NL_VALIDATE_LIBERAL, extack); } /** * genlmsg_parse - parse attributes of a genetlink message * @nlh: netlink message header * @family: genetlink message family * @tb: destination array with maxtype+1 elements * @maxtype: maximum attribute type to be expected * @policy: validation policy * @extack: extended ACK report struct */ static inline int genlmsg_parse(const struct nlmsghdr *nlh, const struct genl_family *family, struct nlattr *tb[], int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack) { return __nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype, policy, NL_VALIDATE_STRICT, extack); } /** * genl_dump_check_consistent - check if sequence is consistent and advertise if not * @cb: netlink callback structure that stores the sequence number * @user_hdr: user header as returned from genlmsg_put() * * Cf. nl_dump_check_consistent(), this just provides a wrapper to make it * simpler to use with generic netlink. 
*/ static inline void genl_dump_check_consistent(struct netlink_callback *cb, void *user_hdr) { nl_dump_check_consistent(cb, genlmsg_nlhdr(user_hdr)); } /** * genlmsg_put_reply - Add generic netlink header to a reply message * @skb: socket buffer holding the message * @info: receiver info * @family: generic netlink family * @flags: netlink message flags * @cmd: generic netlink command * * Returns: pointer to user specific header */ static inline void *genlmsg_put_reply(struct sk_buff *skb, struct genl_info *info, const struct genl_family *family, int flags, u8 cmd) { return genlmsg_put(skb, info->snd_portid, info->snd_seq, family, flags, cmd); } /** * genlmsg_end - Finalize a generic netlink message * @skb: socket buffer the message is stored in * @hdr: user specific header */ static inline void genlmsg_end(struct sk_buff *skb, void *hdr) { nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN); } /** * genlmsg_cancel - Cancel construction of a generic netlink message * @skb: socket buffer the message is stored in * @hdr: generic netlink message header */ static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr) { if (hdr) nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN); } /** * genlmsg_multicast_netns_filtered - multicast a netlink message * to a specific netns with filter * function * @family: the generic netlink family * @net: the net namespace * @skb: netlink message as socket buffer * @portid: own netlink portid to avoid sending to yourself * @group: offset of multicast group in groups array * @flags: allocation flags * @filter: filter function * @filter_data: filter function private data * * Return: 0 on success, negative error code for failure. */ static inline int genlmsg_multicast_netns_filtered(const struct genl_family *family, struct net *net, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags, netlink_filter_fn filter, void *filter_data) { if (WARN_ON_ONCE(group >= family->n_mcgrps)) return -EINVAL; group = family->mcgrp_offset + group; return nlmsg_multicast_filtered(net->genl_sock, skb, portid, group, flags, filter, filter_data); } /** * genlmsg_multicast_netns - multicast a netlink message to a specific netns * @family: the generic netlink family * @net: the net namespace * @skb: netlink message as socket buffer * @portid: own netlink portid to avoid sending to yourself * @group: offset of multicast group in groups array * @flags: allocation flags */ static inline int genlmsg_multicast_netns(const struct genl_family *family, struct net *net, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags) { return genlmsg_multicast_netns_filtered(family, net, skb, portid, group, flags, NULL, NULL); } /** * genlmsg_multicast - multicast a netlink message to the default netns * @family: the generic netlink family * @skb: netlink message as socket buffer * @portid: own netlink portid to avoid sending to yourself * @group: offset of multicast group in groups array * @flags: allocation flags */ static inline int genlmsg_multicast(const struct genl_family *family, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags) { return genlmsg_multicast_netns(family, &init_net, skb, portid, group, flags); } /** * genlmsg_multicast_allns - multicast a netlink message to all net namespaces * @family: the generic netlink family * @skb: netlink message as socket buffer * @portid: own netlink portid to avoid sending to yourself * @group: offset of multicast group in groups array * * This function must hold the RTNL or rcu_read_lock(). 
*/ int genlmsg_multicast_allns(const struct genl_family *family, struct sk_buff *skb, u32 portid, unsigned int group); /** * genlmsg_unicast - unicast a netlink message * @net: network namespace to look up @portid in * @skb: netlink message as socket buffer * @portid: netlink portid of the destination socket */ static inline int genlmsg_unicast(struct net *net, struct sk_buff *skb, u32 portid) { return nlmsg_unicast(net->genl_sock, skb, portid); } /** * genlmsg_reply - reply to a request * @skb: netlink message to be sent back * @info: receiver information */ static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info) { return genlmsg_unicast(genl_info_net(info), skb, info->snd_portid); } /** * genlmsg_data - head of message payload * @gnlh: genetlink message header */ static inline void *genlmsg_data(const struct genlmsghdr *gnlh) { return ((unsigned char *) gnlh + GENL_HDRLEN); } /** * genlmsg_len - length of message payload * @gnlh: genetlink message header */ static inline int genlmsg_len(const struct genlmsghdr *gnlh) { struct nlmsghdr *nlh = (struct nlmsghdr *)((unsigned char *)gnlh - NLMSG_HDRLEN); return (nlh->nlmsg_len - GENL_HDRLEN - NLMSG_HDRLEN); } /** * genlmsg_msg_size - length of genetlink message not including padding * @payload: length of message payload */ static inline int genlmsg_msg_size(int payload) { return GENL_HDRLEN + payload; } /** * genlmsg_total_size - length of genetlink message including padding * @payload: length of message payload */ static inline int genlmsg_total_size(int payload) { return NLMSG_ALIGN(genlmsg_msg_size(payload)); } /** * genlmsg_new - Allocate a new generic netlink message * @payload: size of the message payload * @flags: the type of memory to allocate. */ static inline struct sk_buff *genlmsg_new(size_t payload, gfp_t flags) { return nlmsg_new(genlmsg_total_size(payload), flags); } /** * genl_set_err - report error to genetlink broadcast listeners * @family: the generic netlink family * @net: the network namespace to report the error to * @portid: the PORTID of a process that we want to skip (if any) * @group: the broadcast group that will notice the error * (this is the offset of the multicast group in the groups array) * @code: error code, must be negative (as usual in kernelspace) * * This function returns the number of broadcast listeners that have set the * NETLINK_RECV_NO_ENOBUFS socket option. */ static inline int genl_set_err(const struct genl_family *family, struct net *net, u32 portid, u32 group, int code) { if (WARN_ON_ONCE(group >= family->n_mcgrps)) return -EINVAL; group = family->mcgrp_offset + group; return netlink_set_err(net->genl_sock, portid, group, code); } static inline int genl_has_listeners(const struct genl_family *family, struct net *net, unsigned int group) { if (WARN_ON_ONCE(group >= family->n_mcgrps)) return -EINVAL; group = family->mcgrp_offset + group; return netlink_has_listeners(net->genl_sock, group); } #endif /* __NET_GENERIC_NETLINK_H */
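/*
 * Illustrative sketch (not part of genetlink.h): one way a family might
 * build and send a multicast notification using the helpers above. The
 * names demo_family, DEMO_CMD_NTF, DEMO_ATTR_VAL and DEMO_MCGRP_OFFSET
 * are hypothetical; only the genetlink/netlink calls themselves come from
 * the headers.
 */
static struct genl_family demo_family;	/* registered via genl_register_family() elsewhere */

#define DEMO_CMD_NTF		1	/* hypothetical command */
#define DEMO_ATTR_VAL		1	/* hypothetical attribute */
#define DEMO_MCGRP_OFFSET	0	/* offset into demo_family's mcgrps array */

static int demo_send_ntf(u32 val)
{
	struct genl_info info;
	struct sk_buff *skb;
	void *hdr;

	/* Set up a local genl_info as if for a notification */
	genl_info_init_ntf(&info, &demo_family, DEMO_CMD_NTF);

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Start the message from the genl_info, then add one attribute */
	hdr = genlmsg_iput(skb, &info);
	if (!hdr || nla_put_u32(skb, DEMO_ATTR_VAL, val)) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}
	genlmsg_end(skb, hdr);

	/* portid 0: don't skip any listener */
	return genlmsg_multicast(&demo_family, skb, 0, DEMO_MCGRP_OFFSET,
				 GFP_KERNEL);
}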
// SPDX-License-Identifier: GPL-2.0-only
/* DVB USB framework compliant Linux driver for the HanfTek UMT-010 USB2.0
 * DVB-T receiver.
 *
 * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
 *
 * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
 */
#include "dibusb.h"
#include "mt352.h"

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

static int umt_mt352_demod_init(struct dvb_frontend *fe)
{
	static u8 mt352_clock_config[] = { 0x89, 0xb8, 0x2d };
	static u8 mt352_reset[] = { 0x50, 0x80 };
	static u8 mt352_mclk_ratio[] = { 0x8b, 0x00 };
	static u8 mt352_adc_ctl_1_cfg[] = { 0x8E, 0x40 };
	static u8 mt352_agc_cfg[] = { 0x67, 0x10, 0xa0 };
	static u8 mt352_sec_agc_cfg1[] = { 0x6a, 0xff };
	static u8 mt352_sec_agc_cfg2[] = { 0x6d, 0xff };
	static u8 mt352_sec_agc_cfg3[] = { 0x70, 0x40 };
	static u8 mt352_sec_agc_cfg4[] = { 0x7b, 0x03 };
	static u8 mt352_sec_agc_cfg5[] = { 0x7d, 0x0f };
	static u8 mt352_acq_ctl[] = { 0x53, 0x50 };
	static u8 mt352_input_freq_1[] = { 0x56, 0x31, 0x06 };

	mt352_write(fe, mt352_clock_config, sizeof(mt352_clock_config));
	udelay(2000);
	mt352_write(fe, mt352_reset, sizeof(mt352_reset));
	mt352_write(fe, mt352_mclk_ratio, sizeof(mt352_mclk_ratio));
	mt352_write(fe, mt352_adc_ctl_1_cfg, sizeof(mt352_adc_ctl_1_cfg));
	mt352_write(fe, mt352_agc_cfg, sizeof(mt352_agc_cfg));
	mt352_write(fe, mt352_sec_agc_cfg1, sizeof(mt352_sec_agc_cfg1));
	mt352_write(fe, mt352_sec_agc_cfg2, sizeof(mt352_sec_agc_cfg2));
	mt352_write(fe, mt352_sec_agc_cfg3, sizeof(mt352_sec_agc_cfg3));
	mt352_write(fe, mt352_sec_agc_cfg4, sizeof(mt352_sec_agc_cfg4));
	mt352_write(fe, mt352_sec_agc_cfg5, sizeof(mt352_sec_agc_cfg5));
	mt352_write(fe, mt352_acq_ctl, sizeof(mt352_acq_ctl));
	mt352_write(fe, mt352_input_freq_1, sizeof(mt352_input_freq_1));

	return 0;
}

static int umt_mt352_frontend_attach(struct dvb_usb_adapter *adap)
{
	struct mt352_config umt_config;

	memset(&umt_config, 0, sizeof(struct mt352_config));
	umt_config.demod_init = umt_mt352_demod_init;
	umt_config.demod_address = 0xf;

	adap->fe_adap[0].fe = dvb_attach(mt352_attach, &umt_config,
					 &adap->dev->i2c_adap);
	return 0;
}

static int umt_tuner_attach(struct dvb_usb_adapter *adap)
{
	dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x61, NULL,
		   DVB_PLL_TUA6034);
	return 0;
}

/* USB Driver stuff */
static struct dvb_usb_device_properties umt_properties;

static int umt_probe(struct usb_interface *intf,
		     const struct usb_device_id *id)
{
	if (0 == dvb_usb_device_init(intf, &umt_properties, THIS_MODULE, NULL,
				     adapter_nr))
		return 0;
	return -EINVAL;
}

/* do not change the order of the ID table */
enum {
	HANFTEK_UMT_010_COLD,
	HANFTEK_UMT_010_WARM,
};

static const struct usb_device_id umt_table[] = {
	DVB_USB_DEV(HANFTEK, HANFTEK_UMT_010_COLD),
	DVB_USB_DEV(HANFTEK, HANFTEK_UMT_010_WARM),
	{ }
};

MODULE_DEVICE_TABLE(usb, umt_table);

static struct dvb_usb_device_properties umt_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,

	.usb_ctrl = CYPRESS_FX2,
	.firmware = "dvb-usb-umt-010-02.fw",

	.num_adapters = 1,
	.adapter = {
		{
		.num_frontends = 1,
		.fe =
		{{
			.streaming_ctrl  = dibusb2_0_streaming_ctrl,
			.frontend_attach = umt_mt352_frontend_attach,
			.tuner_attach    = umt_tuner_attach,

			/* parameter for the MPEG2-data transfer */
			.stream = {
				.type = USB_BULK,
				.count = MAX_NO_URBS_FOR_DATA_STREAM,
				.endpoint = 0x06,
				.u = {
					.bulk = {
						.buffersize = 512,
					}
				}
			},
		}},
		.size_of_priv = sizeof(struct dibusb_state),
		}
	},
	.power_ctrl = dibusb_power_ctrl,

	.i2c_algo = &dibusb_i2c_algo,

	.generic_bulk_ctrl_endpoint = 0x01,

	.num_device_descs = 1,
	.devices = {
		{ "Hanftek UMT-010 DVB-T USB2.0",
			{ &umt_table[HANFTEK_UMT_010_COLD], NULL },
			{ &umt_table[HANFTEK_UMT_010_WARM], NULL },
		},
	}
};

static struct usb_driver umt_driver = {
	.name		= "dvb_usb_umt_010",
	.probe		= umt_probe,
	.disconnect	= dvb_usb_device_exit,
	.id_table	= umt_table,
};

module_usb_driver(umt_driver);

MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
MODULE_DESCRIPTION("Driver for HanfTek UMT 010 USB2.0 DVB-T device");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
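/*
 * Illustrative note (not part of the original driver): each buffer passed
 * to mt352_write() in umt_mt352_demod_init() above is a register address
 * followed by the data bytes to write, which is why the tables pair one
 * address byte with one or two value bytes. Assuming that convention (see
 * mt352.h), a hypothetical helper for single-byte writes could look like:
 */
static inline int demo_mt352_write_reg(struct dvb_frontend *fe, u8 reg, u8 val)
{
	u8 buf[2] = { reg, val };	/* address byte, then data byte */

	return mt352_write(fe, buf, sizeof(buf));
}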
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>
#include <linux/lockdep.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern const struct class block_class;

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media. When set, the device remains present even when media is
 * not inserted. Shall not be set for devices which are removed entirely when
 * the media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 */
enum {
	GENHD_FL_REMOVABLE			= 1 << 0,
	GENHD_FL_HIDDEN				= 1 << 1,
	GENHD_FL_NO_PART			= 1 << 2,
};

enum {
	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

enum blk_integrity_checksum {
	BLK_INTEGRITY_CSUM_NONE		= 0,
	BLK_INTEGRITY_CSUM_IP		= 1,
	BLK_INTEGRITY_CSUM_CRC		= 2,
	BLK_INTEGRITY_CSUM_CRC64	= 3,
} __packed;

struct blk_integrity {
	unsigned char				flags;
	enum blk_integrity_checksum		csum_type;
	unsigned char				tuple_size;
	unsigned char				pi_offset;
	unsigned char				interval_exp;
	unsigned char				tag_size;
};

typedef unsigned int __bitwise blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ		((__force blk_mode_t)(1 << 0))
/* open for writing */
#define BLK_OPEN_WRITE		((__force blk_mode_t)(1 << 1))
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL		((__force blk_mode_t)(1 << 2))
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY		((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL	((__force blk_mode_t)(1 << 4))
/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
#define BLK_OPEN_RESTRICT_WRITES	((__force blk_mode_t)(1 << 5))
/* return partition scanning errors */
#define BLK_OPEN_STRICT_SCAN	((__force blk_mode_t)(1 << 6))

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	struct bio_set bio_split;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information. Reads of this information must be
	 * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
	 * information is only allowed while no requests are being processed.
	 * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned int		zone_capacity;
	unsigned int		last_zone_capacity;
	unsigned long __rcu	*conv_zones_bitmap;
	unsigned int		zone_wplugs_hash_bits;
	atomic_t		nr_zone_wplugs;
	spinlock_t		zone_wplugs_lock;
	struct mempool_s	*zone_wplugs_pool;
	struct hlist_head	*zone_wplugs_hash;
	struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;
	blk_mode_t open_mode;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};

/**
 * disk_openers - returns how many openers are there for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk. Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}

/**
 * disk_has_partscan - return %true if partition scanning is enabled on a disk
 * @disk: disk to check
 *
 * Returns %true if partition scanning is enabled for @disk, or %false if
 * partition scanning is disabled either permanently or temporarily.
 */
static inline bool disk_has_partscan(struct gendisk *disk)
{
	return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
		!test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
}

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

/*
 * We should strive for 1 << (PAGE_SHIFT + MAX_PAGECACHE_ORDER)
 * however we constrain this to what we can validate and test.
*/ #define BLK_MAX_BLOCK_SIZE SZ_64K /* blk_validate_limits() validates bsize, so drivers don't usually need to */ static inline int blk_validate_block_size(unsigned long bsize) { if (bsize < 512 || bsize > BLK_MAX_BLOCK_SIZE || !is_power_of_2(bsize)) return -EINVAL; return 0; } static inline bool blk_op_is_passthrough(blk_opf_t op) { op &= REQ_OP_MASK; return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; } /* flags set by the driver in queue_limits.features */ typedef unsigned int __bitwise blk_features_t; /* supports a volatile write cache */ #define BLK_FEAT_WRITE_CACHE ((__force blk_features_t)(1u << 0)) /* supports passing on the FUA bit */ #define BLK_FEAT_FUA ((__force blk_features_t)(1u << 1)) /* rotational device (hard drive or floppy) */ #define BLK_FEAT_ROTATIONAL ((__force blk_features_t)(1u << 2)) /* contributes to the random number pool */ #define BLK_FEAT_ADD_RANDOM ((__force blk_features_t)(1u << 3)) /* do disk/partitions IO accounting */ #define BLK_FEAT_IO_STAT ((__force blk_features_t)(1u << 4)) /* don't modify data until writeback is done */ #define BLK_FEAT_STABLE_WRITES ((__force blk_features_t)(1u << 5)) /* always completes in submit context */ #define BLK_FEAT_SYNCHRONOUS ((__force blk_features_t)(1u << 6)) /* supports REQ_NOWAIT */ #define BLK_FEAT_NOWAIT ((__force blk_features_t)(1u << 7)) /* supports DAX */ #define BLK_FEAT_DAX ((__force blk_features_t)(1u << 8)) /* supports I/O polling */ #define BLK_FEAT_POLL ((__force blk_features_t)(1u << 9)) /* is a zoned device */ #define BLK_FEAT_ZONED ((__force blk_features_t)(1u << 10)) /* supports PCI(e) p2p requests */ #define BLK_FEAT_PCI_P2PDMA ((__force blk_features_t)(1u << 12)) /* skip this queue in blk_mq_(un)quiesce_tagset */ #define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13)) /* bounce all highmem pages */ #define BLK_FEAT_BOUNCE_HIGH ((__force blk_features_t)(1u << 14)) /* undocumented magic for bcache */ #define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \ ((__force blk_features_t)(1u << 15)) /* atomic writes enabled */ #define BLK_FEAT_ATOMIC_WRITES \ ((__force blk_features_t)(1u << 16)) /* * Flags automatically inherited when stacking limits. 
*/ #define BLK_FEAT_INHERIT_MASK \ (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \ BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH | \ BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE) /* internal flags in queue_limits.flags */ typedef unsigned int __bitwise blk_flags_t; /* do not send FLUSH/FUA commands despite advertising a write cache */ #define BLK_FLAG_WRITE_CACHE_DISABLED ((__force blk_flags_t)(1u << 0)) /* I/O topology is misaligned */ #define BLK_FLAG_MISALIGNED ((__force blk_flags_t)(1u << 1)) /* passthrough command IO accounting */ #define BLK_FLAG_IOSTATS_PASSTHROUGH ((__force blk_flags_t)(1u << 2)) struct queue_limits { blk_features_t features; blk_flags_t flags; unsigned long seg_boundary_mask; unsigned long virt_boundary_mask; unsigned int max_hw_sectors; unsigned int max_dev_sectors; unsigned int chunk_sectors; unsigned int max_sectors; unsigned int max_user_sectors; unsigned int max_segment_size; unsigned int min_segment_size; unsigned int physical_block_size; unsigned int logical_block_size; unsigned int alignment_offset; unsigned int io_min; unsigned int io_opt; unsigned int max_discard_sectors; unsigned int max_hw_discard_sectors; unsigned int max_user_discard_sectors; unsigned int max_secure_erase_sectors; unsigned int max_write_zeroes_sectors; unsigned int max_hw_zone_append_sectors; unsigned int max_zone_append_sectors; unsigned int discard_granularity; unsigned int discard_alignment; unsigned int zone_write_granularity; /* atomic write limits */ unsigned int atomic_write_hw_max; unsigned int atomic_write_max_sectors; unsigned int atomic_write_hw_boundary; unsigned int atomic_write_boundary_sectors; unsigned int atomic_write_hw_unit_min; unsigned int atomic_write_unit_min; unsigned int atomic_write_hw_unit_max; unsigned int atomic_write_unit_max; unsigned short max_segments; unsigned short max_integrity_segments; unsigned short max_discard_segments; unsigned int max_open_zones; unsigned int max_active_zones; /* * Drivers that set dma_alignment to less than 511 must be prepared to * handle individual bvec's that are not a multiple of a SECTOR_SIZE * due to possible offsets. */ unsigned int dma_alignment; unsigned int dma_pad_mask; struct blk_integrity integrity; }; typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, void *data); #define BLK_ALL_ZONES ((unsigned int)-1) int blkdev_report_zones(struct block_device *bdev, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op, sector_t sectors, sector_t nr_sectors); int blk_revalidate_disk_zones(struct gendisk *disk); /* * Independent access ranges: struct blk_independent_access_range describes * a range of contiguous sectors that can be accessed using device command * execution resources that are independent from the resources used for * other access ranges. This is typically found with single-LUN multi-actuator * HDDs where each access range is served by a different set of heads. * The set of independent ranges supported by the device is defined using * struct blk_independent_access_ranges. The independent ranges must not overlap * and must include all sectors within the disk capacity (no sector holes * allowed). * For a device with multiple ranges, requests targeting sectors in different * ranges can be executed in parallel. A request can straddle an access range * boundary. 
*/ struct blk_independent_access_range { struct kobject kobj; sector_t sector; sector_t nr_sectors; }; struct blk_independent_access_ranges { struct kobject kobj; bool sysfs_registered; unsigned int nr_ia_ranges; struct blk_independent_access_range ia_range[]; }; struct request_queue { /* * The queue owner gets to use this for whatever they like. * ll_rw_blk doesn't touch it. */ void *queuedata; struct elevator_queue *elevator; const struct blk_mq_ops *mq_ops; /* sw queues */ struct blk_mq_ctx __percpu *queue_ctx; /* * various queue flags, see QUEUE_* below */ unsigned long queue_flags; unsigned int rq_timeout; unsigned int queue_depth; refcount_t refs; /* hw dispatch queues */ unsigned int nr_hw_queues; struct xarray hctx_table; struct percpu_ref q_usage_counter; struct lock_class_key io_lock_cls_key; struct lockdep_map io_lockdep_map; struct lock_class_key q_lock_cls_key; struct lockdep_map q_lockdep_map; struct request *last_merge; spinlock_t queue_lock; int quiesce_depth; struct gendisk *disk; /* * mq queue kobject */ struct kobject *mq_kobj; struct queue_limits limits; #ifdef CONFIG_PM struct device *dev; enum rpm_status rpm_status; #endif /* * Number of contexts that have called blk_set_pm_only(). If this * counter is above zero then only RQF_PM requests are processed. */ atomic_t pm_only; struct blk_queue_stats *stats; struct rq_qos *rq_qos; struct mutex rq_qos_mutex; /* * ida allocated id for this queue. Used to index queues from * ioctx. */ int id; /* * queue settings */ unsigned long nr_requests; /* Max # of requests */ #ifdef CONFIG_BLK_INLINE_ENCRYPTION struct blk_crypto_profile *crypto_profile; struct kobject *crypto_kobject; #endif struct timer_list timeout; struct work_struct timeout_work; atomic_t nr_active_requests_shared_tags; struct blk_mq_tags *sched_shared_tags; struct list_head icq_list; #ifdef CONFIG_BLK_CGROUP DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS); struct blkcg_gq *root_blkg; struct list_head blkg_list; struct mutex blkcg_mutex; #endif int node; spinlock_t requeue_lock; struct list_head requeue_list; struct delayed_work requeue_work; #ifdef CONFIG_BLK_DEV_IO_TRACE struct blk_trace __rcu *blk_trace; #endif /* * for flush operations */ struct blk_flush_queue *fq; struct list_head flush_list; /* * Protects against I/O scheduler switching, particularly when updating * q->elevator. Since the elevator update code path may also modify q-> * nr_requests and wbt latency, this lock also protects the sysfs attrs * nr_requests and wbt_lat_usec. Additionally the nr_hw_queues update * may modify hctx tags, reserved-tags and cpumask, so this lock also * helps protect the hctx sysfs/debugfs attrs. To ensure proper locking * order during an elevator or nr_hw_queue update, first freeze the * queue, then acquire ->elevator_lock. */ struct mutex elevator_lock; struct mutex sysfs_lock; /* * Protects queue limits and also sysfs attribute read_ahead_kb. 
*/ struct mutex limits_lock; /* * for reusing dead hctx instance in case of updating * nr_hw_queues */ struct list_head unused_hctx_list; spinlock_t unused_hctx_lock; int mq_freeze_depth; #ifdef CONFIG_BLK_DEV_THROTTLING /* Throttle data */ struct throtl_data *td; #endif struct rcu_head rcu_head; #ifdef CONFIG_LOCKDEP struct task_struct *mq_freeze_owner; int mq_freeze_owner_depth; /* * Records disk & queue state in current context, used in unfreeze * queue */ bool mq_freeze_disk_dead; bool mq_freeze_queue_dying; #endif wait_queue_head_t mq_freeze_wq; /* * Protect concurrent access to q_usage_counter by * percpu_ref_kill() and percpu_ref_reinit(). */ struct mutex mq_freeze_lock; struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; struct dentry *debugfs_dir; struct dentry *sched_debugfs_dir; struct dentry *rqos_debugfs_dir; /* * Serializes all debugfs metadata operations using the above dentries. */ struct mutex debugfs_mutex; }; /* Keep blk_queue_flag_name[] in sync with the definitions below */ enum { QUEUE_FLAG_DYING, /* queue being torn down */ QUEUE_FLAG_NOMERGES, /* disable merge attempts */ QUEUE_FLAG_SAME_COMP, /* complete on same CPU-group */ QUEUE_FLAG_FAIL_IO, /* fake timeout */ QUEUE_FLAG_NOXMERGES, /* No extended merges */ QUEUE_FLAG_SAME_FORCE, /* force complete on same CPU */ QUEUE_FLAG_INIT_DONE, /* queue is initialized */ QUEUE_FLAG_STATS, /* track IO start and completion times */ QUEUE_FLAG_REGISTERED, /* queue has been registered to a disk */ QUEUE_FLAG_QUIESCED, /* queue has been quiesced */ QUEUE_FLAG_RQ_ALLOC_TIME, /* record rq->alloc_time_ns */ QUEUE_FLAG_HCTX_ACTIVE, /* at least one blk-mq hctx is active */ QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */ QUEUE_FLAG_MAX }; #define QUEUE_FLAG_MQ_DEFAULT (1UL << QUEUE_FLAG_SAME_COMP) void blk_queue_flag_set(unsigned int flag, struct request_queue *q); void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_noxmerges(q) \ test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) #define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL)) #define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT) #define blk_queue_passthrough_stat(q) \ ((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH) #define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX) #define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA) #ifdef CONFIG_BLK_RQ_ALLOC_TIME #define blk_queue_rq_alloc_time(q) \ test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags) #else #define blk_queue_rq_alloc_time(q) false #endif #define blk_noretry_request(rq) \ ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ REQ_FAILFAST_DRIVER)) #define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) #define blk_queue_pm_only(q) atomic_read(&(q)->pm_only) #define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags) #define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags) #define blk_queue_skip_tagset_quiesce(q) \ ((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE) extern void blk_set_pm_only(struct request_queue *q); extern void blk_clear_pm_only(struct request_queue *q); #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) #define dma_map_bvec(dev, bv, dir, attrs) \ 
dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \ (dir), (attrs)) static inline bool queue_is_mq(struct request_queue *q) { return q->mq_ops; } #ifdef CONFIG_PM static inline enum rpm_status queue_rpm_status(struct request_queue *q) { return q->rpm_status; } #else static inline enum rpm_status queue_rpm_status(struct request_queue *q) { return RPM_ACTIVE; } #endif static inline bool blk_queue_is_zoned(struct request_queue *q) { return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && (q->limits.features & BLK_FEAT_ZONED); } #ifdef CONFIG_BLK_DEV_ZONED static inline unsigned int disk_nr_zones(struct gendisk *disk) { return disk->nr_zones; } bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs); #else /* CONFIG_BLK_DEV_ZONED */ static inline unsigned int disk_nr_zones(struct gendisk *disk) { return 0; } static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs) { return false; } #endif /* CONFIG_BLK_DEV_ZONED */ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) { if (!blk_queue_is_zoned(disk->queue)) return 0; return sector >> ilog2(disk->queue->limits.chunk_sectors); } static inline unsigned int bdev_nr_zones(struct block_device *bdev) { return disk_nr_zones(bdev->bd_disk); } static inline unsigned int bdev_max_open_zones(struct block_device *bdev) { return bdev->bd_disk->queue->limits.max_open_zones; } static inline unsigned int bdev_max_active_zones(struct block_device *bdev) { return bdev->bd_disk->queue->limits.max_active_zones; } static inline unsigned int blk_queue_depth(struct request_queue *q) { if (q->queue_depth) return q->queue_depth; return q->nr_requests; } /* * default timeout for SG_IO if none specified */ #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) #define BLK_MIN_SG_TIMEOUT (7 * HZ) /* This should not be used directly - use rq_for_each_segment */ #define for_each_bio(_bio) \ for (; _bio; _bio = _bio->bi_next) int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk, const struct attribute_group **groups, struct fwnode_handle *fwnode); int __must_check device_add_disk(struct device *parent, struct gendisk *disk, const struct attribute_group **groups); static inline int __must_check add_disk(struct gendisk *disk) { return device_add_disk(NULL, disk, NULL); } void del_gendisk(struct gendisk *gp); void invalidate_disk(struct gendisk *disk); void set_disk_ro(struct gendisk *disk, bool read_only); void disk_uevent(struct gendisk *disk, enum kobject_action action); static inline u8 bdev_partno(const struct block_device *bdev) { return atomic_read(&bdev->__bd_flags) & BD_PARTNO; } static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag) { return atomic_read(&bdev->__bd_flags) & flag; } static inline void bdev_set_flag(struct block_device *bdev, unsigned flag) { atomic_or(flag, &bdev->__bd_flags); } static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag) { atomic_andnot(flag, &bdev->__bd_flags); } static inline bool get_disk_ro(struct gendisk *disk) { return bdev_test_flag(disk->part0, BD_READ_ONLY) || test_bit(GD_READ_ONLY, &disk->state); } static inline bool bdev_read_only(struct block_device *bdev) { return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk); } bool set_capacity_and_notify(struct gendisk *disk, sector_t size); void disk_force_media_change(struct gendisk *disk); void bdev_mark_dead(struct block_device *bdev, bool surprise); void add_disk_randomness(struct gendisk *disk) __latent_entropy; void rand_initialize_disk(struct gendisk 
*disk); static inline sector_t get_start_sect(struct block_device *bdev) { return bdev->bd_start_sect; } static inline sector_t bdev_nr_sectors(struct block_device *bdev) { return bdev->bd_nr_sectors; } static inline loff_t bdev_nr_bytes(struct block_device *bdev) { return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT; } static inline sector_t get_capacity(struct gendisk *disk) { return bdev_nr_sectors(disk->part0); } static inline u64 sb_bdev_nr_blocks(struct super_block *sb) { return bdev_nr_sectors(sb->s_bdev) >> (sb->s_blocksize_bits - SECTOR_SHIFT); } int bdev_disk_changed(struct gendisk *disk, bool invalidate); void put_disk(struct gendisk *disk); struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node, struct lock_class_key *lkclass); /** * blk_alloc_disk - allocate a gendisk structure * @lim: queue limits to be used for this disk. * @node_id: numa node to allocate on * * Allocate and pre-initialize a gendisk structure for use with BIO based * drivers. * * Returns an ERR_PTR on error, else the allocated disk. * * Context: can sleep */ #define blk_alloc_disk(lim, node_id) \ ({ \ static struct lock_class_key __key; \ \ __blk_alloc_disk(lim, node_id, &__key); \ }) int __register_blkdev(unsigned int major, const char *name, void (*probe)(dev_t devt)); #define register_blkdev(major, name) \ __register_blkdev(major, name, NULL) void unregister_blkdev(unsigned int major, const char *name); bool disk_check_media_change(struct gendisk *disk); void set_capacity(struct gendisk *disk, sector_t size); #ifdef CONFIG_BLOCK_HOLDER_DEPRECATED int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk); #else static inline int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) { return 0; } static inline void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) { } #endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */ dev_t part_devt(struct gendisk *disk, u8 partno); void inc_diskseq(struct gendisk *disk); void blk_request_module(dev_t devt); extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); void submit_bio_noacct(struct bio *bio); struct bio *bio_split_to_limits(struct bio *bio); extern int blk_lld_busy(struct request_queue *q); extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags); extern void blk_queue_exit(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); /* Helper to convert REQ_OP_XXX to its string format XXX */ extern const char *blk_op_str(enum req_op op); int blk_status_to_errno(blk_status_t status); blk_status_t errno_to_blk_status(int errno); const char *blk_status_to_str(blk_status_t status); /* only poll the hardware once, don't continue until a completion was found */ #define BLK_POLL_ONESHOT (1 << 0) int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags); int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob, unsigned int flags); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { return bdev->bd_queue; /* this is never NULL */ } /* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond); static inline unsigned int bio_zone_no(struct bio *bio) { return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector); } static inline bool bio_straddles_zones(struct bio *bio) { return bio_sectors(bio) && bio_zone_no(bio) != 
disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}

/*
 * Return how much within the boundary is left to be used for I/O at a given
 * offset.
 */
static inline unsigned int blk_boundary_sectors_left(sector_t offset,
		unsigned int boundary_sectors)
{
	if (unlikely(!is_power_of_2(boundary_sectors)))
		return boundary_sectors - sector_div(offset, boundary_sectors);
	return boundary_sectors - (offset & (boundary_sectors - 1));
}

/**
 * queue_limits_start_update - start an atomic update of queue limits
 * @q: queue to update
 *
 * This function starts an atomic update of the queue limits. It takes a lock
 * to prevent other updates and returns a snapshot of the current limits that
 * the caller can modify. The caller must call queue_limits_commit_update()
 * to finish the update.
 *
 * Context: process context.
 */
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
{
	mutex_lock(&q->limits_lock);
	return q->limits;
}
int queue_limits_commit_update_frozen(struct request_queue *q,
		struct queue_limits *lim);
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
int blk_validate_limits(struct queue_limits *lim);

/**
 * queue_limits_cancel_update - cancel an atomic update of queue limits
 * @q: queue to update
 *
 * This function cancels an atomic update of the queue limits started by
 * queue_limits_start_update() and should be used when an error occurs after
 * starting the update.
 */
static inline void queue_limits_cancel_update(struct request_queue *q)
{
	mutex_unlock(&q->limits_lock);
}

/*
 * These helpers are for drivers that have sloppy feature negotiation and might
 * have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
 * completion handler when the device returned an indicator that the respective
 * feature is not actually supported. They are racy and the driver needs to
 * cope with that. Try to avoid this scheme if you can.
 */
static inline void blk_queue_disable_discard(struct request_queue *q)
{
	q->limits.max_discard_sectors = 0;
}

static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{
	q->limits.max_secure_erase_sectors = 0;
}

static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{
	q->limits.max_write_zeroes_sectors = 0;
}

/*
 * Access functions for manipulating queue properties
 */
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

struct rq_list {
	struct request *head;
	struct request *tail;
};

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request.
As the requests are moved from a per-task list to * the device's request_queue in a batch, this results in improved scalability * as the lock contention for request_queue lock is reduced. * * It is ok not to disable preemption when adding the request to the plug list * or when attempting a merge. For details, please see schedule() where * blk_flush_plug() is called. */ struct blk_plug { struct rq_list mq_list; /* blk-mq requests */ /* if ios_left is > 1, we can batch tag/rq allocations */ struct rq_list cached_rqs; u64 cur_ktime; unsigned short nr_ios; unsigned short rq_count; bool multiple_queues; bool has_elevator; struct list_head cb_list; /* md requires an unplug callback */ }; struct blk_plug_cb; typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); struct blk_plug_cb { struct list_head list; blk_plug_cb_fn callback; void *data; }; extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, int size); extern void blk_start_plug(struct blk_plug *); extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short); extern void blk_finish_plug(struct blk_plug *); void __blk_flush_plug(struct blk_plug *plug, bool from_schedule); static inline void blk_flush_plug(struct blk_plug *plug, bool async) { if (plug) __blk_flush_plug(plug, async); } /* * tsk == current here */ static inline void blk_plug_invalidate_ts(struct task_struct *tsk) { struct blk_plug *plug = tsk->plug; if (plug) plug->cur_ktime = 0; current->flags &= ~PF_BLOCK_TS; } int blkdev_issue_flush(struct block_device *bdev); long nr_blockdev_pages(void); #else /* CONFIG_BLOCK */ struct blk_plug { }; static inline void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios) { } static inline void blk_start_plug(struct blk_plug *plug) { } static inline void blk_finish_plug(struct blk_plug *plug) { } static inline void blk_flush_plug(struct blk_plug *plug, bool async) { } static inline void blk_plug_invalidate_ts(struct task_struct *tsk) { } static inline int blkdev_issue_flush(struct block_device *bdev) { return 0; } static inline long nr_blockdev_pages(void) { return 0; } #endif /* CONFIG_BLOCK */ extern void blk_io_schedule(void); int blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask); int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop); int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp); #define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */ #define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */ #define BLKDEV_ZERO_KILLABLE (1 << 2) /* interruptible by fatal signals */ extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned flags); extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned flags); static inline int sb_issue_discard(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) { return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - SECTOR_SHIFT), nr_blocks << (sb->s_blocksize_bits - SECTOR_SHIFT), gfp_mask); } static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask) { return blkdev_issue_zeroout(sb->s_bdev, block << (sb->s_blocksize_bits - SECTOR_SHIFT), nr_blocks << (sb->s_blocksize_bits - SECTOR_SHIFT), gfp_mask, 0); } 
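/*
 * Illustrative sketch (not part of the original header): submitting a batch
 * of bios under one plug so that sequential requests can be merged and handed
 * to the driver together, as described in the blk_plug comment above.
 * demo_submit_batch() and its arguments are hypothetical; blk_start_plug(),
 * blk_finish_plug() and submit_bio() are the real block-layer APIs.
 */
static void demo_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);	/* flushes the plugged requests to the device */
}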
static inline bool bdev_is_partition(struct block_device *bdev) { return bdev_partno(bdev) != 0; } enum blk_default_limits { BLK_MAX_SEGMENTS = 128, BLK_SAFE_MAX_SECTORS = 255, BLK_MAX_SEGMENT_SIZE = 65536, BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, }; /* * Default upper limit for the software max_sectors limit used for * regular file system I/O. This can be increased through sysfs. * * Not to be confused with the max_hw_sector limit that is entirely * controlled by the driver, usually based on hardware limits. */ #define BLK_DEF_MAX_SECTORS_CAP 2560u static inline struct queue_limits *bdev_limits(struct block_device *bdev) { return &bdev_get_queue(bdev)->limits; } static inline unsigned long queue_segment_boundary(const struct request_queue *q) { return q->limits.seg_boundary_mask; } static inline unsigned long queue_virt_boundary(const struct request_queue *q) { return q->limits.virt_boundary_mask; } static inline unsigned int queue_max_sectors(const struct request_queue *q) { return q->limits.max_sectors; } static inline unsigned int queue_max_bytes(struct request_queue *q) { return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9; } static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) { return q->limits.max_hw_sectors; } static inline unsigned short queue_max_segments(const struct request_queue *q) { return q->limits.max_segments; } static inline unsigned short queue_max_discard_segments(const struct request_queue *q) { return q->limits.max_discard_segments; } static inline unsigned int queue_max_segment_size(const struct request_queue *q) { return q->limits.max_segment_size; } static inline bool queue_emulates_zone_append(struct request_queue *q) { return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors; } static inline bool bdev_emulates_zone_append(struct block_device *bdev) { return queue_emulates_zone_append(bdev_get_queue(bdev)); } static inline unsigned int bdev_max_zone_append_sectors(struct block_device *bdev) { return bdev_limits(bdev)->max_zone_append_sectors; } static inline unsigned int bdev_max_segments(struct block_device *bdev) { return queue_max_segments(bdev_get_queue(bdev)); } static inline unsigned queue_logical_block_size(const struct request_queue *q) { return q->limits.logical_block_size; } static inline unsigned int bdev_logical_block_size(struct block_device *bdev) { return queue_logical_block_size(bdev_get_queue(bdev)); } static inline unsigned int queue_physical_block_size(const struct request_queue *q) { return q->limits.physical_block_size; } static inline unsigned int bdev_physical_block_size(struct block_device *bdev) { return queue_physical_block_size(bdev_get_queue(bdev)); } static inline unsigned int queue_io_min(const struct request_queue *q) { return q->limits.io_min; } static inline unsigned int bdev_io_min(struct block_device *bdev) { return queue_io_min(bdev_get_queue(bdev)); } static inline unsigned int queue_io_opt(const struct request_queue *q) { return q->limits.io_opt; } static inline unsigned int bdev_io_opt(struct block_device *bdev) { return queue_io_opt(bdev_get_queue(bdev)); } static inline unsigned int queue_zone_write_granularity(const struct request_queue *q) { return q->limits.zone_write_granularity; } static inline unsigned int bdev_zone_write_granularity(struct block_device *bdev) { return queue_zone_write_granularity(bdev_get_queue(bdev)); } int bdev_alignment_offset(struct block_device *bdev); unsigned int bdev_discard_alignment(struct block_device *bdev); static inline unsigned int 
bdev_max_discard_sectors(struct block_device *bdev) { return bdev_limits(bdev)->max_discard_sectors; } static inline unsigned int bdev_discard_granularity(struct block_device *bdev) { return bdev_limits(bdev)->discard_granularity; } static inline unsigned int bdev_max_secure_erase_sectors(struct block_device *bdev) { return bdev_limits(bdev)->max_secure_erase_sectors; } static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) { return bdev_limits(bdev)->max_write_zeroes_sectors; } static inline bool bdev_nonrot(struct block_device *bdev) { return blk_queue_nonrot(bdev_get_queue(bdev)); } static inline bool bdev_synchronous(struct block_device *bdev) { return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS; } static inline bool bdev_stable_writes(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE) return true; return q->limits.features & BLK_FEAT_STABLE_WRITES; } static inline bool blk_queue_write_cache(struct request_queue *q) { return (q->limits.features & BLK_FEAT_WRITE_CACHE) && !(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED); } static inline bool bdev_write_cache(struct block_device *bdev) { return blk_queue_write_cache(bdev_get_queue(bdev)); } static inline bool bdev_fua(struct block_device *bdev) { return bdev_limits(bdev)->features & BLK_FEAT_FUA; } static inline bool bdev_nowait(struct block_device *bdev) { return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT; } static inline bool bdev_is_zoned(struct block_device *bdev) { return blk_queue_is_zoned(bdev_get_queue(bdev)); } static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec) { return disk_zone_no(bdev->bd_disk, sec); } static inline sector_t bdev_zone_sectors(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); if (!blk_queue_is_zoned(q)) return 0; return q->limits.chunk_sectors; } static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev, sector_t sector) { return sector & (bdev_zone_sectors(bdev) - 1); } static inline sector_t bio_offset_from_zone_start(struct bio *bio) { return bdev_offset_from_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector); } static inline bool bdev_is_zone_start(struct block_device *bdev, sector_t sector) { return bdev_offset_from_zone_start(bdev, sector) == 0; } /** * bdev_zone_is_seq - check if a sector belongs to a sequential write zone * @bdev: block device to check * @sector: sector number * * Check if @sector on @bdev is contained in a sequential write required zone. 
*/ static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector) { bool is_seq = false; #if IS_ENABLED(CONFIG_BLK_DEV_ZONED) if (bdev_is_zoned(bdev)) { struct gendisk *disk = bdev->bd_disk; unsigned long *bitmap; rcu_read_lock(); bitmap = rcu_dereference(disk->conv_zones_bitmap); is_seq = !bitmap || !test_bit(disk_zone_no(disk, sector), bitmap); rcu_read_unlock(); } #endif return is_seq; } int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask); static inline unsigned int queue_dma_alignment(const struct request_queue *q) { return q->limits.dma_alignment; } static inline unsigned int queue_atomic_write_unit_max_bytes(const struct request_queue *q) { return q->limits.atomic_write_unit_max; } static inline unsigned int queue_atomic_write_unit_min_bytes(const struct request_queue *q) { return q->limits.atomic_write_unit_min; } static inline unsigned int queue_atomic_write_boundary_bytes(const struct request_queue *q) { return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT; } static inline unsigned int queue_atomic_write_max_bytes(const struct request_queue *q) { return q->limits.atomic_write_max_sectors << SECTOR_SHIFT; } static inline unsigned int bdev_dma_alignment(struct block_device *bdev) { return queue_dma_alignment(bdev_get_queue(bdev)); } static inline bool bdev_iter_is_aligned(struct block_device *bdev, struct iov_iter *iter) { return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev), bdev_logical_block_size(bdev) - 1); } static inline unsigned int blk_lim_dma_alignment_and_pad(struct queue_limits *lim) { return lim->dma_alignment | lim->dma_pad_mask; } static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr, unsigned int len) { unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits); return !(addr & alignment) && !(len & alignment); } /* assumes size > 256 */ static inline unsigned int blksize_bits(unsigned int size) { return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT; } int kblockd_schedule_work(struct work_struct *work); int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); #define MODULE_ALIAS_BLOCKDEV(major,minor) \ MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ MODULE_ALIAS("block-major-" __stringify(major) "-*") #ifdef CONFIG_BLK_INLINE_ENCRYPTION bool blk_crypto_register(struct blk_crypto_profile *profile, struct request_queue *q); #else /* CONFIG_BLK_INLINE_ENCRYPTION */ static inline bool blk_crypto_register(struct blk_crypto_profile *profile, struct request_queue *q) { return true; } #endif /* CONFIG_BLK_INLINE_ENCRYPTION */ enum blk_unique_id { /* these match the Designator Types specified in SPC */ BLK_UID_T10 = 1, BLK_UID_EUI64 = 2, BLK_UID_NAA = 3, }; struct block_device_operations { void (*submit_bio)(struct bio *bio); int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob, unsigned int flags); int (*open)(struct gendisk *disk, blk_mode_t mode); void (*release)(struct gendisk *disk); int (*ioctl)(struct block_device *bdev, blk_mode_t mode, unsigned cmd, unsigned long arg); int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode, unsigned cmd, unsigned long arg); unsigned int (*check_events) (struct gendisk *disk, unsigned int clearing); void (*unlock_native_capacity) (struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); int (*set_read_only)(struct block_device *bdev, bool ro); void (*free_disk)(struct gendisk 
*disk); /* this callback is with swap_lock and sometimes page table lock held */ void (*swap_slot_free_notify) (struct block_device *, unsigned long); int (*report_zones)(struct gendisk *, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); char *(*devnode)(struct gendisk *disk, umode_t *mode); /* returns the length of the identifier or a negative errno: */ int (*get_unique_id)(struct gendisk *disk, u8 id[16], enum blk_unique_id id_type); struct module *owner; const struct pr_ops *pr_ops; /* * Special callback for probing GPT entry at a given sector. * Needed by Android devices, used by GPT scanner and MMC blk * driver. */ int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector); }; #ifdef CONFIG_COMPAT extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t, unsigned int, unsigned long); #else #define blkdev_compat_ptr_ioctl NULL #endif static inline void blk_wake_io_task(struct task_struct *waiter) { /* * If we're polling, the task itself is doing the completions. For * that case, we don't need to signal a wakeup, it's enough to just * mark us as RUNNING. */ if (waiter == current) __set_current_state(TASK_RUNNING); else wake_up_process(waiter); } unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op, unsigned long start_time); void bdev_end_io_acct(struct block_device *bdev, enum req_op op, unsigned int sectors, unsigned long start_time); unsigned long bio_start_io_acct(struct bio *bio); void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time, struct block_device *orig_bdev); /** * bio_end_io_acct - end I/O accounting for bio based drivers * @bio: bio to end account for * @start_time: start time returned by bio_start_io_acct() */ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) { return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev); } int set_blocksize(struct file *file, int size); int lookup_bdev(const char *pathname, dev_t *dev); void blkdev_show(struct seq_file *seqf, off_t offset); #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ #define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ #ifdef CONFIG_BLOCK #define BLKDEV_MAJOR_MAX 512 #else #define BLKDEV_MAJOR_MAX 0 #endif struct blk_holder_ops { void (*mark_dead)(struct block_device *bdev, bool surprise); /* * Sync the file system mounted on the block device. */ void (*sync)(struct block_device *bdev); /* * Freeze the file system mounted on the block device. */ int (*freeze)(struct block_device *bdev); /* * Thaw the file system mounted on the block device. */ int (*thaw)(struct block_device *bdev); }; /* * For filesystems using @fs_holder_ops, the @holder argument passed to * helpers used to open and claim block devices via * bd_prepare_to_claim() must point to a superblock. */ extern const struct blk_holder_ops fs_holder_ops; /* * Return the correct open flags for blkdev_get_by_* for super block flags * as stored in sb->s_flags. */ #define sb_open_mode(flags) \ (BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \ (((flags) & SB_RDONLY) ? 
0 : BLK_OPEN_WRITE)) struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops); struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops); int bd_prepare_to_claim(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops); void bd_abort_claiming(struct block_device *bdev, void *holder); /* just for blk-cgroup, don't use elsewhere */ struct block_device *blkdev_get_no_open(dev_t dev); void blkdev_put_no_open(struct block_device *bdev); struct block_device *I_BDEV(struct inode *inode); struct block_device *file_bdev(struct file *bdev_file); bool disk_live(struct gendisk *disk); unsigned int block_size(struct block_device *bdev); #ifdef CONFIG_BLOCK void invalidate_bdev(struct block_device *bdev); int sync_blockdev(struct block_device *bdev); int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend); int sync_blockdev_nowait(struct block_device *bdev); void sync_bdevs(bool wait); void bdev_statx(struct path *, struct kstat *, u32); void printk_all_partitions(void); int __init early_lookup_bdev(const char *pathname, dev_t *dev); #else static inline void invalidate_bdev(struct block_device *bdev) { } static inline int sync_blockdev(struct block_device *bdev) { return 0; } static inline int sync_blockdev_nowait(struct block_device *bdev) { return 0; } static inline void sync_bdevs(bool wait) { } static inline void bdev_statx(struct path *path, struct kstat *stat, u32 request_mask) { } static inline void printk_all_partitions(void) { } static inline int early_lookup_bdev(const char *pathname, dev_t *dev) { return -EINVAL; } #endif /* CONFIG_BLOCK */ int bdev_freeze(struct block_device *bdev); int bdev_thaw(struct block_device *bdev); void bdev_fput(struct file *bdev_file); struct io_comp_batch { struct rq_list req_list; bool need_ts; void (*complete)(struct io_comp_batch *); }; static inline bool blk_atomic_write_start_sect_aligned(sector_t sector, struct queue_limits *limits) { unsigned int alignment = max(limits->atomic_write_hw_unit_min, limits->atomic_write_hw_boundary); return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT); } static inline bool bdev_can_atomic_write(struct block_device *bdev) { struct request_queue *bd_queue = bdev->bd_queue; struct queue_limits *limits = &bd_queue->limits; if (!limits->atomic_write_unit_min) return false; if (bdev_is_partition(bdev)) return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect, limits); return true; } static inline unsigned int bdev_atomic_write_unit_min_bytes(struct block_device *bdev) { if (!bdev_can_atomic_write(bdev)) return 0; return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev)); } static inline unsigned int bdev_atomic_write_unit_max_bytes(struct block_device *bdev) { if (!bdev_can_atomic_write(bdev)) return 0; return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev)); } #define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { } #endif /* _LINUX_BLKDEV_H */
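A few self-contained userspace sketches follow, illustrating the bit arithmetic behind some of the helpers above; all file names, helper names, and values in them are illustrative, not kernel API. The first mirrors the start-sector test used by blk_atomic_write_start_sect_aligned(): the byte-based minimum atomic unit and boundary limits are reduced to sectors, and the partition start must fall on that granularity (both limits are assumed to be powers of two, as in queue_limits, so the modulo below matches the IS_ALIGNED() mask test).

/* build: cc -std=c11 -o atomic_align atomic_align.c */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

static int start_sect_aligned(uint64_t start_sector,
			      uint32_t unit_min_bytes,
			      uint32_t boundary_bytes)
{
	uint32_t align_bytes = unit_min_bytes > boundary_bytes ?
			       unit_min_bytes : boundary_bytes;
	uint64_t align_sectors = align_bytes >> SECTOR_SHIFT;

	if (!align_sectors)	/* no constraint configured */
		return 1;
	return start_sector % align_sectors == 0;
}

int main(void)
{
	/* hypothetical partition at sector 256 with a 64 KiB boundary */
	printf("%d\n", start_sect_aligned(256, 4096, 65536)); /* prints 1: 64 KiB = 128 sectors, 256 % 128 == 0 */
	return 0;
}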
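The zoned-device helpers rely on chunk_sectors being a power of two, which is why bdev_offset_from_zone_start() can use a mask rather than a division. A standalone sketch of that arithmetic (names illustrative):

#include <stdint.h>
#include <stdio.h>

/* zone_sectors must be a power of two for the mask to equal sector % zone_sectors */
static uint64_t offset_from_zone_start(uint64_t sector, uint64_t zone_sectors)
{
	return sector & (zone_sectors - 1);
}

int main(void)
{
	/* with 524288-sector (256 MiB) zones, sector 1048600 is 24 sectors into zone 2 */
	printf("%llu\n",
	       (unsigned long long)offset_from_zone_start(1048600, 524288));
	return 0;
}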
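blksize_bits() computes log2 of the block size as order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT. The loop below is an equivalent, self-contained rendering of the same math (and like the original it assumes size > 256):

#include <stdio.h>

#define SECTOR_SHIFT 9

static unsigned int blksize_bits_demo(unsigned int size)
{
	unsigned int bits = SECTOR_SHIFT;

	while ((1u << bits) < size)
		bits++;
	return bits;
}

int main(void)
{
	printf("%u %u %u\n", blksize_bits_demo(512),
	       blksize_bits_demo(1024), blksize_bits_demo(4096)); /* 9 10 12 */
	return 0;
}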
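blk_rq_aligned() treats dma_alignment and dma_pad_mask as bit masks: a buffer address or length passes only when none of the masked low bits are set. A standalone sketch of the check, with the combined mask precomputed as in blk_lim_dma_alignment_and_pad():

#include <stdint.h>
#include <stdio.h>

static int rq_aligned(uintptr_t addr, unsigned int len, unsigned int mask)
{
	/* both the start address and the length must clear the mask bits */
	return !(addr & mask) && !(len & mask);
}

int main(void)
{
	/* mask 511 demands 512-byte alignment */
	printf("%d\n", rq_aligned(0x1000, 4096, 511)); /* prints 1 */
	printf("%d\n", rq_aligned(0x1001, 4096, 511)); /* prints 0 */
	return 0;
}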
// SPDX-License-Identifier: GPL-2.0+ /* * HID driver for Nintendo Switch Joy-Cons and Pro Controllers * * Copyright (c) 2019-2021 Daniel J. Ogorchock <djogorchock@gmail.com> * Portions Copyright (c) 2020 Nadia Holmquist Pedersen <nadia@nhp.sh> * Copyright (c) 2022 Emily Strickland <linux@emily.st> * Copyright (c) 2023 Ryan McClelland <rymcclel@gmail.com> * * The following resources/projects were referenced for this driver: * https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering * https://gitlab.com/pjranki/joycon-linux-kernel (Peter Rankin) * https://github.com/FrotBot/SwitchProConLinuxUSB * https://github.com/MTCKC/ProconXInput * https://github.com/Davidobot/BetterJoyForCemu * hid-wiimote kernel hid driver * hid-logitech-hidpp driver * hid-sony driver * * This driver supports the Nintendo Switch Joy-Cons and Pro Controllers. The * Pro Controllers can either be used over USB or Bluetooth. * * This driver also incorporates support for Nintendo Switch Online controllers * for the NES, SNES, Sega Genesis, and N64.
* * The driver will retrieve the factory calibration info from the controllers, * so little to no user calibration should be required. * */ #include "hid-ids.h" #include <linux/unaligned.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/hid.h> #include <linux/idr.h> #include <linux/input.h> #include <linux/jiffies.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/power_supply.h> #include <linux/spinlock.h> /* * Reference the url below for the following HID report defines: * https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering */ /* Output Reports */ #define JC_OUTPUT_RUMBLE_AND_SUBCMD 0x01 #define JC_OUTPUT_FW_UPDATE_PKT 0x03 #define JC_OUTPUT_RUMBLE_ONLY 0x10 #define JC_OUTPUT_MCU_DATA 0x11 #define JC_OUTPUT_USB_CMD 0x80 /* Subcommand IDs */ #define JC_SUBCMD_STATE 0x00 #define JC_SUBCMD_MANUAL_BT_PAIRING 0x01 #define JC_SUBCMD_REQ_DEV_INFO 0x02 #define JC_SUBCMD_SET_REPORT_MODE 0x03 #define JC_SUBCMD_TRIGGERS_ELAPSED 0x04 #define JC_SUBCMD_GET_PAGE_LIST_STATE 0x05 #define JC_SUBCMD_SET_HCI_STATE 0x06 #define JC_SUBCMD_RESET_PAIRING_INFO 0x07 #define JC_SUBCMD_LOW_POWER_MODE 0x08 #define JC_SUBCMD_SPI_FLASH_READ 0x10 #define JC_SUBCMD_SPI_FLASH_WRITE 0x11 #define JC_SUBCMD_RESET_MCU 0x20 #define JC_SUBCMD_SET_MCU_CONFIG 0x21 #define JC_SUBCMD_SET_MCU_STATE 0x22 #define JC_SUBCMD_SET_PLAYER_LIGHTS 0x30 #define JC_SUBCMD_GET_PLAYER_LIGHTS 0x31 #define JC_SUBCMD_SET_HOME_LIGHT 0x38 #define JC_SUBCMD_ENABLE_IMU 0x40 #define JC_SUBCMD_SET_IMU_SENSITIVITY 0x41 #define JC_SUBCMD_WRITE_IMU_REG 0x42 #define JC_SUBCMD_READ_IMU_REG 0x43 #define JC_SUBCMD_ENABLE_VIBRATION 0x48 #define JC_SUBCMD_GET_REGULATED_VOLTAGE 0x50 /* Input Reports */ #define JC_INPUT_BUTTON_EVENT 0x3F #define JC_INPUT_SUBCMD_REPLY 0x21 #define JC_INPUT_IMU_DATA 0x30 #define JC_INPUT_MCU_DATA 0x31 #define JC_INPUT_USB_RESPONSE 0x81 /* Feature Reports */ #define JC_FEATURE_LAST_SUBCMD 0x02 #define JC_FEATURE_OTA_FW_UPGRADE 0x70 #define JC_FEATURE_SETUP_MEM_READ 0x71 #define JC_FEATURE_MEM_READ 0x72 #define JC_FEATURE_ERASE_MEM_SECTOR 0x73 #define JC_FEATURE_MEM_WRITE 0x74 #define JC_FEATURE_LAUNCH 0x75 /* USB Commands */ #define JC_USB_CMD_CONN_STATUS 0x01 #define JC_USB_CMD_HANDSHAKE 0x02 #define JC_USB_CMD_BAUDRATE_3M 0x03 #define JC_USB_CMD_NO_TIMEOUT 0x04 #define JC_USB_CMD_EN_TIMEOUT 0x05 #define JC_USB_RESET 0x06 #define JC_USB_PRE_HANDSHAKE 0x91 #define JC_USB_SEND_UART 0x92 /* Magic value denoting presence of user calibration */ #define JC_CAL_USR_MAGIC_0 0xB2 #define JC_CAL_USR_MAGIC_1 0xA1 #define JC_CAL_USR_MAGIC_SIZE 2 /* SPI storage addresses of user calibration data */ #define JC_CAL_USR_LEFT_MAGIC_ADDR 0x8010 #define JC_CAL_USR_LEFT_DATA_ADDR 0x8012 #define JC_CAL_USR_LEFT_DATA_END 0x801A #define JC_CAL_USR_RIGHT_MAGIC_ADDR 0x801B #define JC_CAL_USR_RIGHT_DATA_ADDR 0x801D #define JC_CAL_STICK_DATA_SIZE \ (JC_CAL_USR_LEFT_DATA_END - JC_CAL_USR_LEFT_DATA_ADDR + 1) /* SPI storage addresses of factory calibration data */ #define JC_CAL_FCT_DATA_LEFT_ADDR 0x603d #define JC_CAL_FCT_DATA_RIGHT_ADDR 0x6046 /* SPI storage addresses of IMU factory calibration data */ #define JC_IMU_CAL_FCT_DATA_ADDR 0x6020 #define JC_IMU_CAL_FCT_DATA_END 0x6037 #define JC_IMU_CAL_DATA_SIZE \ (JC_IMU_CAL_FCT_DATA_END - JC_IMU_CAL_FCT_DATA_ADDR + 1) /* SPI storage addresses of IMU user calibration data */ #define JC_IMU_CAL_USR_MAGIC_ADDR 0x8026 #define JC_IMU_CAL_USR_DATA_ADDR 0x8028 /* The raw analog joystick values will be mapped in terms of this magnitude */ 
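/*
 * Illustrative sketch only (not a driver function): raw stick samples can be
 * mapped onto the symmetric [-max_mag, +max_mag] range (see JC_MAX_STICK_MAG
 * below) using the min/center/max calibration triple, scaling each side of
 * center by its own travel. Assumes plausible calibration (min < center < max),
 * which the driver validates when parsing the calibration data.
 */
static inline s32 example_map_stick_val(s32 cal_min, s32 cal_center,
					s32 cal_max, s32 val, s32 max_mag)
{
	s64 rel = val - cal_center;
	/* travel available on the side of center this sample falls on */
	s64 range = rel > 0 ? cal_max - cal_center : cal_center - cal_min;
	s64 mapped = rel * max_mag / range;

	/* clamp samples that overshoot the calibrated extremes */
	if (mapped > max_mag)
		mapped = max_mag;
	if (mapped < -max_mag)
		mapped = -max_mag;
	return (s32)mapped;
}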
#define JC_MAX_STICK_MAG 32767 #define JC_STICK_FUZZ 250 #define JC_STICK_FLAT 500 /* Hat values for pro controller's d-pad */ #define JC_MAX_DPAD_MAG 1 #define JC_DPAD_FUZZ 0 #define JC_DPAD_FLAT 0 /* Under most circumstances IMU reports are pushed every 15ms; use as default */ #define JC_IMU_DFLT_AVG_DELTA_MS 15 /* How many samples to sum before calculating average IMU report delta */ #define JC_IMU_SAMPLES_PER_DELTA_AVG 300 /* Controls how many dropped IMU packets at once trigger a warning message */ #define JC_IMU_DROPPED_PKT_WARNING 3 /* * The controller's accelerometer has a sensor resolution of 16bits and is * configured with a range of +-8000 milliGs. Therefore, the resolution can be * calculated thus: (2^16-1)/(8000 * 2) = 4.096 digits per milliG * Resolution per G (rather than per milliG): 4.096 * 1000 = 4096 digits per G * Alternatively: 1/4096 = .0002441 Gs per digit */ #define JC_IMU_MAX_ACCEL_MAG 32767 #define JC_IMU_ACCEL_RES_PER_G 4096 #define JC_IMU_ACCEL_FUZZ 10 #define JC_IMU_ACCEL_FLAT 0 /* * The controller's gyroscope has a sensor resolution of 16bits and is * configured with a range of +-2000 degrees/second. * Digits per dps: (2^16 -1)/(2000*2) = 16.38375 * dps per digit: 1/16.38375 = .0610 * * STMicro recommends in the datasheet to add 15% to the dps/digit. This allows * the full sensitivity range to be saturated without clipping. This yields more * accurate results, so it's the technique this driver uses. * dps per digit (corrected): .0610 * 1.15 = .0702 * digits per dps (corrected): 1/.0702 = 14.247 * * Now, 14.247 truncating to 14 loses a lot of precision, so we rescale the * min/max range by 1000. */ #define JC_IMU_PREC_RANGE_SCALE 1000 /* Note: change mag and res_per_dps if prec_range_scale is ever altered */ #define JC_IMU_MAX_GYRO_MAG 32767000 /* (2^16-1)*1000 */ #define JC_IMU_GYRO_RES_PER_DPS 14247 /* (14.247*1000) */ #define JC_IMU_GYRO_FUZZ 10 #define JC_IMU_GYRO_FLAT 0 /* frequency/amplitude tables for rumble */ struct joycon_rumble_freq_data { u16 high; u8 low; u16 freq; /* Hz */ }; struct joycon_rumble_amp_data { u8 high; u16 low; u16 amp; }; #if IS_ENABLED(CONFIG_NINTENDO_FF) /* * These tables are from * https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering/blob/master/rumble_data_table.md */ static const struct joycon_rumble_freq_data joycon_rumble_frequencies[] = { /* high, low, freq */ { 0x0000, 0x01, 41 }, { 0x0000, 0x02, 42 }, { 0x0000, 0x03, 43 }, { 0x0000, 0x04, 44 }, { 0x0000, 0x05, 45 }, { 0x0000, 0x06, 46 }, { 0x0000, 0x07, 47 }, { 0x0000, 0x08, 48 }, { 0x0000, 0x09, 49 }, { 0x0000, 0x0A, 50 }, { 0x0000, 0x0B, 51 }, { 0x0000, 0x0C, 52 }, { 0x0000, 0x0D, 53 }, { 0x0000, 0x0E, 54 }, { 0x0000, 0x0F, 55 }, { 0x0000, 0x10, 57 }, { 0x0000, 0x11, 58 }, { 0x0000, 0x12, 59 }, { 0x0000, 0x13, 60 }, { 0x0000, 0x14, 62 }, { 0x0000, 0x15, 63 }, { 0x0000, 0x16, 64 }, { 0x0000, 0x17, 66 }, { 0x0000, 0x18, 67 }, { 0x0000, 0x19, 69 }, { 0x0000, 0x1A, 70 }, { 0x0000, 0x1B, 72 }, { 0x0000, 0x1C, 73 }, { 0x0000, 0x1D, 75 }, { 0x0000, 0x1e, 77 }, { 0x0000, 0x1f, 78 }, { 0x0000, 0x20, 80 }, { 0x0400, 0x21, 82 }, { 0x0800, 0x22, 84 }, { 0x0c00, 0x23, 85 }, { 0x1000, 0x24, 87 }, { 0x1400, 0x25, 89 }, { 0x1800, 0x26, 91 }, { 0x1c00, 0x27, 93 }, { 0x2000, 0x28, 95 }, { 0x2400, 0x29, 97 }, { 0x2800, 0x2a, 99 }, { 0x2c00, 0x2b, 102 }, { 0x3000, 0x2c, 104 }, { 0x3400, 0x2d, 106 }, { 0x3800, 0x2e, 108 }, { 0x3c00, 0x2f, 111 }, { 0x4000, 0x30, 113 }, { 0x4400, 0x31, 116 }, { 0x4800, 0x32, 118 }, { 0x4c00, 0x33, 121 }, { 0x5000, 0x34, 123 }, { 0x5400, 0x35, 126
}, { 0x5800, 0x36, 129 }, { 0x5c00, 0x37, 132 }, { 0x6000, 0x38, 135 }, { 0x6400, 0x39, 137 }, { 0x6800, 0x3a, 141 }, { 0x6c00, 0x3b, 144 }, { 0x7000, 0x3c, 147 }, { 0x7400, 0x3d, 150 }, { 0x7800, 0x3e, 153 }, { 0x7c00, 0x3f, 157 }, { 0x8000, 0x40, 160 }, { 0x8400, 0x41, 164 }, { 0x8800, 0x42, 167 }, { 0x8c00, 0x43, 171 }, { 0x9000, 0x44, 174 }, { 0x9400, 0x45, 178 }, { 0x9800, 0x46, 182 }, { 0x9c00, 0x47, 186 }, { 0xa000, 0x48, 190 }, { 0xa400, 0x49, 194 }, { 0xa800, 0x4a, 199 }, { 0xac00, 0x4b, 203 }, { 0xb000, 0x4c, 207 }, { 0xb400, 0x4d, 212 }, { 0xb800, 0x4e, 217 }, { 0xbc00, 0x4f, 221 }, { 0xc000, 0x50, 226 }, { 0xc400, 0x51, 231 }, { 0xc800, 0x52, 236 }, { 0xcc00, 0x53, 241 }, { 0xd000, 0x54, 247 }, { 0xd400, 0x55, 252 }, { 0xd800, 0x56, 258 }, { 0xdc00, 0x57, 263 }, { 0xe000, 0x58, 269 }, { 0xe400, 0x59, 275 }, { 0xe800, 0x5a, 281 }, { 0xec00, 0x5b, 287 }, { 0xf000, 0x5c, 293 }, { 0xf400, 0x5d, 300 }, { 0xf800, 0x5e, 306 }, { 0xfc00, 0x5f, 313 }, { 0x0001, 0x60, 320 }, { 0x0401, 0x61, 327 }, { 0x0801, 0x62, 334 }, { 0x0c01, 0x63, 341 }, { 0x1001, 0x64, 349 }, { 0x1401, 0x65, 357 }, { 0x1801, 0x66, 364 }, { 0x1c01, 0x67, 372 }, { 0x2001, 0x68, 381 }, { 0x2401, 0x69, 389 }, { 0x2801, 0x6a, 397 }, { 0x2c01, 0x6b, 406 }, { 0x3001, 0x6c, 415 }, { 0x3401, 0x6d, 424 }, { 0x3801, 0x6e, 433 }, { 0x3c01, 0x6f, 443 }, { 0x4001, 0x70, 453 }, { 0x4401, 0x71, 462 }, { 0x4801, 0x72, 473 }, { 0x4c01, 0x73, 483 }, { 0x5001, 0x74, 494 }, { 0x5401, 0x75, 504 }, { 0x5801, 0x76, 515 }, { 0x5c01, 0x77, 527 }, { 0x6001, 0x78, 538 }, { 0x6401, 0x79, 550 }, { 0x6801, 0x7a, 562 }, { 0x6c01, 0x7b, 574 }, { 0x7001, 0x7c, 587 }, { 0x7401, 0x7d, 600 }, { 0x7801, 0x7e, 613 }, { 0x7c01, 0x7f, 626 }, { 0x8001, 0x00, 640 }, { 0x8401, 0x00, 654 }, { 0x8801, 0x00, 668 }, { 0x8c01, 0x00, 683 }, { 0x9001, 0x00, 698 }, { 0x9401, 0x00, 713 }, { 0x9801, 0x00, 729 }, { 0x9c01, 0x00, 745 }, { 0xa001, 0x00, 761 }, { 0xa401, 0x00, 778 }, { 0xa801, 0x00, 795 }, { 0xac01, 0x00, 812 }, { 0xb001, 0x00, 830 }, { 0xb401, 0x00, 848 }, { 0xb801, 0x00, 867 }, { 0xbc01, 0x00, 886 }, { 0xc001, 0x00, 905 }, { 0xc401, 0x00, 925 }, { 0xc801, 0x00, 945 }, { 0xcc01, 0x00, 966 }, { 0xd001, 0x00, 987 }, { 0xd401, 0x00, 1009 }, { 0xd801, 0x00, 1031 }, { 0xdc01, 0x00, 1053 }, { 0xe001, 0x00, 1076 }, { 0xe401, 0x00, 1100 }, { 0xe801, 0x00, 1124 }, { 0xec01, 0x00, 1149 }, { 0xf001, 0x00, 1174 }, { 0xf401, 0x00, 1199 }, { 0xf801, 0x00, 1226 }, { 0xfc01, 0x00, 1253 } }; #define joycon_max_rumble_amp (1003) static const struct joycon_rumble_amp_data joycon_rumble_amplitudes[] = { /* high, low, amp */ { 0x00, 0x0040, 0 }, { 0x02, 0x8040, 10 }, { 0x04, 0x0041, 12 }, { 0x06, 0x8041, 14 }, { 0x08, 0x0042, 17 }, { 0x0a, 0x8042, 20 }, { 0x0c, 0x0043, 24 }, { 0x0e, 0x8043, 28 }, { 0x10, 0x0044, 33 }, { 0x12, 0x8044, 40 }, { 0x14, 0x0045, 47 }, { 0x16, 0x8045, 56 }, { 0x18, 0x0046, 67 }, { 0x1a, 0x8046, 80 }, { 0x1c, 0x0047, 95 }, { 0x1e, 0x8047, 112 }, { 0x20, 0x0048, 117 }, { 0x22, 0x8048, 123 }, { 0x24, 0x0049, 128 }, { 0x26, 0x8049, 134 }, { 0x28, 0x004a, 140 }, { 0x2a, 0x804a, 146 }, { 0x2c, 0x004b, 152 }, { 0x2e, 0x804b, 159 }, { 0x30, 0x004c, 166 }, { 0x32, 0x804c, 173 }, { 0x34, 0x004d, 181 }, { 0x36, 0x804d, 189 }, { 0x38, 0x004e, 198 }, { 0x3a, 0x804e, 206 }, { 0x3c, 0x004f, 215 }, { 0x3e, 0x804f, 225 }, { 0x40, 0x0050, 230 }, { 0x42, 0x8050, 235 }, { 0x44, 0x0051, 240 }, { 0x46, 0x8051, 245 }, { 0x48, 0x0052, 251 }, { 0x4a, 0x8052, 256 }, { 0x4c, 0x0053, 262 }, { 0x4e, 0x8053, 268 }, { 0x50, 0x0054, 273 }, { 0x52, 0x8054, 279 }, { 0x54, 0x0055, 
286 }, { 0x56, 0x8055, 292 }, { 0x58, 0x0056, 298 }, { 0x5a, 0x8056, 305 }, { 0x5c, 0x0057, 311 }, { 0x5e, 0x8057, 318 }, { 0x60, 0x0058, 325 }, { 0x62, 0x8058, 332 }, { 0x64, 0x0059, 340 }, { 0x66, 0x8059, 347 }, { 0x68, 0x005a, 355 }, { 0x6a, 0x805a, 362 }, { 0x6c, 0x005b, 370 }, { 0x6e, 0x805b, 378 }, { 0x70, 0x005c, 387 }, { 0x72, 0x805c, 395 }, { 0x74, 0x005d, 404 }, { 0x76, 0x805d, 413 }, { 0x78, 0x005e, 422 }, { 0x7a, 0x805e, 431 }, { 0x7c, 0x005f, 440 }, { 0x7e, 0x805f, 450 }, { 0x80, 0x0060, 460 }, { 0x82, 0x8060, 470 }, { 0x84, 0x0061, 480 }, { 0x86, 0x8061, 491 }, { 0x88, 0x0062, 501 }, { 0x8a, 0x8062, 512 }, { 0x8c, 0x0063, 524 }, { 0x8e, 0x8063, 535 }, { 0x90, 0x0064, 547 }, { 0x92, 0x8064, 559 }, { 0x94, 0x0065, 571 }, { 0x96, 0x8065, 584 }, { 0x98, 0x0066, 596 }, { 0x9a, 0x8066, 609 }, { 0x9c, 0x0067, 623 }, { 0x9e, 0x8067, 636 }, { 0xa0, 0x0068, 650 }, { 0xa2, 0x8068, 665 }, { 0xa4, 0x0069, 679 }, { 0xa6, 0x8069, 694 }, { 0xa8, 0x006a, 709 }, { 0xaa, 0x806a, 725 }, { 0xac, 0x006b, 741 }, { 0xae, 0x806b, 757 }, { 0xb0, 0x006c, 773 }, { 0xb2, 0x806c, 790 }, { 0xb4, 0x006d, 808 }, { 0xb6, 0x806d, 825 }, { 0xb8, 0x006e, 843 }, { 0xba, 0x806e, 862 }, { 0xbc, 0x006f, 881 }, { 0xbe, 0x806f, 900 }, { 0xc0, 0x0070, 920 }, { 0xc2, 0x8070, 940 }, { 0xc4, 0x0071, 960 }, { 0xc6, 0x8071, 981 }, { 0xc8, 0x0072, joycon_max_rumble_amp } }; static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160; static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320; static const unsigned short JC_RUMBLE_ZERO_AMP_PKT_CNT = 5; #endif /* IS_ENABLED(CONFIG_NINTENDO_FF) */ static const u16 JC_RUMBLE_PERIOD_MS = 50; /* States for controller state machine */ enum joycon_ctlr_state { JOYCON_CTLR_STATE_INIT, JOYCON_CTLR_STATE_READ, JOYCON_CTLR_STATE_REMOVED, }; /* Controller type received as part of device info */ enum joycon_ctlr_type { JOYCON_CTLR_TYPE_JCL = 0x01, JOYCON_CTLR_TYPE_JCR = 0x02, JOYCON_CTLR_TYPE_PRO = 0x03, JOYCON_CTLR_TYPE_NESL = 0x09, JOYCON_CTLR_TYPE_NESR = 0x0A, JOYCON_CTLR_TYPE_SNES = 0x0B, JOYCON_CTLR_TYPE_GEN = 0x0D, JOYCON_CTLR_TYPE_N64 = 0x0C, }; struct joycon_stick_cal { s32 max; s32 min; s32 center; }; struct joycon_imu_cal { s16 offset[3]; s16 scale[3]; }; /* * All the controller's button values are stored in a u32. * They can be accessed with bitwise ANDs. */ #define JC_BTN_Y BIT(0) #define JC_BTN_X BIT(1) #define JC_BTN_B BIT(2) #define JC_BTN_A BIT(3) #define JC_BTN_SR_R BIT(4) #define JC_BTN_SL_R BIT(5) #define JC_BTN_R BIT(6) #define JC_BTN_ZR BIT(7) #define JC_BTN_MINUS BIT(8) #define JC_BTN_PLUS BIT(9) #define JC_BTN_RSTICK BIT(10) #define JC_BTN_LSTICK BIT(11) #define JC_BTN_HOME BIT(12) #define JC_BTN_CAP BIT(13) /* capture button */ #define JC_BTN_DOWN BIT(16) #define JC_BTN_UP BIT(17) #define JC_BTN_RIGHT BIT(18) #define JC_BTN_LEFT BIT(19) #define JC_BTN_SR_L BIT(20) #define JC_BTN_SL_L BIT(21) #define JC_BTN_L BIT(22) #define JC_BTN_ZL BIT(23) struct joycon_ctlr_button_mapping { u32 code; u32 bit; }; /* * D-pad is configured as buttons for the left Joy-Con only! */ static const struct joycon_ctlr_button_mapping left_joycon_button_mappings[] = { { BTN_TL, JC_BTN_L, }, { BTN_TL2, JC_BTN_ZL, }, { BTN_SELECT, JC_BTN_MINUS, }, { BTN_THUMBL, JC_BTN_LSTICK, }, { BTN_DPAD_UP, JC_BTN_UP, }, { BTN_DPAD_DOWN, JC_BTN_DOWN, }, { BTN_DPAD_LEFT, JC_BTN_LEFT, }, { BTN_DPAD_RIGHT, JC_BTN_RIGHT, }, { BTN_Z, JC_BTN_CAP, }, { /* sentinel */ }, }; /* * The unused *right*-side triggers become the SL/SR triggers for the *left* * Joy-Con, if and only if we're not using a charging grip. 
*/ static const struct joycon_ctlr_button_mapping left_joycon_s_button_mappings[] = { { BTN_TR, JC_BTN_SL_L, }, { BTN_TR2, JC_BTN_SR_L, }, { /* sentinel */ }, }; static const struct joycon_ctlr_button_mapping right_joycon_button_mappings[] = { { BTN_EAST, JC_BTN_A, }, { BTN_SOUTH, JC_BTN_B, }, { BTN_NORTH, JC_BTN_X, }, { BTN_WEST, JC_BTN_Y, }, { BTN_TR, JC_BTN_R, }, { BTN_TR2, JC_BTN_ZR, }, { BTN_START, JC_BTN_PLUS, }, { BTN_THUMBR, JC_BTN_RSTICK, }, { BTN_MODE, JC_BTN_HOME, }, { /* sentinel */ }, }; /* * The unused *left*-side triggers become the SL/SR triggers for the *right* * Joy-Con, if and only if we're not using a charging grip. */ static const struct joycon_ctlr_button_mapping right_joycon_s_button_mappings[] = { { BTN_TL, JC_BTN_SL_R, }, { BTN_TL2, JC_BTN_SR_R, }, { /* sentinel */ }, }; static const struct joycon_ctlr_button_mapping procon_button_mappings[] = { { BTN_EAST, JC_BTN_A, }, { BTN_SOUTH, JC_BTN_B, }, { BTN_NORTH, JC_BTN_X, }, { BTN_WEST, JC_BTN_Y, }, { BTN_TL, JC_BTN_L, }, { BTN_TR, JC_BTN_R, }, { BTN_TL2, JC_BTN_ZL, }, { BTN_TR2, JC_BTN_ZR, }, { BTN_SELECT, JC_BTN_MINUS, }, { BTN_START, JC_BTN_PLUS, }, { BTN_THUMBL, JC_BTN_LSTICK, }, { BTN_THUMBR, JC_BTN_RSTICK, }, { BTN_MODE, JC_BTN_HOME, }, { BTN_Z, JC_BTN_CAP, }, { /* sentinel */ }, }; static const struct joycon_ctlr_button_mapping nescon_button_mappings[] = { { BTN_SOUTH, JC_BTN_A, }, { BTN_EAST, JC_BTN_B, }, { BTN_TL, JC_BTN_L, }, { BTN_TR, JC_BTN_R, }, { BTN_SELECT, JC_BTN_MINUS, }, { BTN_START, JC_BTN_PLUS, }, { /* sentinel */ }, }; static const struct joycon_ctlr_button_mapping snescon_button_mappings[] = { { BTN_EAST, JC_BTN_A, }, { BTN_SOUTH, JC_BTN_B, }, { BTN_NORTH, JC_BTN_X, }, { BTN_WEST, JC_BTN_Y, }, { BTN_TL, JC_BTN_L, }, { BTN_TR, JC_BTN_R, }, { BTN_TL2, JC_BTN_ZL, }, { BTN_TR2, JC_BTN_ZR, }, { BTN_SELECT, JC_BTN_MINUS, }, { BTN_START, JC_BTN_PLUS, }, { /* sentinel */ }, }; static const struct joycon_ctlr_button_mapping gencon_button_mappings[] = { { BTN_WEST, JC_BTN_A, }, /* A */ { BTN_SOUTH, JC_BTN_B, }, /* B */ { BTN_EAST, JC_BTN_R, }, /* C */ { BTN_TL, JC_BTN_X, }, /* X MD/GEN 6B Only */ { BTN_NORTH, JC_BTN_Y, }, /* Y MD/GEN 6B Only */ { BTN_TR, JC_BTN_L, }, /* Z MD/GEN 6B Only */ { BTN_SELECT, JC_BTN_ZR, }, /* Mode */ { BTN_START, JC_BTN_PLUS, }, { BTN_MODE, JC_BTN_HOME, }, { BTN_Z, JC_BTN_CAP, }, { /* sentinel */ }, }; static const struct joycon_ctlr_button_mapping n64con_button_mappings[] = { { BTN_A, JC_BTN_A, }, { BTN_B, JC_BTN_B, }, { BTN_TL2, JC_BTN_ZL, }, /* Z */ { BTN_TL, JC_BTN_L, }, { BTN_TR, JC_BTN_R, }, { BTN_TR2, JC_BTN_LSTICK, }, /* ZR */ { BTN_START, JC_BTN_PLUS, }, { BTN_SELECT, JC_BTN_Y, }, /* C UP */ { BTN_X, JC_BTN_ZR, }, /* C DOWN */ { BTN_Y, JC_BTN_X, }, /* C LEFT */ { BTN_C, JC_BTN_MINUS, }, /* C RIGHT */ { BTN_MODE, JC_BTN_HOME, }, { BTN_Z, JC_BTN_CAP, }, { /* sentinel */ }, }; enum joycon_msg_type { JOYCON_MSG_TYPE_NONE, JOYCON_MSG_TYPE_USB, JOYCON_MSG_TYPE_SUBCMD, }; struct joycon_rumble_output { u8 output_id; u8 packet_num; u8 rumble_data[8]; } __packed; struct joycon_subcmd_request { u8 output_id; /* must be 0x01 for subcommand, 0x10 for rumble only */ u8 packet_num; /* incremented every send */ u8 rumble_data[8]; u8 subcmd_id; u8 data[]; /* length depends on the subcommand */ } __packed; struct joycon_subcmd_reply { u8 ack; /* MSB 1 for ACK, 0 for NACK */ u8 id; /* id of requested subcmd */ u8 data[]; /* will be at most 35 bytes */ } __packed; struct joycon_imu_data { s16 accel_x; s16 accel_y; s16 accel_z; s16 gyro_x; s16 gyro_y; s16 gyro_z; } __packed; struct 
joycon_input_report { u8 id; u8 timer; u8 bat_con; /* battery and connection info */ u8 button_status[3]; u8 left_stick[3]; u8 right_stick[3]; u8 vibrator_report; union { struct joycon_subcmd_reply subcmd_reply; /* IMU input reports contain 3 samples */ u8 imu_raw_bytes[sizeof(struct joycon_imu_data) * 3]; }; } __packed; #define JC_MAX_RESP_SIZE (sizeof(struct joycon_input_report) + 35) #define JC_RUMBLE_DATA_SIZE 8 #define JC_RUMBLE_QUEUE_SIZE 8 static const char * const joycon_player_led_names[] = { LED_FUNCTION_PLAYER1, LED_FUNCTION_PLAYER2, LED_FUNCTION_PLAYER3, LED_FUNCTION_PLAYER4, }; #define JC_NUM_LEDS ARRAY_SIZE(joycon_player_led_names) #define JC_NUM_LED_PATTERNS 8 /* Taken from https://www.nintendo.com/my/support/qa/detail/33822 */ static const enum led_brightness joycon_player_led_patterns[JC_NUM_LED_PATTERNS][JC_NUM_LEDS] = { { 1, 0, 0, 0 }, { 1, 1, 0, 0 }, { 1, 1, 1, 0 }, { 1, 1, 1, 1 }, { 1, 0, 0, 1 }, { 1, 0, 1, 0 }, { 1, 0, 1, 1 }, { 0, 1, 1, 0 }, }; /* Each physical controller is associated with a joycon_ctlr struct */ struct joycon_ctlr { struct hid_device *hdev; struct input_dev *input; u32 player_id; struct led_classdev leds[JC_NUM_LEDS]; /* player leds */ struct led_classdev home_led; enum joycon_ctlr_state ctlr_state; spinlock_t lock; u8 mac_addr[6]; char *mac_addr_str; enum joycon_ctlr_type ctlr_type; /* The following members are used for synchronous sends/receives */ enum joycon_msg_type msg_type; u8 subcmd_num; struct mutex output_mutex; u8 input_buf[JC_MAX_RESP_SIZE]; wait_queue_head_t wait; bool received_resp; u8 usb_ack_match; u8 subcmd_ack_match; bool received_input_report; unsigned int last_input_report_msecs; unsigned int last_subcmd_sent_msecs; unsigned int consecutive_valid_report_deltas; /* factory calibration data */ struct joycon_stick_cal left_stick_cal_x; struct joycon_stick_cal left_stick_cal_y; struct joycon_stick_cal right_stick_cal_x; struct joycon_stick_cal right_stick_cal_y; struct joycon_imu_cal accel_cal; struct joycon_imu_cal gyro_cal; /* prevents needlessly recalculating these divisors every sample */ s32 imu_cal_accel_divisor[3]; s32 imu_cal_gyro_divisor[3]; /* power supply data */ struct power_supply *battery; struct power_supply_desc battery_desc; u8 battery_capacity; bool battery_charging; bool host_powered; /* rumble */ u8 rumble_data[JC_RUMBLE_QUEUE_SIZE][JC_RUMBLE_DATA_SIZE]; int rumble_queue_head; int rumble_queue_tail; struct workqueue_struct *rumble_queue; struct work_struct rumble_worker; unsigned int rumble_msecs; u16 rumble_ll_freq; u16 rumble_lh_freq; u16 rumble_rl_freq; u16 rumble_rh_freq; unsigned short rumble_zero_countdown; /* imu */ struct input_dev *imu_input; bool imu_first_packet_received; /* helps in initiating timestamp */ unsigned int imu_timestamp_us; /* timestamp we report to userspace */ unsigned int imu_last_pkt_ms; /* used to calc imu report delta */ /* the following are used to track the average imu report time delta */ unsigned int imu_delta_samples_count; unsigned int imu_delta_samples_sum; unsigned int imu_avg_delta_ms; }; /* Helper macros for checking controller type */ #define jc_type_is_joycon(ctlr) \ (ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONL || \ ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONR || \ ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_CHRGGRIP) #define jc_type_is_procon(ctlr) \ (ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_PROCON) #define jc_type_is_chrggrip(ctlr) \ (ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_CHRGGRIP) /* Does this controller have inputs associated with 
left joycon? */ #define jc_type_has_left(ctlr) \ (ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCL || \ ctlr->ctlr_type == JOYCON_CTLR_TYPE_PRO || \ ctlr->ctlr_type == JOYCON_CTLR_TYPE_N64) /* Does this controller have inputs associated with right joycon? */ #define jc_type_has_right(ctlr) \ (ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCR || \ ctlr->ctlr_type == JOYCON_CTLR_TYPE_PRO) /* * Controller device helpers * * These look at the device ID known to the HID subsystem to identify a device, * but take caution: some NSO devices lie about themselves (NES Joy-Cons and * Sega Genesis controller). See type helpers below. * * These helpers are most useful early during the HID probe or in conjunction * with the capability helpers below. */ static inline bool joycon_device_is_chrggrip(struct joycon_ctlr *ctlr) { return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_CHRGGRIP; } /* * Controller type helpers * * These are slightly different than the device-ID-based helpers above. They are * generally more reliable, since they can distinguish between, e.g., Genesis * versus SNES, or NES Joy-Cons versus regular Switch Joy-Cons. They're most * useful for reporting available inputs. For other kinds of distinctions, see * the capability helpers below. * * They have two major drawbacks: (1) they're not available until after we set * the reporting method and then request the device info; (2) they can't * distinguish all controllers (like the Charging Grip from the Pro controller.) */ static inline bool joycon_type_is_left_joycon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCL; } static inline bool joycon_type_is_right_joycon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCR; } static inline bool joycon_type_is_procon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_PRO; } static inline bool joycon_type_is_snescon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_SNES; } static inline bool joycon_type_is_gencon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_GEN; } static inline bool joycon_type_is_n64con(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_N64; } static inline bool joycon_type_is_left_nescon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_NESL; } static inline bool joycon_type_is_right_nescon(struct joycon_ctlr *ctlr) { return ctlr->ctlr_type == JOYCON_CTLR_TYPE_NESR; } static inline bool joycon_type_is_any_joycon(struct joycon_ctlr *ctlr) { return joycon_type_is_left_joycon(ctlr) || joycon_type_is_right_joycon(ctlr) || joycon_device_is_chrggrip(ctlr); } static inline bool joycon_type_is_any_nescon(struct joycon_ctlr *ctlr) { return joycon_type_is_left_nescon(ctlr) || joycon_type_is_right_nescon(ctlr); } /* * Controller capability helpers * * These helpers combine the use of the helpers above to detect certain * capabilities during initialization. They are always accurate but (since they * use type helpers) cannot be used early in the HID probe. 
*/ static inline bool joycon_has_imu(struct joycon_ctlr *ctlr) { return joycon_device_is_chrggrip(ctlr) || joycon_type_is_any_joycon(ctlr) || joycon_type_is_procon(ctlr); } static inline bool joycon_has_joysticks(struct joycon_ctlr *ctlr) { return joycon_device_is_chrggrip(ctlr) || joycon_type_is_any_joycon(ctlr) || joycon_type_is_procon(ctlr) || joycon_type_is_n64con(ctlr); } static inline bool joycon_has_rumble(struct joycon_ctlr *ctlr) { return joycon_device_is_chrggrip(ctlr) || joycon_type_is_any_joycon(ctlr) || joycon_type_is_procon(ctlr) || joycon_type_is_n64con(ctlr); } static inline bool joycon_using_usb(struct joycon_ctlr *ctlr) { return ctlr->hdev->bus == BUS_USB; } static int __joycon_hid_send(struct hid_device *hdev, u8 *data, size_t len) { u8 *buf; int ret; buf = kmemdup(data, len, GFP_KERNEL); if (!buf) return -ENOMEM; ret = hid_hw_output_report(hdev, buf, len); kfree(buf); if (ret < 0) hid_dbg(hdev, "Failed to send output report ret=%d\n", ret); return ret; } static void joycon_wait_for_input_report(struct joycon_ctlr *ctlr) { int ret; /* * If we are in the proper reporting mode, wait for an input * report prior to sending the subcommand. This improves * reliability considerably. */ if (ctlr->ctlr_state == JOYCON_CTLR_STATE_READ) { unsigned long flags; spin_lock_irqsave(&ctlr->lock, flags); ctlr->received_input_report = false; spin_unlock_irqrestore(&ctlr->lock, flags); ret = wait_event_timeout(ctlr->wait, ctlr->received_input_report, HZ / 4); /* We will still proceed, even with a timeout here */ if (!ret) hid_warn(ctlr->hdev, "timeout waiting for input report\n"); } } /* * Sending subcommands and/or rumble data at too high a rate can cause bluetooth * controller disconnections. */ #define JC_INPUT_REPORT_MIN_DELTA 8 #define JC_INPUT_REPORT_MAX_DELTA 17 #define JC_SUBCMD_TX_OFFSET_MS 4 #define JC_SUBCMD_VALID_DELTA_REQ 3 #define JC_SUBCMD_RATE_MAX_ATTEMPTS 500 #define JC_SUBCMD_RATE_LIMITER_USB_MS 20 #define JC_SUBCMD_RATE_LIMITER_BT_MS 60 #define JC_SUBCMD_RATE_LIMITER_MS(ctlr) ((ctlr)->hdev->bus == BUS_USB ? JC_SUBCMD_RATE_LIMITER_USB_MS : JC_SUBCMD_RATE_LIMITER_BT_MS) static void joycon_enforce_subcmd_rate(struct joycon_ctlr *ctlr) { unsigned int current_ms; unsigned long subcmd_delta; int consecutive_valid_deltas = 0; int attempts = 0; unsigned long flags; if (unlikely(ctlr->ctlr_state != JOYCON_CTLR_STATE_READ)) return; do { joycon_wait_for_input_report(ctlr); current_ms = jiffies_to_msecs(jiffies); subcmd_delta = current_ms - ctlr->last_subcmd_sent_msecs; spin_lock_irqsave(&ctlr->lock, flags); consecutive_valid_deltas = ctlr->consecutive_valid_report_deltas; spin_unlock_irqrestore(&ctlr->lock, flags); attempts++; } while ((consecutive_valid_deltas < JC_SUBCMD_VALID_DELTA_REQ || subcmd_delta < JC_SUBCMD_RATE_LIMITER_MS(ctlr)) && ctlr->ctlr_state == JOYCON_CTLR_STATE_READ && attempts < JC_SUBCMD_RATE_MAX_ATTEMPTS); if (attempts >= JC_SUBCMD_RATE_MAX_ATTEMPTS) { hid_warn(ctlr->hdev, "%s: exceeded max attempts", __func__); return; } ctlr->last_subcmd_sent_msecs = current_ms; /* * Wait a short time after receiving an input report before * transmitting. This should reduce odds of a TX coinciding with an RX. * Minimizing concurrent BT traffic with the controller seems to lower * the rate of disconnections. */ msleep(JC_SUBCMD_TX_OFFSET_MS); } static int joycon_hid_send_sync(struct joycon_ctlr *ctlr, u8 *data, size_t len, u32 timeout) { int ret; int tries = 2; /* * The controller occasionally seems to drop subcommands. 
In testing, * doing one retry after a timeout appears to always work. */ while (tries--) { joycon_enforce_subcmd_rate(ctlr); ret = __joycon_hid_send(ctlr->hdev, data, len); if (ret < 0) { memset(ctlr->input_buf, 0, JC_MAX_RESP_SIZE); return ret; } ret = wait_event_timeout(ctlr->wait, ctlr->received_resp, timeout); if (!ret) { hid_dbg(ctlr->hdev, "synchronous send/receive timed out\n"); if (tries) { hid_dbg(ctlr->hdev, "retrying sync send after timeout\n"); } memset(ctlr->input_buf, 0, JC_MAX_RESP_SIZE); ret = -ETIMEDOUT; } else { ret = 0; break; } } ctlr->received_resp = false; return ret; } static int joycon_send_usb(struct joycon_ctlr *ctlr, u8 cmd, u32 timeout) { int ret; u8 buf[2] = {JC_OUTPUT_USB_CMD}; buf[1] = cmd; ctlr->usb_ack_match = cmd; ctlr->msg_type = JOYCON_MSG_TYPE_USB; ret = joycon_hid_send_sync(ctlr, buf, sizeof(buf), timeout); if (ret) hid_dbg(ctlr->hdev, "send usb command failed; ret=%d\n", ret); return ret; } static int joycon_send_subcmd(struct joycon_ctlr *ctlr, struct joycon_subcmd_request *subcmd, size_t data_len, u32 timeout) { int ret; unsigned long flags; spin_lock_irqsave(&ctlr->lock, flags); /* * If the controller has been removed, just return ENODEV so the LED * subsystem doesn't print invalid errors on removal. */ if (ctlr->ctlr_state == JOYCON_CTLR_STATE_REMOVED) { spin_unlock_irqrestore(&ctlr->lock, flags); return -ENODEV; } memcpy(subcmd->rumble_data, ctlr->rumble_data[ctlr->rumble_queue_tail], JC_RUMBLE_DATA_SIZE); spin_unlock_irqrestore(&ctlr->lock, flags); subcmd->output_id = JC_OUTPUT_RUMBLE_AND_SUBCMD; subcmd->packet_num = ctlr->subcmd_num; if (++ctlr->subcmd_num > 0xF) ctlr->subcmd_num = 0; ctlr->subcmd_ack_match = subcmd->subcmd_id; ctlr->msg_type = JOYCON_MSG_TYPE_SUBCMD; ret = joycon_hid_send_sync(ctlr, (u8 *)subcmd, sizeof(*subcmd) + data_len, timeout); if (ret < 0) hid_dbg(ctlr->hdev, "send subcommand failed; ret=%d\n", ret); else ret = 0; return ret; } /* Supply nibbles for flash and on. 
Ones correspond to active */ static int joycon_set_player_leds(struct joycon_ctlr *ctlr, u8 flash, u8 on) { struct joycon_subcmd_request *req; u8 buffer[sizeof(*req) + 1] = { 0 }; req = (struct joycon_subcmd_request *)buffer; req->subcmd_id = JC_SUBCMD_SET_PLAYER_LIGHTS; req->data[0] = (flash << 4) | on; hid_dbg(ctlr->hdev, "setting player leds\n"); return joycon_send_subcmd(ctlr, req, 1, HZ/4); } static int joycon_set_home_led(struct joycon_ctlr *ctlr, enum led_brightness brightness) { struct joycon_subcmd_request *req; u8 buffer[sizeof(*req) + 5] = { 0 }; u8 *data; req = (struct joycon_subcmd_request *)buffer; req->subcmd_id = JC_SUBCMD_SET_HOME_LIGHT; data = req->data; data[0] = 0x01; data[1] = brightness << 4; data[2] = brightness | (brightness << 4); data[3] = 0x11; data[4] = 0x11; hid_dbg(ctlr->hdev, "setting home led brightness\n"); return joycon_send_subcmd(ctlr, req, 5, HZ/4); } static int joycon_request_spi_flash_read(struct joycon_ctlr *ctlr, u32 start_addr, u8 size, u8 **reply) { struct joycon_subcmd_request *req; struct joycon_input_report *report; u8 buffer[sizeof(*req) + 5] = { 0 }; u8 *data; int ret; if (!reply) return -EINVAL; req = (struct joycon_subcmd_request *)buffer; req->subcmd_id = JC_SUBCMD_SPI_FLASH_READ; data = req->data; put_unaligned_le32(start_addr, data); data[4] = size; hid_dbg(ctlr->hdev, "requesting SPI flash data\n"); ret = joycon_send_subcmd(ctlr, req, 5, HZ); if (ret) { hid_err(ctlr->hdev, "failed reading SPI flash; ret=%d\n", ret); } else { report = (struct joycon_input_report *)ctlr->input_buf; /* The read data starts at the 6th byte */ *reply = &report->subcmd_reply.data[5]; } return ret; } /* * User calibration's presence is denoted with a magic byte preceding it. * returns 0 if magic val is present, 1 if not present, < 0 on error */ static int joycon_check_for_cal_magic(struct joycon_ctlr *ctlr, u32 flash_addr) { int ret; u8 *reply; ret = joycon_request_spi_flash_read(ctlr, flash_addr, JC_CAL_USR_MAGIC_SIZE, &reply); if (ret) return ret; return reply[0] != JC_CAL_USR_MAGIC_0 || reply[1] != JC_CAL_USR_MAGIC_1; } static int joycon_read_stick_calibration(struct joycon_ctlr *ctlr, u16 cal_addr, struct joycon_stick_cal *cal_x, struct joycon_stick_cal *cal_y, bool left_stick) { s32 x_max_above; s32 x_min_below; s32 y_max_above; s32 y_min_below; u8 *raw_cal; int ret; ret = joycon_request_spi_flash_read(ctlr, cal_addr, JC_CAL_STICK_DATA_SIZE, &raw_cal); if (ret) return ret; /* stick calibration parsing: note the order differs based on stick */ if (left_stick) { x_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 0), 0, 12); y_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 1), 4, 12); cal_x->center = hid_field_extract(ctlr->hdev, (raw_cal + 3), 0, 12); cal_y->center = hid_field_extract(ctlr->hdev, (raw_cal + 4), 4, 12); x_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 6), 0, 12); y_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 7), 4, 12); } else { cal_x->center = hid_field_extract(ctlr->hdev, (raw_cal + 0), 0, 12); cal_y->center = hid_field_extract(ctlr->hdev, (raw_cal + 1), 4, 12); x_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 3), 0, 12); y_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 4), 4, 12); x_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 6), 0, 12); y_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 7), 4, 12); } cal_x->max = cal_x->center + x_max_above; cal_x->min = cal_x->center - x_min_below; cal_y->max = cal_y->center + y_max_above; cal_y->min = cal_y->center - y_min_below; /* check if calibration 
values are plausible */ if (cal_x->min >= cal_x->center || cal_x->center >= cal_x->max || cal_y->min >= cal_y->center || cal_y->center >= cal_y->max) ret = -EINVAL; return ret; } static const u16 DFLT_STICK_CAL_CEN = 2000; static const u16 DFLT_STICK_CAL_MAX = 3500; static const u16 DFLT_STICK_CAL_MIN = 500; static void joycon_use_default_calibration(struct hid_device *hdev, struct joycon_stick_cal *cal_x, struct joycon_stick_cal *cal_y, const char *stick, int ret) { hid_warn(hdev, "Failed to read %s stick cal, using defaults; e=%d\n", stick, ret); cal_x->center = cal_y->center = DFLT_STICK_CAL_CEN; cal_x->max = cal_y->max = DFLT_STICK_CAL_MAX; cal_x->min = cal_y->min = DFLT_STICK_CAL_MIN; } static int joycon_request_calibration(struct joycon_ctlr *ctlr) { u16 left_stick_addr = JC_CAL_FCT_DATA_LEFT_ADDR; u16 right_stick_addr = JC_CAL_FCT_DATA_RIGHT_ADDR; int ret; hid_dbg(ctlr->hdev, "requesting cal data\n"); /* check if user stick calibrations are present */ if (!joycon_check_for_cal_magic(ctlr, JC_CAL_USR_LEFT_MAGIC_ADDR)) { left_stick_addr = JC_CAL_USR_LEFT_DATA_ADDR; hid_info(ctlr->hdev, "using user cal for left stick\n"); } else { hid_info(ctlr->hdev, "using factory cal for left stick\n"); } if (!joycon_check_for_cal_magic(ctlr, JC_CAL_USR_RIGHT_MAGIC_ADDR)) { right_stick_addr = JC_CAL_USR_RIGHT_DATA_ADDR; hid_info(ctlr->hdev, "using user cal for right stick\n"); } else { hid_info(ctlr->hdev, "using factory cal for right stick\n"); } /* read the left stick calibration data */ ret = joycon_read_stick_calibration(ctlr, left_stick_addr, &ctlr->left_stick_cal_x, &ctlr->left_stick_cal_y, true); if (ret) joycon_use_default_calibration(ctlr->hdev, &ctlr->left_stick_cal_x, &ctlr->left_stick_cal_y, "left", ret); /* read the right stick calibration data */ ret = joycon_read_stick_calibration(ctlr, right_stick_addr, &ctlr->right_stick_cal_x, &ctlr->right_stick_cal_y, false); if (ret) joycon_use_default_calibration(ctlr->hdev, &ctlr->right_stick_cal_x, &ctlr->right_stick_cal_y, "right", ret); hid_dbg(ctlr->hdev, "calibration:\n" "l_x_c=%d l_x_max=%d l_x_min=%d\n" "l_y_c=%d l_y_max=%d l_y_min=%d\n" "r_x_c=%d r_x_max=%d r_x_min=%d\n" "r_y_c=%d r_y_max=%d r_y_min=%d\n", ctlr->left_stick_cal_x.center, ctlr->left_stick_cal_x.max, ctlr->left_stick_cal_x.min, ctlr->left_stick_cal_y.center, ctlr->left_stick_cal_y.max, ctlr->left_stick_cal_y.min, ctlr->right_stick_cal_x.center, ctlr->right_stick_cal_x.max, ctlr->right_stick_cal_x.min, ctlr->right_stick_cal_y.center, ctlr->right_stick_cal_y.max, ctlr->right_stick_cal_y.min); return 0; } /* * These divisors are calculated once rather than for each sample. They are only * dependent on the IMU calibration values. They are used when processing the * IMU input reports. 
*/ static void joycon_calc_imu_cal_divisors(struct joycon_ctlr *ctlr) { int i, divz = 0; for (i = 0; i < 3; i++) { ctlr->imu_cal_accel_divisor[i] = ctlr->accel_cal.scale[i] - ctlr->accel_cal.offset[i]; ctlr->imu_cal_gyro_divisor[i] = ctlr->gyro_cal.scale[i] - ctlr->gyro_cal.offset[i]; if (ctlr->imu_cal_accel_divisor[i] == 0) { ctlr->imu_cal_accel_divisor[i] = 1; divz++; } if (ctlr->imu_cal_gyro_divisor[i] == 0) { ctlr->imu_cal_gyro_divisor[i] = 1; divz++; } } if (divz) hid_warn(ctlr->hdev, "inaccurate IMU divisors (%d)\n", divz); } static const s16 DFLT_ACCEL_OFFSET /*= 0*/; static const s16 DFLT_ACCEL_SCALE = 16384; static const s16 DFLT_GYRO_OFFSET /*= 0*/; static const s16 DFLT_GYRO_SCALE = 13371; static int joycon_request_imu_calibration(struct joycon_ctlr *ctlr) { u16 imu_cal_addr = JC_IMU_CAL_FCT_DATA_ADDR; u8 *raw_cal; int ret; int i; /* check if user calibration exists */ if (!joycon_check_for_cal_magic(ctlr, JC_IMU_CAL_USR_MAGIC_ADDR)) { imu_cal_addr = JC_IMU_CAL_USR_DATA_ADDR; hid_info(ctlr->hdev, "using user cal for IMU\n"); } else { hid_info(ctlr->hdev, "using factory cal for IMU\n"); } /* request IMU calibration data */ hid_dbg(ctlr->hdev, "requesting IMU cal data\n"); ret = joycon_request_spi_flash_read(ctlr, imu_cal_addr, JC_IMU_CAL_DATA_SIZE, &raw_cal); if (ret) { hid_warn(ctlr->hdev, "Failed to read IMU cal, using defaults; ret=%d\n", ret); for (i = 0; i < 3; i++) { ctlr->accel_cal.offset[i] = DFLT_ACCEL_OFFSET; ctlr->accel_cal.scale[i] = DFLT_ACCEL_SCALE; ctlr->gyro_cal.offset[i] = DFLT_GYRO_OFFSET; ctlr->gyro_cal.scale[i] = DFLT_GYRO_SCALE; } joycon_calc_imu_cal_divisors(ctlr); return ret; } /* IMU calibration parsing */ for (i = 0; i < 3; i++) { int j = i * 2; ctlr->accel_cal.offset[i] = get_unaligned_le16(raw_cal + j); ctlr->accel_cal.scale[i] = get_unaligned_le16(raw_cal + j + 6); ctlr->gyro_cal.offset[i] = get_unaligned_le16(raw_cal + j + 12); ctlr->gyro_cal.scale[i] = get_unaligned_le16(raw_cal + j + 18); } joycon_calc_imu_cal_divisors(ctlr); hid_dbg(ctlr->hdev, "IMU calibration:\n" "a_o[0]=%d a_o[1]=%d a_o[2]=%d\n" "a_s[0]=%d a_s[1]=%d a_s[2]=%d\n" "g_o[0]=%d g_o[1]=%d g_o[2]=%d\n" "g_s[0]=%d g_s[1]=%d g_s[2]=%d\n", ctlr->accel_cal.offset[0], ctlr->accel_cal.offset[1], ctlr->accel_cal.offset[2], ctlr->accel_cal.scale[0], ctlr->accel_cal.scale[1], ctlr->accel_cal.scale[2], ctlr->gyro_cal.offset[0], ctlr->gyro_cal.offset[1], ctlr->gyro_cal.offset[2], ctlr->gyro_cal.scale[0], ctlr->gyro_cal.scale[1], ctlr->gyro_cal.scale[2]); return 0; } static int joycon_set_report_mode(struct joycon_ctlr *ctlr) { struct joycon_subcmd_request *req; u8 buffer[sizeof(*req) + 1] = { 0 }; req = (struct joycon_subcmd_request *)buffer; req->subcmd_id = JC_SUBCMD_SET_REPORT_MODE; req->data[0] = 0x30; /* standard, full report mode */ hid_dbg(ctlr->hdev, "setting controller report mode\n"); return joycon_send_subcmd(ctlr, req, 1, HZ); } static int joycon_enable_rumble(struct joycon_ctlr *ctlr) { struct joycon_subcmd_request *req; u8 buffer[sizeof(*req) + 1] = { 0 }; req = (struct joycon_subcmd_request *)buffer; req->subcmd_id = JC_SUBCMD_ENABLE_VIBRATION; req->data[0] = 0x01; /* note: 0x00 would disable */ hid_dbg(ctlr->hdev, "enabling rumble\n"); return joycon_send_subcmd(ctlr, req, 1, HZ/4); } static int joycon_enable_imu(struct joycon_ctlr *ctlr) { struct joycon_subcmd_request *req; u8 buffer[sizeof(*req) + 1] = { 0 }; req = (struct joycon_subcmd_request *)buffer; req->subcmd_id = JC_SUBCMD_ENABLE_IMU; req->data[0] = 0x01; /* note: 0x00 would disable */ hid_dbg(ctlr->hdev, "enabling 
IMU\n"); return joycon_send_subcmd(ctlr, req, 1, HZ); } static s32 joycon_map_stick_val(struct joycon_stick_cal *cal, s32 val) { s32 center = cal->center; s32 min = cal->min; s32 max = cal->max; s32 new_val; if (val > center) { new_val = (val - center) * JC_MAX_STICK_MAG; new_val /= (max - center); } else { new_val = (center - val) * -JC_MAX_STICK_MAG; new_val /= (center - min); } new_val = clamp(new_val, (s32)-JC_MAX_STICK_MAG, (s32)JC_MAX_STICK_MAG); return new_val; } static void joycon_input_report_parse_imu_data(struct joycon_ctlr *ctlr, struct joycon_input_report *rep, struct joycon_imu_data *imu_data) { u8 *raw = rep->imu_raw_bytes; int i; for (i = 0; i < 3; i++) { struct joycon_imu_data *data = &imu_data[i]; data->accel_x = get_unaligned_le16(raw + 0); data->accel_y = get_unaligned_le16(raw + 2); data->accel_z = get_unaligned_le16(raw + 4); data->gyro_x = get_unaligned_le16(raw + 6); data->gyro_y = get_unaligned_le16(raw + 8); data->gyro_z = get_unaligned_le16(raw + 10); /* point to next imu sample */ raw += sizeof(struct joycon_imu_data); } } static void joycon_parse_imu_report(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { struct joycon_imu_data imu_data[3] = {0}; /* 3 reports per packet */ struct input_dev *idev = ctlr->imu_input; unsigned int msecs = jiffies_to_msecs(jiffies); unsigned int last_msecs = ctlr->imu_last_pkt_ms; int i; int value[6]; joycon_input_report_parse_imu_data(ctlr, rep, imu_data); /* * There are complexities surrounding how we determine the timestamps we * associate with the samples we pass to userspace. The IMU input * reports do not provide us with a good timestamp. There's a quickly * incrementing 8-bit counter per input report, but it is not very * useful for this purpose (it is not entirely clear what rate it * increments at or if it varies based on packet push rate - more on * the push rate below...). * * The reverse engineering work done on the joy-cons and pro controllers * by the community seems to indicate the following: * - The controller samples the IMU every 1.35ms. It then does some of * its own processing, probably averaging the samples out. * - Each imu input report contains 3 IMU samples, (usually 5ms apart). * - In the standard reporting mode (which this driver uses exclusively) * input reports are pushed from the controller as follows: * * joy-con (bluetooth): every 15 ms * * joy-cons (in charging grip via USB): every 15 ms * * pro controller (USB): every 15 ms * * pro controller (bluetooth): every 8 ms (this is the wildcard) * * Further complicating matters is that some bluetooth stacks are known * to alter the controller's packet rate by hardcoding the bluetooth * SSR for the switch controllers (android's stack currently sets the * SSR to 11ms for both the joy-cons and pro controllers). * * In my own testing, I've discovered that my pro controller either * reports IMU sample batches every 11ms or every 15ms. This rate is * stable after connecting. It isn't 100% clear what determines this * rate. Importantly, even when sending every 11ms, none of the samples * are duplicates. This seems to indicate that the time deltas between * reported samples can vary based on the input report rate. * * The solution employed in this driver is to keep track of the average * time delta between IMU input reports. 
In testing, this value has * proven to be stable, staying at 15ms or 11ms, though other hardware * configurations and bluetooth stacks could potentially see other rates * (hopefully this will become clearer as more people use the * driver). * * Keeping track of the average report delta allows us to submit our * timestamps to userspace based on that. Each report contains 3 * samples, so the IMU sampling rate should be avg_time_delta/3. We can * also use this average to detect events where we have dropped a * packet. The userspace timestamp for the samples will be adjusted * accordingly to prevent unwanted behavior. */ if (!ctlr->imu_first_packet_received) { ctlr->imu_timestamp_us = 0; ctlr->imu_delta_samples_count = 0; ctlr->imu_delta_samples_sum = 0; ctlr->imu_avg_delta_ms = JC_IMU_DFLT_AVG_DELTA_MS; ctlr->imu_first_packet_received = true; } else { unsigned int delta = msecs - last_msecs; unsigned int dropped_pkts; unsigned int dropped_threshold; /* avg imu report delta housekeeping */ ctlr->imu_delta_samples_sum += delta; ctlr->imu_delta_samples_count++; if (ctlr->imu_delta_samples_count >= JC_IMU_SAMPLES_PER_DELTA_AVG) { ctlr->imu_avg_delta_ms = ctlr->imu_delta_samples_sum / ctlr->imu_delta_samples_count; ctlr->imu_delta_samples_count = 0; ctlr->imu_delta_samples_sum = 0; } /* don't ever want divide by zero shenanigans */ if (ctlr->imu_avg_delta_ms == 0) { ctlr->imu_avg_delta_ms = 1; hid_warn(ctlr->hdev, "calculated avg imu delta of 0\n"); } /* useful for debugging IMU sample rate */ hid_dbg(ctlr->hdev, "imu_report: ms=%u last_ms=%u delta=%u avg_delta=%u\n", msecs, last_msecs, delta, ctlr->imu_avg_delta_ms); /* check if any packets have been dropped */ dropped_threshold = ctlr->imu_avg_delta_ms * 3 / 2; dropped_pkts = (delta - min(delta, dropped_threshold)) / ctlr->imu_avg_delta_ms; ctlr->imu_timestamp_us += 1000 * ctlr->imu_avg_delta_ms; if (dropped_pkts > JC_IMU_DROPPED_PKT_WARNING) { hid_warn(ctlr->hdev, "compensating for %u dropped IMU reports\n", dropped_pkts); hid_warn(ctlr->hdev, "delta=%u avg_delta=%u\n", delta, ctlr->imu_avg_delta_ms); } } ctlr->imu_last_pkt_ms = msecs; /* Each IMU input report contains three samples */ for (i = 0; i < 3; i++) { input_event(idev, EV_MSC, MSC_TIMESTAMP, ctlr->imu_timestamp_us); /* * These calculations (which use the controller's calibration * settings to improve the final values) are based on those * found in the community's reverse-engineering repo (linked at * top of driver). For hid-nintendo, we make sure that the final * value given to userspace is always in terms of the axis * resolution we provided. * * Currently only the gyro calculations subtract the calibration * offsets from the raw value itself. In testing, doing the same * for the accelerometer raw values decreased accuracy. * * Note that the gyro values are multiplied by the * precision-saving scaling factor to prevent large inaccuracies * due to truncation of the resolution value which would * otherwise occur. To prevent overflow (without resorting to 64 * bit integer math), the mult_frac macro is used.
*/ value[0] = mult_frac((JC_IMU_PREC_RANGE_SCALE * (imu_data[i].gyro_x - ctlr->gyro_cal.offset[0])), ctlr->gyro_cal.scale[0], ctlr->imu_cal_gyro_divisor[0]); value[1] = mult_frac((JC_IMU_PREC_RANGE_SCALE * (imu_data[i].gyro_y - ctlr->gyro_cal.offset[1])), ctlr->gyro_cal.scale[1], ctlr->imu_cal_gyro_divisor[1]); value[2] = mult_frac((JC_IMU_PREC_RANGE_SCALE * (imu_data[i].gyro_z - ctlr->gyro_cal.offset[2])), ctlr->gyro_cal.scale[2], ctlr->imu_cal_gyro_divisor[2]); value[3] = ((s32)imu_data[i].accel_x * ctlr->accel_cal.scale[0]) / ctlr->imu_cal_accel_divisor[0]; value[4] = ((s32)imu_data[i].accel_y * ctlr->accel_cal.scale[1]) / ctlr->imu_cal_accel_divisor[1]; value[5] = ((s32)imu_data[i].accel_z * ctlr->accel_cal.scale[2]) / ctlr->imu_cal_accel_divisor[2]; hid_dbg(ctlr->hdev, "raw_gyro: g_x=%d g_y=%d g_z=%d\n", imu_data[i].gyro_x, imu_data[i].gyro_y, imu_data[i].gyro_z); hid_dbg(ctlr->hdev, "raw_accel: a_x=%d a_y=%d a_z=%d\n", imu_data[i].accel_x, imu_data[i].accel_y, imu_data[i].accel_z); /* * The right joy-con has 2 axes negated, Y and Z. This is due to * the orientation of the IMU in the controller. We negate those * axes' values in order to be consistent with the left joy-con * and the pro controller: * X: positive is pointing toward the triggers * Y: positive is pointing to the left * Z: positive is pointing up (out of the buttons/sticks) * The axes follow the right-hand rule. */ if (jc_type_is_joycon(ctlr) && jc_type_has_right(ctlr)) { int j; /* negate all but x axis */ for (j = 1; j < 6; ++j) { if (j == 3) continue; value[j] *= -1; } } input_report_abs(idev, ABS_RX, value[0]); input_report_abs(idev, ABS_RY, value[1]); input_report_abs(idev, ABS_RZ, value[2]); input_report_abs(idev, ABS_X, value[3]); input_report_abs(idev, ABS_Y, value[4]); input_report_abs(idev, ABS_Z, value[5]); input_sync(idev); /* convert to micros and divide by 3 (3 samples per report). */ ctlr->imu_timestamp_us += ctlr->imu_avg_delta_ms * 1000 / 3; } } static void joycon_handle_rumble_report(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { unsigned long flags; unsigned long msecs = jiffies_to_msecs(jiffies); spin_lock_irqsave(&ctlr->lock, flags); if (IS_ENABLED(CONFIG_NINTENDO_FF) && rep->vibrator_report && ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED && (msecs - ctlr->rumble_msecs) >= JC_RUMBLE_PERIOD_MS && (ctlr->rumble_queue_head != ctlr->rumble_queue_tail || ctlr->rumble_zero_countdown > 0)) { /* * When this value reaches 0, we know we've sent multiple * packets to the controller instructing it to disable rumble. * We can safely stop sending periodic rumble packets until the * next ff effect. 
*/ if (ctlr->rumble_zero_countdown > 0) ctlr->rumble_zero_countdown--; queue_work(ctlr->rumble_queue, &ctlr->rumble_worker); } spin_unlock_irqrestore(&ctlr->lock, flags); } static void joycon_parse_battery_status(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { u8 tmp; unsigned long flags; spin_lock_irqsave(&ctlr->lock, flags); tmp = rep->bat_con; ctlr->host_powered = tmp & BIT(0); ctlr->battery_charging = tmp & BIT(4); tmp = tmp >> 5; switch (tmp) { case 0: /* empty */ ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; break; case 1: /* low */ ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_LOW; break; case 2: /* medium */ ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; break; case 3: /* high */ ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_HIGH; break; case 4: /* full */ ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_FULL; break; default: ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; hid_warn(ctlr->hdev, "Invalid battery status\n"); break; } spin_unlock_irqrestore(&ctlr->lock, flags); } static void joycon_report_left_stick(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { u16 raw_x; u16 raw_y; s32 x; s32 y; raw_x = hid_field_extract(ctlr->hdev, rep->left_stick, 0, 12); raw_y = hid_field_extract(ctlr->hdev, rep->left_stick + 1, 4, 12); x = joycon_map_stick_val(&ctlr->left_stick_cal_x, raw_x); y = -joycon_map_stick_val(&ctlr->left_stick_cal_y, raw_y); input_report_abs(ctlr->input, ABS_X, x); input_report_abs(ctlr->input, ABS_Y, y); } static void joycon_report_right_stick(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { u16 raw_x; u16 raw_y; s32 x; s32 y; raw_x = hid_field_extract(ctlr->hdev, rep->right_stick, 0, 12); raw_y = hid_field_extract(ctlr->hdev, rep->right_stick + 1, 4, 12); x = joycon_map_stick_val(&ctlr->right_stick_cal_x, raw_x); y = -joycon_map_stick_val(&ctlr->right_stick_cal_y, raw_y); input_report_abs(ctlr->input, ABS_RX, x); input_report_abs(ctlr->input, ABS_RY, y); } static void joycon_report_dpad(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { int hatx = 0; int haty = 0; u32 btns = hid_field_extract(ctlr->hdev, rep->button_status, 0, 24); if (btns & JC_BTN_LEFT) hatx = -1; else if (btns & JC_BTN_RIGHT) hatx = 1; if (btns & JC_BTN_UP) haty = -1; else if (btns & JC_BTN_DOWN) haty = 1; input_report_abs(ctlr->input, ABS_HAT0X, hatx); input_report_abs(ctlr->input, ABS_HAT0Y, haty); } static void joycon_report_buttons(struct joycon_ctlr *ctlr, struct joycon_input_report *rep, const struct joycon_ctlr_button_mapping button_mappings[]) { const struct joycon_ctlr_button_mapping *button; u32 status = hid_field_extract(ctlr->hdev, rep->button_status, 0, 24); for (button = button_mappings; button->code; button++) input_report_key(ctlr->input, button->code, status & button->bit); } static void joycon_parse_report(struct joycon_ctlr *ctlr, struct joycon_input_report *rep) { unsigned long flags; unsigned long msecs = jiffies_to_msecs(jiffies); unsigned long report_delta_ms = msecs - ctlr->last_input_report_msecs; if (joycon_has_rumble(ctlr)) joycon_handle_rumble_report(ctlr, rep); joycon_parse_battery_status(ctlr, rep); if (joycon_type_is_left_joycon(ctlr)) { joycon_report_left_stick(ctlr, rep); joycon_report_buttons(ctlr, rep, left_joycon_button_mappings); if (!joycon_device_is_chrggrip(ctlr)) joycon_report_buttons(ctlr, rep, left_joycon_s_button_mappings); } else if (joycon_type_is_right_joycon(ctlr)) { joycon_report_right_stick(ctlr, rep); joycon_report_buttons(ctlr, rep, 
right_joycon_button_mappings); if (!joycon_device_is_chrggrip(ctlr)) joycon_report_buttons(ctlr, rep, right_joycon_s_button_mappings); } else if (joycon_type_is_procon(ctlr)) { joycon_report_left_stick(ctlr, rep); joycon_report_right_stick(ctlr, rep); joycon_report_dpad(ctlr, rep); joycon_report_buttons(ctlr, rep, procon_button_mappings); } else if (joycon_type_is_any_nescon(ctlr)) { joycon_report_dpad(ctlr, rep); joycon_report_buttons(ctlr, rep, nescon_button_mappings); } else if (joycon_type_is_snescon(ctlr)) { joycon_report_dpad(ctlr, rep); joycon_report_buttons(ctlr, rep, snescon_button_mappings); } else if (joycon_type_is_gencon(ctlr)) { joycon_report_dpad(ctlr, rep); joycon_report_buttons(ctlr, rep, gencon_button_mappings); } else if (joycon_type_is_n64con(ctlr)) { joycon_report_left_stick(ctlr, rep); joycon_report_dpad(ctlr, rep); joycon_report_buttons(ctlr, rep, n64con_button_mappings); } input_sync(ctlr->input); spin_lock_irqsave(&ctlr->lock, flags); ctlr->last_input_report_msecs = msecs; /* * Did this input report arrive a reasonable time delta after the prior * report? We use this information to decide when it is safe to send * rumble packets or subcommand packets. */ if (report_delta_ms >= JC_INPUT_REPORT_MIN_DELTA && report_delta_ms <= JC_INPUT_REPORT_MAX_DELTA) { if (ctlr->consecutive_valid_report_deltas < JC_SUBCMD_VALID_DELTA_REQ) ctlr->consecutive_valid_report_deltas++; } else { ctlr->consecutive_valid_report_deltas = 0; } /* * Our consecutive valid report tracking is only relevant for * bluetooth-connected controllers. For USB devices, we're beholden to * USB's underlying polling rate anyway. Always set it to the consecutive * delta requirement. */ if (ctlr->hdev->bus == BUS_USB) ctlr->consecutive_valid_report_deltas = JC_SUBCMD_VALID_DELTA_REQ; spin_unlock_irqrestore(&ctlr->lock, flags); /* * Immediately after receiving a report is the most reliable time to * send a subcommand to the controller. Wake any subcommand senders * waiting for a report. */ if (unlikely(mutex_is_locked(&ctlr->output_mutex))) { spin_lock_irqsave(&ctlr->lock, flags); ctlr->received_input_report = true; spin_unlock_irqrestore(&ctlr->lock, flags); wake_up(&ctlr->wait); } /* parse IMU data if present */ if ((rep->id == JC_INPUT_IMU_DATA) && joycon_has_imu(ctlr)) joycon_parse_imu_report(ctlr, rep); } static int joycon_send_rumble_data(struct joycon_ctlr *ctlr) { int ret; unsigned long flags; struct joycon_rumble_output rumble_output = { 0 }; spin_lock_irqsave(&ctlr->lock, flags); /* * If the controller has been removed, just return ENODEV so the LED * subsystem doesn't print invalid errors on removal.
*/ if (ctlr->ctlr_state == JOYCON_CTLR_STATE_REMOVED) { spin_unlock_irqrestore(&ctlr->lock, flags); return -ENODEV; } memcpy(rumble_output.rumble_data, ctlr->rumble_data[ctlr->rumble_queue_tail], JC_RUMBLE_DATA_SIZE); spin_unlock_irqrestore(&ctlr->lock, flags); rumble_output.output_id = JC_OUTPUT_RUMBLE_ONLY; rumble_output.packet_num = ctlr->subcmd_num; if (++ctlr->subcmd_num > 0xF) ctlr->subcmd_num = 0; joycon_enforce_subcmd_rate(ctlr); ret = __joycon_hid_send(ctlr->hdev, (u8 *)&rumble_output, sizeof(rumble_output)); return ret; } static void joycon_rumble_worker(struct work_struct *work) { struct joycon_ctlr *ctlr = container_of(work, struct joycon_ctlr, rumble_worker); unsigned long flags; bool again = true; int ret; while (again) { mutex_lock(&ctlr->output_mutex); ret = joycon_send_rumble_data(ctlr); mutex_unlock(&ctlr->output_mutex); /* -ENODEV means the controller was just unplugged */ spin_lock_irqsave(&ctlr->lock, flags); if (ret < 0 && ret != -ENODEV && ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED) hid_warn(ctlr->hdev, "Failed to set rumble; e=%d", ret); ctlr->rumble_msecs = jiffies_to_msecs(jiffies); if (ctlr->rumble_queue_tail != ctlr->rumble_queue_head) { if (++ctlr->rumble_queue_tail >= JC_RUMBLE_QUEUE_SIZE) ctlr->rumble_queue_tail = 0; } else { again = false; } spin_unlock_irqrestore(&ctlr->lock, flags); } } #if IS_ENABLED(CONFIG_NINTENDO_FF) static struct joycon_rumble_freq_data joycon_find_rumble_freq(u16 freq) { const size_t length = ARRAY_SIZE(joycon_rumble_frequencies); const struct joycon_rumble_freq_data *data = joycon_rumble_frequencies; int i = 0; if (freq > data[0].freq) { for (i = 1; i < length - 1; i++) { if (freq > data[i - 1].freq && freq <= data[i].freq) break; } } return data[i]; } static struct joycon_rumble_amp_data joycon_find_rumble_amp(u16 amp) { const size_t length = ARRAY_SIZE(joycon_rumble_amplitudes); const struct joycon_rumble_amp_data *data = joycon_rumble_amplitudes; int i = 0; if (amp > data[0].amp) { for (i = 1; i < length - 1; i++) { if (amp > data[i - 1].amp && amp <= data[i].amp) break; } } return data[i]; } static void joycon_encode_rumble(u8 *data, u16 freq_low, u16 freq_high, u16 amp) { struct joycon_rumble_freq_data freq_data_low; struct joycon_rumble_freq_data freq_data_high; struct joycon_rumble_amp_data amp_data; freq_data_low = joycon_find_rumble_freq(freq_low); freq_data_high = joycon_find_rumble_freq(freq_high); amp_data = joycon_find_rumble_amp(amp); data[0] = (freq_data_high.high >> 8) & 0xFF; data[1] = (freq_data_high.high & 0xFF) + amp_data.high; data[2] = freq_data_low.low + ((amp_data.low >> 8) & 0xFF); data[3] = amp_data.low & 0xFF; } static const u16 JOYCON_MAX_RUMBLE_HIGH_FREQ = 1253; static const u16 JOYCON_MIN_RUMBLE_HIGH_FREQ = 82; static const u16 JOYCON_MAX_RUMBLE_LOW_FREQ = 626; static const u16 JOYCON_MIN_RUMBLE_LOW_FREQ = 41; static void joycon_clamp_rumble_freqs(struct joycon_ctlr *ctlr) { unsigned long flags; spin_lock_irqsave(&ctlr->lock, flags); ctlr->rumble_ll_freq = clamp(ctlr->rumble_ll_freq, JOYCON_MIN_RUMBLE_LOW_FREQ, JOYCON_MAX_RUMBLE_LOW_FREQ); ctlr->rumble_lh_freq = clamp(ctlr->rumble_lh_freq, JOYCON_MIN_RUMBLE_HIGH_FREQ, JOYCON_MAX_RUMBLE_HIGH_FREQ); ctlr->rumble_rl_freq = clamp(ctlr->rumble_rl_freq, JOYCON_MIN_RUMBLE_LOW_FREQ, JOYCON_MAX_RUMBLE_LOW_FREQ); ctlr->rumble_rh_freq = clamp(ctlr->rumble_rh_freq, JOYCON_MIN_RUMBLE_HIGH_FREQ, JOYCON_MAX_RUMBLE_HIGH_FREQ); spin_unlock_irqrestore(&ctlr->lock, flags); } static int joycon_set_rumble(struct joycon_ctlr *ctlr, u16 amp_r, u16 amp_l, bool schedule_now) 
{ u8 data[JC_RUMBLE_DATA_SIZE]; u16 amp; u16 freq_r_low; u16 freq_r_high; u16 freq_l_low; u16 freq_l_high; unsigned long flags; int next_rq_head; spin_lock_irqsave(&ctlr->lock, flags); freq_r_low = ctlr->rumble_rl_freq; freq_r_high = ctlr->rumble_rh_freq; freq_l_low = ctlr->rumble_ll_freq; freq_l_high = ctlr->rumble_lh_freq; /* limit number of silent rumble packets to reduce traffic */ if (amp_l != 0 || amp_r != 0) ctlr->rumble_zero_countdown = JC_RUMBLE_ZERO_AMP_PKT_CNT; spin_unlock_irqrestore(&ctlr->lock, flags); /* right joy-con */ amp = amp_r * (u32)joycon_max_rumble_amp / 65535; joycon_encode_rumble(data + 4, freq_r_low, freq_r_high, amp); /* left joy-con */ amp = amp_l * (u32)joycon_max_rumble_amp / 65535; joycon_encode_rumble(data, freq_l_low, freq_l_high, amp); spin_lock_irqsave(&ctlr->lock, flags); next_rq_head = ctlr->rumble_queue_head + 1; if (next_rq_head >= JC_RUMBLE_QUEUE_SIZE) next_rq_head = 0; /* Did we overrun the circular buffer? * If so, be sure we keep the latest intended rumble state. */ if (next_rq_head == ctlr->rumble_queue_tail) { hid_dbg(ctlr->hdev, "rumble queue is full"); /* overwrite the prior value at the end of the circular buf */ next_rq_head = ctlr->rumble_queue_head; } ctlr->rumble_queue_head = next_rq_head; memcpy(ctlr->rumble_data[ctlr->rumble_queue_head], data, JC_RUMBLE_DATA_SIZE); /* don't wait for the periodic send (reduces latency) */ if (schedule_now && ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED) queue_work(ctlr->rumble_queue, &ctlr->rumble_worker); spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } static int joycon_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) { struct joycon_ctlr *ctlr = input_get_drvdata(dev); if (effect->type != FF_RUMBLE) return 0; return joycon_set_rumble(ctlr, effect->u.rumble.weak_magnitude, effect->u.rumble.strong_magnitude, true); } #endif /* IS_ENABLED(CONFIG_NINTENDO_FF) */ static void joycon_config_left_stick(struct input_dev *idev) { input_set_abs_params(idev, ABS_X, -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG, JC_STICK_FUZZ, JC_STICK_FLAT); input_set_abs_params(idev, ABS_Y, -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG, JC_STICK_FUZZ, JC_STICK_FLAT); } static void joycon_config_right_stick(struct input_dev *idev) { input_set_abs_params(idev, ABS_RX, -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG, JC_STICK_FUZZ, JC_STICK_FLAT); input_set_abs_params(idev, ABS_RY, -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG, JC_STICK_FUZZ, JC_STICK_FLAT); } static void joycon_config_dpad(struct input_dev *idev) { input_set_abs_params(idev, ABS_HAT0X, -JC_MAX_DPAD_MAG, JC_MAX_DPAD_MAG, JC_DPAD_FUZZ, JC_DPAD_FLAT); input_set_abs_params(idev, ABS_HAT0Y, -JC_MAX_DPAD_MAG, JC_MAX_DPAD_MAG, JC_DPAD_FUZZ, JC_DPAD_FLAT); } static void joycon_config_buttons(struct input_dev *idev, const struct joycon_ctlr_button_mapping button_mappings[]) { const struct joycon_ctlr_button_mapping *button; for (button = button_mappings; button->code; button++) input_set_capability(idev, EV_KEY, button->code); } static void joycon_config_rumble(struct joycon_ctlr *ctlr) { #if IS_ENABLED(CONFIG_NINTENDO_FF) /* set up rumble */ input_set_capability(ctlr->input, EV_FF, FF_RUMBLE); input_ff_create_memless(ctlr->input, NULL, joycon_play_effect); ctlr->rumble_ll_freq = JC_RUMBLE_DFLT_LOW_FREQ; ctlr->rumble_lh_freq = JC_RUMBLE_DFLT_HIGH_FREQ; ctlr->rumble_rl_freq = JC_RUMBLE_DFLT_LOW_FREQ; ctlr->rumble_rh_freq = JC_RUMBLE_DFLT_HIGH_FREQ; joycon_clamp_rumble_freqs(ctlr); joycon_set_rumble(ctlr, 0, 0, false); ctlr->rumble_msecs = jiffies_to_msecs(jiffies); #endif } static 
int joycon_imu_input_create(struct joycon_ctlr *ctlr) { struct hid_device *hdev; const char *imu_name; int ret; hdev = ctlr->hdev; /* configure the imu input device */ ctlr->imu_input = devm_input_allocate_device(&hdev->dev); if (!ctlr->imu_input) return -ENOMEM; ctlr->imu_input->id.bustype = hdev->bus; ctlr->imu_input->id.vendor = hdev->vendor; ctlr->imu_input->id.product = hdev->product; ctlr->imu_input->id.version = hdev->version; ctlr->imu_input->uniq = ctlr->mac_addr_str; ctlr->imu_input->phys = hdev->phys; imu_name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s (IMU)", ctlr->input->name); if (!imu_name) return -ENOMEM; ctlr->imu_input->name = imu_name; input_set_drvdata(ctlr->imu_input, ctlr); /* configure imu axes */ input_set_abs_params(ctlr->imu_input, ABS_X, -JC_IMU_MAX_ACCEL_MAG, JC_IMU_MAX_ACCEL_MAG, JC_IMU_ACCEL_FUZZ, JC_IMU_ACCEL_FLAT); input_set_abs_params(ctlr->imu_input, ABS_Y, -JC_IMU_MAX_ACCEL_MAG, JC_IMU_MAX_ACCEL_MAG, JC_IMU_ACCEL_FUZZ, JC_IMU_ACCEL_FLAT); input_set_abs_params(ctlr->imu_input, ABS_Z, -JC_IMU_MAX_ACCEL_MAG, JC_IMU_MAX_ACCEL_MAG, JC_IMU_ACCEL_FUZZ, JC_IMU_ACCEL_FLAT); input_abs_set_res(ctlr->imu_input, ABS_X, JC_IMU_ACCEL_RES_PER_G); input_abs_set_res(ctlr->imu_input, ABS_Y, JC_IMU_ACCEL_RES_PER_G); input_abs_set_res(ctlr->imu_input, ABS_Z, JC_IMU_ACCEL_RES_PER_G); input_set_abs_params(ctlr->imu_input, ABS_RX, -JC_IMU_MAX_GYRO_MAG, JC_IMU_MAX_GYRO_MAG, JC_IMU_GYRO_FUZZ, JC_IMU_GYRO_FLAT); input_set_abs_params(ctlr->imu_input, ABS_RY, -JC_IMU_MAX_GYRO_MAG, JC_IMU_MAX_GYRO_MAG, JC_IMU_GYRO_FUZZ, JC_IMU_GYRO_FLAT); input_set_abs_params(ctlr->imu_input, ABS_RZ, -JC_IMU_MAX_GYRO_MAG, JC_IMU_MAX_GYRO_MAG, JC_IMU_GYRO_FUZZ, JC_IMU_GYRO_FLAT); input_abs_set_res(ctlr->imu_input, ABS_RX, JC_IMU_GYRO_RES_PER_DPS); input_abs_set_res(ctlr->imu_input, ABS_RY, JC_IMU_GYRO_RES_PER_DPS); input_abs_set_res(ctlr->imu_input, ABS_RZ, JC_IMU_GYRO_RES_PER_DPS); __set_bit(EV_MSC, ctlr->imu_input->evbit); __set_bit(MSC_TIMESTAMP, ctlr->imu_input->mscbit); __set_bit(INPUT_PROP_ACCELEROMETER, ctlr->imu_input->propbit); ret = input_register_device(ctlr->imu_input); if (ret) return ret; return 0; } static int joycon_input_create(struct joycon_ctlr *ctlr) { struct hid_device *hdev; int ret; hdev = ctlr->hdev; ctlr->input = devm_input_allocate_device(&hdev->dev); if (!ctlr->input) return -ENOMEM; ctlr->input->id.bustype = hdev->bus; ctlr->input->id.vendor = hdev->vendor; ctlr->input->id.product = hdev->product; ctlr->input->id.version = hdev->version; ctlr->input->uniq = ctlr->mac_addr_str; ctlr->input->name = hdev->name; ctlr->input->phys = hdev->phys; input_set_drvdata(ctlr->input, ctlr); ret = input_register_device(ctlr->input); if (ret) return ret; if (joycon_type_is_right_joycon(ctlr)) { joycon_config_right_stick(ctlr->input); joycon_config_buttons(ctlr->input, right_joycon_button_mappings); if (!joycon_device_is_chrggrip(ctlr)) joycon_config_buttons(ctlr->input, right_joycon_s_button_mappings); } else if (joycon_type_is_left_joycon(ctlr)) { joycon_config_left_stick(ctlr->input); joycon_config_buttons(ctlr->input, left_joycon_button_mappings); if (!joycon_device_is_chrggrip(ctlr)) joycon_config_buttons(ctlr->input, left_joycon_s_button_mappings); } else if (joycon_type_is_procon(ctlr)) { joycon_config_left_stick(ctlr->input); joycon_config_right_stick(ctlr->input); joycon_config_dpad(ctlr->input); joycon_config_buttons(ctlr->input, procon_button_mappings); } else if (joycon_type_is_any_nescon(ctlr)) { joycon_config_dpad(ctlr->input); joycon_config_buttons(ctlr->input, 
nescon_button_mappings); } else if (joycon_type_is_snescon(ctlr)) { joycon_config_dpad(ctlr->input); joycon_config_buttons(ctlr->input, snescon_button_mappings); } else if (joycon_type_is_gencon(ctlr)) { joycon_config_dpad(ctlr->input); joycon_config_buttons(ctlr->input, gencon_button_mappings); } else if (joycon_type_is_n64con(ctlr)) { joycon_config_dpad(ctlr->input); joycon_config_left_stick(ctlr->input); joycon_config_buttons(ctlr->input, n64con_button_mappings); } if (joycon_has_imu(ctlr)) { ret = joycon_imu_input_create(ctlr); if (ret) return ret; } if (joycon_has_rumble(ctlr)) joycon_config_rumble(ctlr); return 0; } /* Because the subcommand sets all the leds at once, the brightness argument is ignored */ static int joycon_player_led_brightness_set(struct led_classdev *led, enum led_brightness brightness) { struct device *dev = led->dev->parent; struct hid_device *hdev = to_hid_device(dev); struct joycon_ctlr *ctlr; int val = 0; int i; int ret; ctlr = hid_get_drvdata(hdev); if (!ctlr) { hid_err(hdev, "No controller data\n"); return -ENODEV; } for (i = 0; i < JC_NUM_LEDS; i++) val |= ctlr->leds[i].brightness << i; mutex_lock(&ctlr->output_mutex); ret = joycon_set_player_leds(ctlr, 0, val); mutex_unlock(&ctlr->output_mutex); return ret; } static int joycon_home_led_brightness_set(struct led_classdev *led, enum led_brightness brightness) { struct device *dev = led->dev->parent; struct hid_device *hdev = to_hid_device(dev); struct joycon_ctlr *ctlr; int ret; ctlr = hid_get_drvdata(hdev); if (!ctlr) { hid_err(hdev, "No controller data\n"); return -ENODEV; } mutex_lock(&ctlr->output_mutex); ret = joycon_set_home_led(ctlr, brightness); mutex_unlock(&ctlr->output_mutex); return ret; } static DEFINE_IDA(nintendo_player_id_allocator); static int joycon_leds_create(struct joycon_ctlr *ctlr) { struct hid_device *hdev = ctlr->hdev; struct device *dev = &hdev->dev; const char *d_name = dev_name(dev); struct led_classdev *led; int led_val = 0; char *name; int ret; int i; int player_led_pattern; /* configure the player LEDs */ ctlr->player_id = U32_MAX; ret = ida_alloc(&nintendo_player_id_allocator, GFP_KERNEL); if (ret < 0) { hid_warn(hdev, "Failed to allocate player ID, skipping; ret=%d\n", ret); goto home_led; } ctlr->player_id = ret; player_led_pattern = ret % JC_NUM_LED_PATTERNS; hid_info(ctlr->hdev, "assigned player %d led pattern", player_led_pattern + 1); for (i = 0; i < JC_NUM_LEDS; i++) { name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s:%s", d_name, "green", joycon_player_led_names[i]); if (!name) return -ENOMEM; led = &ctlr->leds[i]; led->name = name; led->brightness = joycon_player_led_patterns[player_led_pattern][i]; led->max_brightness = 1; led->brightness_set_blocking = joycon_player_led_brightness_set; led->flags = LED_CORE_SUSPENDRESUME | LED_HW_PLUGGABLE; led_val |= joycon_player_led_patterns[player_led_pattern][i] << i; } mutex_lock(&ctlr->output_mutex); ret = joycon_set_player_leds(ctlr, 0, led_val); mutex_unlock(&ctlr->output_mutex); if (ret) { hid_warn(hdev, "Failed to set player LEDs, skipping registration; ret=%d\n", ret); goto home_led; } for (i = 0; i < JC_NUM_LEDS; i++) { led = &ctlr->leds[i]; ret = devm_led_classdev_register(&hdev->dev, led); if (ret) { hid_err(hdev, "Failed to register player %d LED; ret=%d\n", i + 1, ret); return ret; } } home_led: /* configure the home LED */ if (jc_type_has_right(ctlr)) { name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s:%s", d_name, "blue", LED_FUNCTION_PLAYER5); if (!name) return -ENOMEM; led = &ctlr->home_led; led->name = name;
led->brightness = 0; led->max_brightness = 0xF; led->brightness_set_blocking = joycon_home_led_brightness_set; led->flags = LED_CORE_SUSPENDRESUME | LED_HW_PLUGGABLE; /* Set the home LED to 0 as default state */ mutex_lock(&ctlr->output_mutex); ret = joycon_set_home_led(ctlr, 0); mutex_unlock(&ctlr->output_mutex); if (ret) { hid_warn(hdev, "Failed to set home LED, skipping registration; ret=%d\n", ret); return 0; } ret = devm_led_classdev_register(&hdev->dev, led); if (ret) { hid_err(hdev, "Failed to register home LED; ret=%d\n", ret); return ret; } } return 0; } static int joycon_battery_get_property(struct power_supply *supply, enum power_supply_property prop, union power_supply_propval *val) { struct joycon_ctlr *ctlr = power_supply_get_drvdata(supply); unsigned long flags; int ret = 0; u8 capacity; bool charging; bool powered; spin_lock_irqsave(&ctlr->lock, flags); capacity = ctlr->battery_capacity; charging = ctlr->battery_charging; powered = ctlr->host_powered; spin_unlock_irqrestore(&ctlr->lock, flags); switch (prop) { case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: val->intval = capacity; break; case POWER_SUPPLY_PROP_STATUS: if (charging) val->intval = POWER_SUPPLY_STATUS_CHARGING; else if (capacity == POWER_SUPPLY_CAPACITY_LEVEL_FULL && powered) val->intval = POWER_SUPPLY_STATUS_FULL; else val->intval = POWER_SUPPLY_STATUS_DISCHARGING; break; default: ret = -EINVAL; break; } return ret; } static enum power_supply_property joycon_battery_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_STATUS, }; static int joycon_power_supply_create(struct joycon_ctlr *ctlr) { struct hid_device *hdev = ctlr->hdev; struct power_supply_config supply_config = { .drv_data = ctlr, }; const char * const name_fmt = "nintendo_switch_controller_battery_%s"; int ret = 0; /* Set initially to unknown before receiving first input report */ ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; /* Configure the battery's description */ ctlr->battery_desc.properties = joycon_battery_props; ctlr->battery_desc.num_properties = ARRAY_SIZE(joycon_battery_props); ctlr->battery_desc.get_property = joycon_battery_get_property; ctlr->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY; ctlr->battery_desc.use_for_apm = 0; ctlr->battery_desc.name = devm_kasprintf(&hdev->dev, GFP_KERNEL, name_fmt, dev_name(&hdev->dev)); if (!ctlr->battery_desc.name) return -ENOMEM; ctlr->battery = devm_power_supply_register(&hdev->dev, &ctlr->battery_desc, &supply_config); if (IS_ERR(ctlr->battery)) { ret = PTR_ERR(ctlr->battery); hid_err(hdev, "Failed to register battery; ret=%d\n", ret); return ret; } return power_supply_powers(ctlr->battery, &hdev->dev); } static int joycon_read_info(struct joycon_ctlr *ctlr) { int ret; int i; int j; struct joycon_subcmd_request req = { 0 }; struct joycon_input_report *report; req.subcmd_id = JC_SUBCMD_REQ_DEV_INFO; ret = joycon_send_subcmd(ctlr, &req, 0, HZ); if (ret) { hid_err(ctlr->hdev, "Failed to get joycon info; ret=%d\n", ret); return ret; } report = (struct joycon_input_report *)ctlr->input_buf; for (i = 4, j = 0; j < 6; i++, j++) ctlr->mac_addr[j] = report->subcmd_reply.data[i]; ctlr->mac_addr_str = devm_kasprintf(&ctlr->hdev->dev, GFP_KERNEL, "%02X:%02X:%02X:%02X:%02X:%02X", ctlr->mac_addr[0], ctlr->mac_addr[1], ctlr->mac_addr[2], ctlr->mac_addr[3], ctlr->mac_addr[4], ctlr->mac_addr[5]); if 
(!ctlr->mac_addr_str) return -ENOMEM; hid_info(ctlr->hdev, "controller MAC = %s\n", ctlr->mac_addr_str); /* * Retrieve the type so we can distinguish the controller. * Unfortunately, hdev->product can't always be used due to an apparent bug * with the NSO Genesis controller. Over USB, it will report the * PID as 0x201E, but over bluetooth it will report the PID as 0x2017, * which is the same as the NSO SNES controller. This is different from * the rest of the controllers, which report the same PID over USB * and bluetooth. */ ctlr->ctlr_type = report->subcmd_reply.data[2]; hid_dbg(ctlr->hdev, "controller type = 0x%02X\n", ctlr->ctlr_type); return 0; } static int joycon_init(struct hid_device *hdev) { struct joycon_ctlr *ctlr = hid_get_drvdata(hdev); int ret = 0; mutex_lock(&ctlr->output_mutex); /* if the handshake command fails, assume a BLE pro controller */ if (joycon_using_usb(ctlr) && !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) { hid_dbg(hdev, "detected USB controller\n"); /* set baudrate for improved latency */ ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ); if (ret) { /* * We can function with the default baudrate. * Provide a warning, and continue on. */ hid_warn(hdev, "Failed to set baudrate (ret=%d), continuing anyway\n", ret); } /* handshake */ ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ); if (ret) { hid_err(hdev, "Failed handshake; ret=%d\n", ret); goto out_unlock; } /* * Set no timeout (to keep controller in USB mode). * This doesn't send a response, so ignore the timeout. */ joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT, HZ/10); } else if (jc_type_is_chrggrip(ctlr)) { hid_err(hdev, "Failed charging grip handshake\n"); ret = -ETIMEDOUT; goto out_unlock; } /* needed to retrieve the controller type */ ret = joycon_read_info(ctlr); if (ret) { hid_err(hdev, "Failed to retrieve controller info; ret=%d\n", ret); goto out_unlock; } if (joycon_has_joysticks(ctlr)) { /* get controller calibration data, and parse it */ ret = joycon_request_calibration(ctlr); if (ret) { /* * We can function with default calibration, but it may be * inaccurate. Provide a warning, and continue on. */ hid_warn(hdev, "Analog stick positions may be inaccurate\n"); } } if (joycon_has_imu(ctlr)) { /* get IMU calibration data, and parse it */ ret = joycon_request_imu_calibration(ctlr); if (ret) { /* * We can function with default calibration, but it may be * inaccurate. Provide a warning, and continue on.
*/ hid_warn(hdev, "Unable to read IMU calibration data\n"); } /* Enable the IMU */ ret = joycon_enable_imu(ctlr); if (ret) { hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret); goto out_unlock; } } /* Set the reporting mode to 0x30, which is the full report mode */ ret = joycon_set_report_mode(ctlr); if (ret) { hid_err(hdev, "Failed to set report mode; ret=%d\n", ret); goto out_unlock; } if (joycon_has_rumble(ctlr)) { /* Enable rumble */ ret = joycon_enable_rumble(ctlr); if (ret) { hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret); goto out_unlock; } } out_unlock: mutex_unlock(&ctlr->output_mutex); return ret; } /* Common handler for parsing inputs */ static int joycon_ctlr_read_handler(struct joycon_ctlr *ctlr, u8 *data, int size) { if (data[0] == JC_INPUT_SUBCMD_REPLY || data[0] == JC_INPUT_IMU_DATA || data[0] == JC_INPUT_MCU_DATA) { if (size >= 12) /* make sure it contains the input report */ joycon_parse_report(ctlr, (struct joycon_input_report *)data); } return 0; } static int joycon_ctlr_handle_event(struct joycon_ctlr *ctlr, u8 *data, int size) { int ret = 0; bool match = false; struct joycon_input_report *report; if (unlikely(mutex_is_locked(&ctlr->output_mutex)) && ctlr->msg_type != JOYCON_MSG_TYPE_NONE) { switch (ctlr->msg_type) { case JOYCON_MSG_TYPE_USB: if (size < 2) break; if (data[0] == JC_INPUT_USB_RESPONSE && data[1] == ctlr->usb_ack_match) match = true; break; case JOYCON_MSG_TYPE_SUBCMD: if (size < sizeof(struct joycon_input_report) || data[0] != JC_INPUT_SUBCMD_REPLY) break; report = (struct joycon_input_report *)data; if (report->subcmd_reply.id == ctlr->subcmd_ack_match) match = true; break; default: break; } if (match) { memcpy(ctlr->input_buf, data, min(size, (int)JC_MAX_RESP_SIZE)); ctlr->msg_type = JOYCON_MSG_TYPE_NONE; ctlr->received_resp = true; wake_up(&ctlr->wait); /* This message has been handled */ return 1; } } if (ctlr->ctlr_state == JOYCON_CTLR_STATE_READ) ret = joycon_ctlr_read_handler(ctlr, data, size); return ret; } static int nintendo_hid_event(struct hid_device *hdev, struct hid_report *report, u8 *raw_data, int size) { struct joycon_ctlr *ctlr = hid_get_drvdata(hdev); if (size < 1) return -EINVAL; return joycon_ctlr_handle_event(ctlr, raw_data, size); } static int nintendo_hid_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; struct joycon_ctlr *ctlr; hid_dbg(hdev, "probe - start\n"); ctlr = devm_kzalloc(&hdev->dev, sizeof(*ctlr), GFP_KERNEL); if (!ctlr) { ret = -ENOMEM; goto err; } ctlr->hdev = hdev; ctlr->ctlr_state = JOYCON_CTLR_STATE_INIT; ctlr->rumble_queue_head = 0; ctlr->rumble_queue_tail = 0; hid_set_drvdata(hdev, ctlr); mutex_init(&ctlr->output_mutex); init_waitqueue_head(&ctlr->wait); spin_lock_init(&ctlr->lock); ctlr->rumble_queue = alloc_workqueue("hid-nintendo-rumble_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, 0); if (!ctlr->rumble_queue) { ret = -ENOMEM; goto err; } INIT_WORK(&ctlr->rumble_worker, joycon_rumble_worker); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "HID parse failed\n"); goto err_wq; } /* * Patch the hw version of pro controller/joycons, so applications can * distinguish between the default HID mappings and the mappings defined * by the Linux game controller spec. This is important for the SDL2 * library, which has a game controller database, which uses device ids * in combination with version as a key. 
*/ hdev->version |= 0x8000; ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); if (ret) { hid_err(hdev, "HW start failed\n"); goto err_wq; } ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "cannot start hardware I/O\n"); goto err_stop; } hid_device_io_start(hdev); ret = joycon_init(hdev); if (ret) { hid_err(hdev, "Failed to initialize controller; ret=%d\n", ret); goto err_close; } /* Initialize the leds */ ret = joycon_leds_create(ctlr); if (ret) { hid_err(hdev, "Failed to create leds; ret=%d\n", ret); goto err_close; } /* Initialize the battery power supply */ ret = joycon_power_supply_create(ctlr); if (ret) { hid_err(hdev, "Failed to create power_supply; ret=%d\n", ret); goto err_ida; } ret = joycon_input_create(ctlr); if (ret) { hid_err(hdev, "Failed to create input device; ret=%d\n", ret); goto err_ida; } ctlr->ctlr_state = JOYCON_CTLR_STATE_READ; hid_dbg(hdev, "probe - success\n"); return 0; err_ida: ida_free(&nintendo_player_id_allocator, ctlr->player_id); err_close: hid_hw_close(hdev); err_stop: hid_hw_stop(hdev); err_wq: destroy_workqueue(ctlr->rumble_queue); err: hid_err(hdev, "probe - fail = %d\n", ret); return ret; } static void nintendo_hid_remove(struct hid_device *hdev) { struct joycon_ctlr *ctlr = hid_get_drvdata(hdev); unsigned long flags; hid_dbg(hdev, "remove\n"); /* Prevent further attempts at sending subcommands. */ spin_lock_irqsave(&ctlr->lock, flags); ctlr->ctlr_state = JOYCON_CTLR_STATE_REMOVED; spin_unlock_irqrestore(&ctlr->lock, flags); destroy_workqueue(ctlr->rumble_queue); ida_free(&nintendo_player_id_allocator, ctlr->player_id); hid_hw_close(hdev); hid_hw_stop(hdev); } #ifdef CONFIG_PM static int nintendo_hid_resume(struct hid_device *hdev) { int ret = joycon_init(hdev); if (ret) hid_err(hdev, "Failed to restore controller after resume"); return ret; } #endif static const struct hid_device_id nintendo_hid_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_PROCON) }, { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_SNESCON) }, { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_GENCON) }, { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_N64CON) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_PROCON) }, { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_CHRGGRIP) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_JOYCONL) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_JOYCONR) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_SNESCON) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_GENCON) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_N64CON) }, { } }; MODULE_DEVICE_TABLE(hid, nintendo_hid_devices); static struct hid_driver nintendo_hid_driver = { .name = "nintendo", .id_table = nintendo_hid_devices, .probe = nintendo_hid_probe, .remove = nintendo_hid_remove, .raw_event = nintendo_hid_event, #ifdef CONFIG_PM .resume = nintendo_hid_resume, #endif }; static int __init nintendo_init(void) { return hid_register_driver(&nintendo_hid_driver); } static void __exit nintendo_exit(void) { hid_unregister_driver(&nintendo_hid_driver); ida_destroy(&nintendo_player_id_allocator); } module_init(nintendo_init); module_exit(nintendo_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ryan McClelland <rymcclel@gmail.com>"); MODULE_AUTHOR("Emily Strickland <linux@emily.st>"); MODULE_AUTHOR("Daniel J. 
Ogorchock <djogorchock@gmail.com>"); MODULE_DESCRIPTION("Driver for Nintendo Switch Controllers");
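The player-LED subcommand shown in joycon_set_player_leds() packs its whole state into one byte: the low nibble selects LEDs that are lit solid, the high nibble selects LEDs that flash, and set bits mean active. A quick hedged illustration (the standalone names and the chosen values are hypothetical, not from the driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t on = 0x1;    /* bit 0 set: LED 1 solid, i.e. the player-1 pattern */
    uint8_t flash = 0x8; /* bit 3 set: LED 4 flashing */

    /* same packing as req->data[0] = (flash << 4) | on in the driver */
    uint8_t arg = (uint8_t)((flash << 4) | on);

    printf("subcommand data byte: 0x%02X\n", arg); /* prints 0x81 */
    return 0;
}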
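As a concrete check of the scaling joycon_map_stick_val() performs, here is a small userspace sketch that mirrors its arithmetic against the driver's default calibration (center 2000, min 500, max 3500, per the DFLT_STICK_CAL_* constants above). The magnitude constant 32767 is an assumption standing in for JC_MAX_STICK_MAG, which is defined earlier in the driver and not shown in this excerpt; all names here are illustrative.

#include <stdio.h>
#include <stdint.h>

#define MAX_STICK_MAG 32767 /* assumed value of JC_MAX_STICK_MAG */

/* Userspace mirror of the driver's joycon_map_stick_val() */
static int32_t map_stick_val(int32_t center, int32_t min, int32_t max, int32_t val)
{
    int32_t new_val;

    if (val > center)
        new_val = (val - center) * MAX_STICK_MAG / (max - center);
    else
        new_val = (center - val) * -MAX_STICK_MAG / (center - min);
    /* clamp to the reported axis range, as the driver does */
    if (new_val > MAX_STICK_MAG)
        new_val = MAX_STICK_MAG;
    if (new_val < -MAX_STICK_MAG)
        new_val = -MAX_STICK_MAG;
    return new_val;
}

int main(void)
{
    printf("%d\n", map_stick_val(2000, 500, 3500, 2000)); /* center -> 0 */
    printf("%d\n", map_stick_val(2000, 500, 3500, 3500)); /* max -> 32767 */
    printf("%d\n", map_stick_val(2000, 500, 3500, 500));  /* min -> -32767 */
    printf("%d\n", map_stick_val(2000, 500, 3500, 2750)); /* halfway -> 16383 */
    return 0;
}

Note how the two halves of the travel are scaled independently, so an off-center rest position still maps to 0 and each extreme still reaches full magnitude.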
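The timestamping scheme described at length in joycon_parse_imu_report() reduces to a little integer arithmetic. The hedged sketch below (standalone userspace code, hypothetical delta values) walks one report through the excerpt's bookkeeping: with an average report delta of 15 ms, each of the report's three samples is spaced avg * 1000 / 3 = 5000 us apart, and a 46 ms gap measured against the dropped-packet threshold of avg * 3 / 2 = 22 ms implies one dropped report.

#include <stdio.h>

int main(void)
{
    unsigned int avg_delta_ms = 15; /* observed average report delta */
    unsigned int timestamp_us = 0;  /* running timestamp, as in the driver */
    unsigned int delta = 46;        /* hypothetical gap: about 3 report periods */

    /* threshold and dropped-report count, as in the excerpt */
    unsigned int threshold = avg_delta_ms * 3 / 2; /* 22 ms */
    unsigned int capped = delta < threshold ? delta : threshold; /* min() */
    unsigned int dropped = (delta - capped) / avg_delta_ms;

    printf("dropped reports inferred: %u\n", dropped); /* (46 - 22) / 15 = 1 */

    /* each report advances the timestamp, each of its 3 samples by a third */
    timestamp_us += 1000 * avg_delta_ms;
    for (int i = 0; i < 3; i++) {
        printf("sample %d timestamp: %u us\n", i, timestamp_us);
        timestamp_us += avg_delta_ms * 1000 / 3; /* 5000 us per sample */
    }
    return 0;
}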
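The comment above the gyro math notes that mult_frac() avoids overflow without 64-bit arithmetic. The kernel macro does this by computing (x / d) * n + (x % d) * n / d instead of x * n / d, so the large intermediate product never materializes. The function below is only a sketch of that idea with illustrative names, not the kernel's exact macro:

#include <stdio.h>
#include <stdint.h>

/* Sketch of mult_frac(x, n, d): compute x * n / d while keeping the
 * intermediate products small, by splitting x into its quotient and
 * remainder with respect to d first. */
static int32_t mult_frac32(int32_t x, int32_t n, int32_t d)
{
    int32_t quot = x / d;
    int32_t rem = x % d;

    return quot * n + rem * n / d;
}

int main(void)
{
    /* A naive 1000000 * 4096 would overflow a 32-bit int;
     * the split form stays in range and prints 975238. */
    printf("%d\n", mult_frac32(1000000, 4096, 4200));
    return 0;
}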
// SPDX-License-Identifier: GPL-2.0-only /* * x86-optimized CRC32 functions * * Copyright (C) 2008 Intel Corporation * Copyright 2012 Xyratex Technology Limited * Copyright 2024 Google LLC */ #include <linux/crc32.h> #include <linux/module.h> #include "crc-pclmul-template.h" static DEFINE_STATIC_KEY_FALSE(have_crc32); static DEFINE_STATIC_KEY_FALSE(have_pclmulqdq); DECLARE_CRC_PCLMUL_FUNCS(crc32_lsb, u32); u32 crc32_le_arch(u32 crc, const u8 *p, size_t len) { CRC_PCLMUL(crc, p, len, crc32_lsb, crc32_lsb_0xedb88320_consts, have_pclmulqdq); return crc32_le_base(crc, p, len); } EXPORT_SYMBOL(crc32_le_arch); #ifdef CONFIG_X86_64 #define CRC32_INST "crc32q %1, %q0" #else #define CRC32_INST "crc32l %1, %0" #endif /* * Use carryless multiply version of crc32c when buffer size is >= 512 to * account for FPU state save/restore overhead. */ #define CRC32C_PCLMUL_BREAKEVEN 512 asmlinkage u32 crc32c_x86_3way(u32 crc, const u8 *buffer, size_t len); u32 crc32c_arch(u32 crc, const u8 *p, size_t len) { size_t num_longs; if (!static_branch_likely(&have_crc32)) return crc32c_base(crc, p, len); if (IS_ENABLED(CONFIG_X86_64) && len >= CRC32C_PCLMUL_BREAKEVEN && static_branch_likely(&have_pclmulqdq) && crypto_simd_usable()) { kernel_fpu_begin(); crc = crc32c_x86_3way(crc, p, len); kernel_fpu_end(); return crc; } for (num_longs = len / sizeof(unsigned long); num_longs != 0; num_longs--, p += sizeof(unsigned long)) asm(CRC32_INST : "+r" (crc) : ASM_INPUT_RM (*(unsigned long *)p)); if (sizeof(unsigned long) > 4 && (len & 4)) { asm("crc32l %1, %0" : "+r" (crc) : ASM_INPUT_RM (*(u32 *)p)); p += 4; } if (len & 2) { asm("crc32w %1, %0" : "+r" (crc) : ASM_INPUT_RM (*(u16 *)p)); p += 2; } if (len & 1) asm("crc32b %1, %0" : "+r" (crc) : ASM_INPUT_RM (*p)); return crc; } EXPORT_SYMBOL(crc32c_arch); u32 crc32_be_arch(u32 crc, const u8 *p, size_t len) { return crc32_be_base(crc, p, len); } EXPORT_SYMBOL(crc32_be_arch); static int __init crc32_x86_init(void) { if (boot_cpu_has(X86_FEATURE_XMM4_2)) static_branch_enable(&have_crc32); if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) { static_branch_enable(&have_pclmulqdq); INIT_CRC_PCLMUL(crc32_lsb); } return 0; } arch_initcall(crc32_x86_init); static void __exit crc32_x86_exit(void) { } module_exit(crc32_x86_exit); u32 crc32_optimizations(void) { u32 optimizations = 0; if (static_key_enabled(&have_crc32)) optimizations |= CRC32C_OPTIMIZATION; if (static_key_enabled(&have_pclmulqdq)) optimizations |= CRC32_LE_OPTIMIZATION; return optimizations; } EXPORT_SYMBOL(crc32_optimizations); MODULE_DESCRIPTION("x86-optimized CRC32 functions"); MODULE_LICENSE("GPL");
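To see how crc32c_arch() walks a buffer once it falls through to the CRC32 instruction path, this small hedged sketch (userspace, illustrative names only) prints the chunk schedule the function's tail logic implies on a 64-bit build: as many 8-byte crc32q steps as fit, then at most one 4-byte, one 2-byte, and one 1-byte step.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t len = 23; /* hypothetical buffer length */
    size_t num_longs = len / sizeof(unsigned long);

    printf("%zu x crc32q (8 bytes each)\n", num_longs); /* 2 on 64-bit */
    if (sizeof(unsigned long) > 4 && (len & 4))
        printf("1 x crc32l (4 bytes)\n");
    if (len & 2)
        printf("1 x crc32w (2 bytes)\n");
    if (len & 1)
        printf("1 x crc32b (1 byte)\n");
    return 0; /* 16 + 4 + 2 + 1 = 23 bytes covered */
}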
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/fcntl.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include <linux/syscalls.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/sched/task.h> #include <linux/fs.h> #include <linux/filelock.h> #include <linux/file.h> #include <linux/capability.h> #include <linux/dnotify.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/pipe_fs_i.h> #include <linux/security.h> #include <linux/ptrace.h> #include <linux/signal.h> #include <linux/rcupdate.h> #include <linux/pid_namespace.h> #include <linux/user_namespace.h> #include <linux/memfd.h> #include <linux/compat.h> #include <linux/mount.h> #include <linux/rw_hint.h> #include <linux/poll.h> #include <asm/siginfo.h> #include <linux/uaccess.h> #include "internal.h" #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME) static int setfl(int fd, struct file * filp, unsigned int arg) { struct inode * inode = file_inode(filp); int error = 0; /* * O_APPEND cannot be cleared if the file is marked as append-only * and the file is open for write. */ if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode)) return -EPERM; /* O_NOATIME can only be set by the owner or superuser */ if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME)) if (!inode_owner_or_capable(file_mnt_idmap(filp), inode)) return -EPERM; /* required for strict SunOS emulation */ if (O_NONBLOCK != O_NDELAY) if (arg & O_NDELAY) arg |= O_NONBLOCK; /* Pipe packetized mode is controlled by O_DIRECT flag */ if (!S_ISFIFO(inode->i_mode) && (arg & O_DIRECT) && !(filp->f_mode & FMODE_CAN_ODIRECT)) return -EINVAL; if (filp->f_op->check_flags) error = filp->f_op->check_flags(arg); if (error) return error; /* * ->fasync() is responsible for setting the FASYNC bit. */ if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) { error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0); if (error < 0) goto out; if (error > 0) error = 0; } spin_lock(&filp->f_lock); filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK); filp->f_iocb_flags = iocb_flags(filp); spin_unlock(&filp->f_lock); out: return error; } /* * Allocate a file->f_owner struct if it doesn't exist, handling racing * allocations correctly.
*/ int file_f_owner_allocate(struct file *file) { struct fown_struct *f_owner; f_owner = file_f_owner(file); if (f_owner) return 0; f_owner = kzalloc(sizeof(struct fown_struct), GFP_KERNEL); if (!f_owner) return -ENOMEM; rwlock_init(&f_owner->lock); f_owner->file = file; /* If someone else raced us, drop our allocation. */ if (unlikely(cmpxchg(&file->f_owner, NULL, f_owner))) kfree(f_owner); return 0; } EXPORT_SYMBOL(file_f_owner_allocate); void file_f_owner_release(struct file *file) { struct fown_struct *f_owner; f_owner = file_f_owner(file); if (f_owner) { put_pid(f_owner->pid); kfree(f_owner); } } void __f_setown(struct file *filp, struct pid *pid, enum pid_type type, int force) { struct fown_struct *f_owner; f_owner = file_f_owner(filp); if (WARN_ON_ONCE(!f_owner)) return; write_lock_irq(&f_owner->lock); if (force || !f_owner->pid) { put_pid(f_owner->pid); f_owner->pid = get_pid(pid); f_owner->pid_type = type; if (pid) { const struct cred *cred = current_cred(); security_file_set_fowner(filp); f_owner->uid = cred->uid; f_owner->euid = cred->euid; } } write_unlock_irq(&f_owner->lock); } EXPORT_SYMBOL(__f_setown); int f_setown(struct file *filp, int who, int force) { enum pid_type type; struct pid *pid = NULL; int ret = 0; might_sleep(); type = PIDTYPE_TGID; if (who < 0) { /* avoid overflow below */ if (who == INT_MIN) return -EINVAL; type = PIDTYPE_PGID; who = -who; } ret = file_f_owner_allocate(filp); if (ret) return ret; rcu_read_lock(); if (who) { pid = find_vpid(who); if (!pid) ret = -ESRCH; } if (!ret) __f_setown(filp, pid, type, force); rcu_read_unlock(); return ret; } EXPORT_SYMBOL(f_setown); void f_delown(struct file *filp) { __f_setown(filp, NULL, PIDTYPE_TGID, 1); } pid_t f_getown(struct file *filp) { pid_t pid = 0; struct fown_struct *f_owner; f_owner = file_f_owner(filp); if (!f_owner) return pid; read_lock_irq(&f_owner->lock); rcu_read_lock(); if (pid_task(f_owner->pid, f_owner->pid_type)) { pid = pid_vnr(f_owner->pid); if (f_owner->pid_type == PIDTYPE_PGID) pid = -pid; } rcu_read_unlock(); read_unlock_irq(&f_owner->lock); return pid; } static int f_setown_ex(struct file *filp, unsigned long arg) { struct f_owner_ex __user *owner_p = (void __user *)arg; struct f_owner_ex owner; struct pid *pid; int type; int ret; ret = copy_from_user(&owner, owner_p, sizeof(owner)); if (ret) return -EFAULT; switch (owner.type) { case F_OWNER_TID: type = PIDTYPE_PID; break; case F_OWNER_PID: type = PIDTYPE_TGID; break; case F_OWNER_PGRP: type = PIDTYPE_PGID; break; default: return -EINVAL; } ret = file_f_owner_allocate(filp); if (ret) return ret; rcu_read_lock(); pid = find_vpid(owner.pid); if (owner.pid && !pid) ret = -ESRCH; else __f_setown(filp, pid, type, 1); rcu_read_unlock(); return ret; } static int f_getown_ex(struct file *filp, unsigned long arg) { struct f_owner_ex __user *owner_p = (void __user *)arg; struct f_owner_ex owner = {}; int ret = 0; struct fown_struct *f_owner; enum pid_type pid_type = PIDTYPE_PID; f_owner = file_f_owner(filp); if (f_owner) { read_lock_irq(&f_owner->lock); rcu_read_lock(); if (pid_task(f_owner->pid, f_owner->pid_type)) owner.pid = pid_vnr(f_owner->pid); rcu_read_unlock(); pid_type = f_owner->pid_type; } switch (pid_type) { case PIDTYPE_PID: owner.type = F_OWNER_TID; break; case PIDTYPE_TGID: owner.type = F_OWNER_PID; break; case PIDTYPE_PGID: owner.type = F_OWNER_PGRP; break; default: WARN_ON(1); ret = -EINVAL; break; } if (f_owner) read_unlock_irq(&f_owner->lock); if (!ret) { ret = copy_to_user(owner_p, &owner, sizeof(owner)); if (ret) ret = -EFAULT; 
} return ret; } #ifdef CONFIG_CHECKPOINT_RESTORE static int f_getowner_uids(struct file *filp, unsigned long arg) { struct user_namespace *user_ns = current_user_ns(); struct fown_struct *f_owner; uid_t __user *dst = (void __user *)arg; uid_t src[2] = {0, 0}; int err; f_owner = file_f_owner(filp); if (f_owner) { read_lock_irq(&f_owner->lock); src[0] = from_kuid(user_ns, f_owner->uid); src[1] = from_kuid(user_ns, f_owner->euid); read_unlock_irq(&f_owner->lock); } err = put_user(src[0], &dst[0]); err |= put_user(src[1], &dst[1]); return err; } #else static int f_getowner_uids(struct file *filp, unsigned long arg) { return -EINVAL; } #endif static bool rw_hint_valid(u64 hint) { BUILD_BUG_ON(WRITE_LIFE_NOT_SET != RWH_WRITE_LIFE_NOT_SET); BUILD_BUG_ON(WRITE_LIFE_NONE != RWH_WRITE_LIFE_NONE); BUILD_BUG_ON(WRITE_LIFE_SHORT != RWH_WRITE_LIFE_SHORT); BUILD_BUG_ON(WRITE_LIFE_MEDIUM != RWH_WRITE_LIFE_MEDIUM); BUILD_BUG_ON(WRITE_LIFE_LONG != RWH_WRITE_LIFE_LONG); BUILD_BUG_ON(WRITE_LIFE_EXTREME != RWH_WRITE_LIFE_EXTREME); switch (hint) { case RWH_WRITE_LIFE_NOT_SET: case RWH_WRITE_LIFE_NONE: case RWH_WRITE_LIFE_SHORT: case RWH_WRITE_LIFE_MEDIUM: case RWH_WRITE_LIFE_LONG: case RWH_WRITE_LIFE_EXTREME: return true; default: return false; } } static long fcntl_get_rw_hint(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(file); u64 __user *argp = (u64 __user *)arg; u64 hint = READ_ONCE(inode->i_write_hint); if (copy_to_user(argp, &hint, sizeof(*argp))) return -EFAULT; return 0; } static long fcntl_set_rw_hint(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(file); u64 __user *argp = (u64 __user *)arg; u64 hint; if (!inode_owner_or_capable(file_mnt_idmap(file), inode)) return -EPERM; if (copy_from_user(&hint, argp, sizeof(hint))) return -EFAULT; if (!rw_hint_valid(hint)) return -EINVAL; WRITE_ONCE(inode->i_write_hint, hint); /* * file->f_mapping->host may differ from inode. As an example, * blkdev_open() modifies file->f_mapping. */ if (file->f_mapping->host != inode) WRITE_ONCE(file->f_mapping->host->i_write_hint, hint); return 0; } /* Is the file descriptor a dup of the file? */ static long f_dupfd_query(int fd, struct file *filp) { CLASS(fd_raw, f)(fd); if (fd_empty(f)) return -EBADF; /* * We can do the 'fdput()' immediately, as the only thing that * matters is the pointer value which isn't changed by the fdput. * * Technically we didn't need a ref at all, and 'fdget()' was * overkill, but given our lockless file pointer lookup, the * alternatives are complicated. */ return fd_file(f) == filp; } /* Let the caller figure out whether a given file was just created. 
*/ static long f_created_query(const struct file *filp) { return !!(filp->f_mode & FMODE_CREATED); } static int f_owner_sig(struct file *filp, int signum, bool setsig) { int ret = 0; struct fown_struct *f_owner; might_sleep(); if (setsig) { if (!valid_signal(signum)) return -EINVAL; ret = file_f_owner_allocate(filp); if (ret) return ret; } f_owner = file_f_owner(filp); if (setsig) f_owner->signum = signum; else if (f_owner) ret = f_owner->signum; return ret; } static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, struct file *filp) { void __user *argp = (void __user *)arg; int argi = (int)arg; struct flock flock; long err = -EINVAL; switch (cmd) { case F_CREATED_QUERY: err = f_created_query(filp); break; case F_DUPFD: err = f_dupfd(argi, filp, 0); break; case F_DUPFD_CLOEXEC: err = f_dupfd(argi, filp, O_CLOEXEC); break; case F_DUPFD_QUERY: err = f_dupfd_query(argi, filp); break; case F_GETFD: err = get_close_on_exec(fd) ? FD_CLOEXEC : 0; break; case F_SETFD: err = 0; set_close_on_exec(fd, argi & FD_CLOEXEC); break; case F_GETFL: err = filp->f_flags; break; case F_SETFL: err = setfl(fd, filp, argi); break; #if BITS_PER_LONG != 32 /* 32-bit arches must use fcntl64() */ case F_OFD_GETLK: #endif case F_GETLK: if (copy_from_user(&flock, argp, sizeof(flock))) return -EFAULT; err = fcntl_getlk(filp, cmd, &flock); if (!err && copy_to_user(argp, &flock, sizeof(flock))) return -EFAULT; break; #if BITS_PER_LONG != 32 /* 32-bit arches must use fcntl64() */ case F_OFD_SETLK: case F_OFD_SETLKW: fallthrough; #endif case F_SETLK: case F_SETLKW: if (copy_from_user(&flock, argp, sizeof(flock))) return -EFAULT; err = fcntl_setlk(fd, filp, cmd, &flock); break; case F_GETOWN: /* * XXX If f_owner is a process group, the * negative return value will get converted * into an error. Oops. If we keep the * current syscall conventions, the only way * to fix this will be in libc. 
*/ err = f_getown(filp); force_successful_syscall_return(); break; case F_SETOWN: err = f_setown(filp, argi, 1); break; case F_GETOWN_EX: err = f_getown_ex(filp, arg); break; case F_SETOWN_EX: err = f_setown_ex(filp, arg); break; case F_GETOWNER_UIDS: err = f_getowner_uids(filp, arg); break; case F_GETSIG: err = f_owner_sig(filp, 0, false); break; case F_SETSIG: err = f_owner_sig(filp, argi, true); break; case F_GETLEASE: err = fcntl_getlease(filp); break; case F_SETLEASE: err = fcntl_setlease(fd, filp, argi); break; case F_NOTIFY: err = fcntl_dirnotify(fd, filp, argi); break; case F_SETPIPE_SZ: case F_GETPIPE_SZ: err = pipe_fcntl(filp, cmd, argi); break; case F_ADD_SEALS: case F_GET_SEALS: err = memfd_fcntl(filp, cmd, argi); break; case F_GET_RW_HINT: err = fcntl_get_rw_hint(filp, cmd, arg); break; case F_SET_RW_HINT: err = fcntl_set_rw_hint(filp, cmd, arg); break; default: break; } return err; } static int check_fcntl_cmd(unsigned cmd) { switch (cmd) { case F_CREATED_QUERY: case F_DUPFD: case F_DUPFD_CLOEXEC: case F_DUPFD_QUERY: case F_GETFD: case F_SETFD: case F_GETFL: return 1; } return 0; } SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) { CLASS(fd_raw, f)(fd); long err; if (fd_empty(f)) return -EBADF; if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) { if (!check_fcntl_cmd(cmd)) return -EBADF; } err = security_file_fcntl(fd_file(f), cmd, arg); if (!err) err = do_fcntl(fd, cmd, arg, fd_file(f)); return err; } #if BITS_PER_LONG == 32 SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, unsigned long, arg) { void __user *argp = (void __user *)arg; CLASS(fd_raw, f)(fd); struct flock64 flock; long err; if (fd_empty(f)) return -EBADF; if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) { if (!check_fcntl_cmd(cmd)) return -EBADF; } err = security_file_fcntl(fd_file(f), cmd, arg); if (err) return err; switch (cmd) { case F_GETLK64: case F_OFD_GETLK: err = -EFAULT; if (copy_from_user(&flock, argp, sizeof(flock))) break; err = fcntl_getlk64(fd_file(f), cmd, &flock); if (!err && copy_to_user(argp, &flock, sizeof(flock))) err = -EFAULT; break; case F_SETLK64: case F_SETLKW64: case F_OFD_SETLK: case F_OFD_SETLKW: err = -EFAULT; if (copy_from_user(&flock, argp, sizeof(flock))) break; err = fcntl_setlk64(fd, fd_file(f), cmd, &flock); break; default: err = do_fcntl(fd, cmd, arg, fd_file(f)); break; } return err; } #endif #ifdef CONFIG_COMPAT /* careful - don't use anywhere else */ #define copy_flock_fields(dst, src) \ (dst)->l_type = (src)->l_type; \ (dst)->l_whence = (src)->l_whence; \ (dst)->l_start = (src)->l_start; \ (dst)->l_len = (src)->l_len; \ (dst)->l_pid = (src)->l_pid; static int get_compat_flock(struct flock *kfl, const struct compat_flock __user *ufl) { struct compat_flock fl; if (copy_from_user(&fl, ufl, sizeof(struct compat_flock))) return -EFAULT; copy_flock_fields(kfl, &fl); return 0; } static int get_compat_flock64(struct flock *kfl, const struct compat_flock64 __user *ufl) { struct compat_flock64 fl; if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64))) return -EFAULT; copy_flock_fields(kfl, &fl); return 0; } static int put_compat_flock(const struct flock *kfl, struct compat_flock __user *ufl) { struct compat_flock fl; memset(&fl, 0, sizeof(struct compat_flock)); copy_flock_fields(&fl, kfl); if (copy_to_user(ufl, &fl, sizeof(struct compat_flock))) return -EFAULT; return 0; } static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __user *ufl) { struct compat_flock64 fl; BUILD_BUG_ON(sizeof(kfl->l_start) > 
sizeof(ufl->l_start)); BUILD_BUG_ON(sizeof(kfl->l_len) > sizeof(ufl->l_len)); memset(&fl, 0, sizeof(struct compat_flock64)); copy_flock_fields(&fl, kfl); if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64))) return -EFAULT; return 0; } #undef copy_flock_fields static unsigned int convert_fcntl_cmd(unsigned int cmd) { switch (cmd) { case F_GETLK64: return F_GETLK; case F_SETLK64: return F_SETLK; case F_SETLKW64: return F_SETLKW; } return cmd; } /* * GETLK was successful and we need to return the data, but it needs to fit in * the compat structure. * l_start shouldn't be too big, unless the original start + end is greater than * COMPAT_OFF_T_MAX, in which case the app was asking for trouble, so we return * -EOVERFLOW in that case. l_len could be too big, in which case we just * truncate it, and only allow the app to see that part of the conflicting lock * that might make sense to it anyway */ static int fixup_compat_flock(struct flock *flock) { if (flock->l_start > COMPAT_OFF_T_MAX) return -EOVERFLOW; if (flock->l_len > COMPAT_OFF_T_MAX) flock->l_len = COMPAT_OFF_T_MAX; return 0; } static long do_compat_fcntl64(unsigned int fd, unsigned int cmd, compat_ulong_t arg) { CLASS(fd_raw, f)(fd); struct flock flock; long err; if (fd_empty(f)) return -EBADF; if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) { if (!check_fcntl_cmd(cmd)) return -EBADF; } err = security_file_fcntl(fd_file(f), cmd, arg); if (err) return err; switch (cmd) { case F_GETLK: err = get_compat_flock(&flock, compat_ptr(arg)); if (err) break; err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock); if (err) break; err = fixup_compat_flock(&flock); if (!err) err = put_compat_flock(&flock, compat_ptr(arg)); break; case F_GETLK64: case F_OFD_GETLK: err = get_compat_flock64(&flock, compat_ptr(arg)); if (err) break; err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock); if (!err) err = put_compat_flock64(&flock, compat_ptr(arg)); break; case F_SETLK: case F_SETLKW: err = get_compat_flock(&flock, compat_ptr(arg)); if (err) break; err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock); break; case F_SETLK64: case F_SETLKW64: case F_OFD_SETLK: case F_OFD_SETLKW: err = get_compat_flock64(&flock, compat_ptr(arg)); if (err) break; err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock); break; default: err = do_fcntl(fd, cmd, arg, fd_file(f)); break; } return err; } COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, compat_ulong_t, arg) { return do_compat_fcntl64(fd, cmd, arg); } COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, compat_ulong_t, arg) { switch (cmd) { case F_GETLK64: case F_SETLK64: case F_SETLKW64: case F_OFD_GETLK: case F_OFD_SETLK: case F_OFD_SETLKW: return -EINVAL; } return do_compat_fcntl64(fd, cmd, arg); } #endif /* Table to convert sigio signal codes into poll band bitmaps */ static const __poll_t band_table[NSIGPOLL] = { EPOLLIN | EPOLLRDNORM, /* POLL_IN */ EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND, /* POLL_OUT */ EPOLLIN | EPOLLRDNORM | EPOLLMSG, /* POLL_MSG */ EPOLLERR, /* POLL_ERR */ EPOLLPRI | EPOLLRDBAND, /* POLL_PRI */ EPOLLHUP | EPOLLERR /* POLL_HUP */ }; static inline int sigio_perm(struct task_struct *p, struct fown_struct *fown, int sig) { const struct cred *cred; int ret; rcu_read_lock(); cred = __task_cred(p); ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) || uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) || uid_eq(fown->uid, cred->suid) || uid_eq(fown->uid, cred->uid)) && !security_file_send_sigiotask(p, fown, sig)); 
rcu_read_unlock(); return ret; } static void send_sigio_to_task(struct task_struct *p, struct fown_struct *fown, int fd, int reason, enum pid_type type) { /* * F_SETSIG can change ->signum lockless in parallel, make * sure we read it once and use the same value throughout. */ int signum = READ_ONCE(fown->signum); if (!sigio_perm(p, fown, signum)) return; switch (signum) { default: { kernel_siginfo_t si; /* Queue a rt signal with the appropriate fd as its value. We use SI_SIGIO as the source, not SI_KERNEL, since kernel signals always get delivered even if we can't queue. Failure to queue in this case _should_ be reported; we fall back to SIGIO in that case. --sct */ clear_siginfo(&si); si.si_signo = signum; si.si_errno = 0; si.si_code = reason; /* * Posix definies POLL_IN and friends to be signal * specific si_codes for SIG_POLL. Linux extended * these si_codes to other signals in a way that is * ambiguous if other signals also have signal * specific si_codes. In that case use SI_SIGIO instead * to remove the ambiguity. */ if ((signum != SIGPOLL) && sig_specific_sicodes(signum)) si.si_code = SI_SIGIO; /* Make sure we are called with one of the POLL_* reasons, otherwise we could leak kernel stack into userspace. */ BUG_ON((reason < POLL_IN) || ((reason - POLL_IN) >= NSIGPOLL)); if (reason - POLL_IN >= NSIGPOLL) si.si_band = ~0L; else si.si_band = mangle_poll(band_table[reason - POLL_IN]); si.si_fd = fd; if (!do_send_sig_info(signum, &si, p, type)) break; } fallthrough; /* fall back on the old plain SIGIO signal */ case 0: do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type); } } void send_sigio(struct fown_struct *fown, int fd, int band) { struct task_struct *p; enum pid_type type; unsigned long flags; struct pid *pid; read_lock_irqsave(&fown->lock, flags); type = fown->pid_type; pid = fown->pid; if (!pid) goto out_unlock_fown; if (type <= PIDTYPE_TGID) { rcu_read_lock(); p = pid_task(pid, PIDTYPE_PID); if (p) send_sigio_to_task(p, fown, fd, band, type); rcu_read_unlock(); } else { read_lock(&tasklist_lock); do_each_pid_task(pid, type, p) { send_sigio_to_task(p, fown, fd, band, type); } while_each_pid_task(pid, type, p); read_unlock(&tasklist_lock); } out_unlock_fown: read_unlock_irqrestore(&fown->lock, flags); } static void send_sigurg_to_task(struct task_struct *p, struct fown_struct *fown, enum pid_type type) { if (sigio_perm(p, fown, SIGURG)) do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type); } int send_sigurg(struct file *file) { struct fown_struct *fown; struct task_struct *p; enum pid_type type; struct pid *pid; unsigned long flags; int ret = 0; fown = file_f_owner(file); if (!fown) return 0; read_lock_irqsave(&fown->lock, flags); type = fown->pid_type; pid = fown->pid; if (!pid) goto out_unlock_fown; ret = 1; if (type <= PIDTYPE_TGID) { rcu_read_lock(); p = pid_task(pid, PIDTYPE_PID); if (p) send_sigurg_to_task(p, fown, type); rcu_read_unlock(); } else { read_lock(&tasklist_lock); do_each_pid_task(pid, type, p) { send_sigurg_to_task(p, fown, type); } while_each_pid_task(pid, type, p); read_unlock(&tasklist_lock); } out_unlock_fown: read_unlock_irqrestore(&fown->lock, flags); return ret; } static DEFINE_SPINLOCK(fasync_lock); static struct kmem_cache *fasync_cache __ro_after_init; /* * Remove a fasync entry. If successfully removed, return * positive and clear the FASYNC flag. If no entry exists, * do nothing and return 0. * * NOTE! It is very important that the FASYNC flag always * match the state "is the filp on a fasync list". 
* */ int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp) { struct fasync_struct *fa, **fp; int result = 0; spin_lock(&filp->f_lock); spin_lock(&fasync_lock); for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { if (fa->fa_file != filp) continue; write_lock_irq(&fa->fa_lock); fa->fa_file = NULL; write_unlock_irq(&fa->fa_lock); *fp = fa->fa_next; kfree_rcu(fa, fa_rcu); filp->f_flags &= ~FASYNC; result = 1; break; } spin_unlock(&fasync_lock); spin_unlock(&filp->f_lock); return result; } struct fasync_struct *fasync_alloc(void) { return kmem_cache_alloc(fasync_cache, GFP_KERNEL); } /* * NOTE! This can be used only for unused fasync entries: * entries that actually got inserted on the fasync list * need to be released by rcu - see fasync_remove_entry. */ void fasync_free(struct fasync_struct *new) { kmem_cache_free(fasync_cache, new); } /* * Insert a new entry into the fasync list. Return the pointer to the * old one if we didn't use the new one. * * NOTE! It is very important that the FASYNC flag always * match the state "is the filp on a fasync list". */ struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new) { struct fasync_struct *fa, **fp; spin_lock(&filp->f_lock); spin_lock(&fasync_lock); for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { if (fa->fa_file != filp) continue; write_lock_irq(&fa->fa_lock); fa->fa_fd = fd; write_unlock_irq(&fa->fa_lock); goto out; } rwlock_init(&new->fa_lock); new->magic = FASYNC_MAGIC; new->fa_file = filp; new->fa_fd = fd; new->fa_next = *fapp; rcu_assign_pointer(*fapp, new); filp->f_flags |= FASYNC; out: spin_unlock(&fasync_lock); spin_unlock(&filp->f_lock); return fa; } /* * Add a fasync entry. Return negative on error, positive if * added, and zero if did nothing but change an existing one. */ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp) { struct fasync_struct *new; new = fasync_alloc(); if (!new) return -ENOMEM; /* * fasync_insert_entry() returns the old (update) entry if * it existed. * * So free the (unused) new entry and return 0 to let the * caller know that we didn't add any new fasync entries. */ if (fasync_insert_entry(fd, filp, fapp, new)) { fasync_free(new); return 0; } return 1; } /* * fasync_helper() is used by almost all character device drivers * to set up the fasync queue, and for regular files by the file * lease code. It returns negative on error, 0 if it did no changes * and positive if it added/deleted the entry. */ int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp) { if (!on) return fasync_remove_entry(filp, fapp); return fasync_add_entry(fd, filp, fapp); } EXPORT_SYMBOL(fasync_helper); /* * rcu_read_lock() is held */ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band) { while (fa) { struct fown_struct *fown; unsigned long flags; if (fa->magic != FASYNC_MAGIC) { printk(KERN_ERR "kill_fasync: bad magic number in " "fasync_struct!\n"); return; } read_lock_irqsave(&fa->fa_lock, flags); if (fa->fa_file) { fown = file_f_owner(fa->fa_file); if (!fown) goto next; /* Don't send SIGURG to processes which have not set a queued signum: SIGURG has its own default signalling mechanism. 
*/ if (!(sig == SIGURG && fown->signum == 0)) send_sigio(fown, fa->fa_fd, band); } next: read_unlock_irqrestore(&fa->fa_lock, flags); fa = rcu_dereference(fa->fa_next); } } void kill_fasync(struct fasync_struct **fp, int sig, int band) { /* First a quick test without locking: usually * the list is empty. */ if (*fp) { rcu_read_lock(); kill_fasync_rcu(rcu_dereference(*fp), sig, band); rcu_read_unlock(); } } EXPORT_SYMBOL(kill_fasync); static int __init fcntl_init(void) { /* * Please add new bits here to ensure allocation uniqueness. * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY * is defined as O_NONBLOCK on some platforms and not on others. */ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( (VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) | __FMODE_EXEC)); fasync_cache = kmem_cache_create("fasync_cache", sizeof(struct fasync_struct), 0, SLAB_PANIC | SLAB_ACCOUNT, NULL); return 0; } module_init(fcntl_init)
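/*
 * Illustrative sketch only (not part of fs/fcntl.c): a minimal userspace
 * program driving the F_SETOWN/F_SETSIG/FASYNC paths implemented above.
 * The choice of SIGRTMIN and the example_sigio_setup() helper name are
 * assumptions for illustration; error handling is omitted for brevity.
 */
#define _GNU_SOURCE		/* F_SETSIG is Linux-specific */
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void on_io(int sig, siginfo_t *si, void *ctx)
{
	/* si->si_fd and si->si_band were filled in by send_sigio_to_task() */
}

int example_sigio_setup(int fd)
{
	struct sigaction sa = { .sa_sigaction = on_io, .sa_flags = SA_SIGINFO };

	sigaction(SIGRTMIN, &sa, NULL);
	fcntl(fd, F_SETOWN, getpid());	/* f_setown(): route I/O signals here */
	fcntl(fd, F_SETSIG, SIGRTMIN);	/* f_owner_sig(): queue an RT signal */
	/* setting O_ASYNC flips FASYNC via setfl() -> ->fasync() */
	return fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
}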
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VHT handling
 *
 * Portions of this file
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 * Copyright (C) 2018 - 2024 Intel Corporation
 */

#include <linux/ieee80211.h>
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "rate.h"


static void __check_vhtcap_disable(struct ieee80211_sub_if_data *sdata,
				   struct ieee80211_sta_vht_cap *vht_cap,
				   u32 flag)
{
	__le32 le_flag = cpu_to_le32(flag);

	if (sdata->u.mgd.vht_capa_mask.vht_cap_info & le_flag &&
	    !(sdata->u.mgd.vht_capa.vht_cap_info & le_flag))
		vht_cap->cap &= ~flag;
}

void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
				      struct ieee80211_sta_vht_cap *vht_cap)
{
	int i;
	u16 rxmcs_mask, rxmcs_cap, rxmcs_n, txmcs_mask, txmcs_cap, txmcs_n;

	if (!vht_cap->vht_supported)
		return;

	if (sdata->vif.type != NL80211_IFTYPE_STATION)
		return;

	__check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_RXLDPC);
	__check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_SHORT_GI_80);
	__check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_SHORT_GI_160);
	__check_vhtcap_disable(sdata, vht_cap, IEEE80211_VHT_CAP_TXSTBC);
	__check_vhtcap_disable(sdata, vht_cap,
			       IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
	__check_vhtcap_disable(sdata, vht_cap,
			       IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
	__check_vhtcap_disable(sdata, vht_cap,
			       IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN);
	__check_vhtcap_disable(sdata, vht_cap,
			       IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN);

	/* Allow user to decrease AMPDU length exponent */
	if (sdata->u.mgd.vht_capa_mask.vht_cap_info &
	    cpu_to_le32(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK)) {
		u32 cap, n;

		n = le32_to_cpu(sdata->u.mgd.vht_capa.vht_cap_info) &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		n >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
		cap = vht_cap->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		cap >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;

		if (n < cap) {
			vht_cap->cap &=
				~IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
			vht_cap->cap |=
				n << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
		}
	}

	/* Allow the user to decrease MCSes */
	rxmcs_mask =
		le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.rx_mcs_map);
	rxmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.rx_mcs_map);
	rxmcs_n &= rxmcs_mask;
	rxmcs_cap = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);

	txmcs_mask =
		le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.tx_mcs_map);
	txmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.tx_mcs_map);
	txmcs_n &= txmcs_mask;
	txmcs_cap = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
	for (i = 0; i < 8; i++) {
		u8 m, n, c;

		m = (rxmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
		n = (rxmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
		c = (rxmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
		if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) ||
			  n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) {
			rxmcs_cap &= ~(3 << 2*i);
			rxmcs_cap |= (rxmcs_n & (3 << 2*i));
		}

		m = (txmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
		n = (txmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
		c = (txmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
		if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) ||
			  n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) {
			txmcs_cap &= ~(3 << 2*i);
			txmcs_cap |= (txmcs_n & (3 << 2*i));
		}
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_cap);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_cap);
}

void
ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_supported_band *sband,
				    const struct ieee80211_vht_cap *vht_cap_ie,
				    const struct ieee80211_vht_cap *vht_cap_ie2,
				    struct link_sta_info *link_sta)
{
	struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
	struct ieee80211_sta_vht_cap own_cap;
	u32 cap_info, i;
	bool have_80mhz;
	u32 mpdu_len;

	memset(vht_cap, 0, sizeof(*vht_cap));

	if (!link_sta->pub->ht_cap.ht_supported)
		return;

	if (!vht_cap_ie || !sband->vht_cap.vht_supported)
		return;

	/* Allow VHT if at least one channel on the sband supports 80 MHz */
	have_80mhz = false;
	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
						IEEE80211_CHAN_NO_80MHZ))
			continue;

		have_80mhz = true;
		break;
	}

	if (!have_80mhz)
		return;

	/*
	 * A VHT STA must support 40 MHz, but if we verify that here
	 * then we break a few things - some APs (e.g. Netgear R6300v2
	 * and others based on the BCM4360 chipset) will unset this
	 * capability bit when operating in 20 MHz.
	 */

	vht_cap->vht_supported = true;

	own_cap = sband->vht_cap;
	/*
	 * If user has specified capability overrides, take care
	 * of that if the station we're setting up is the AP that
	 * we advertised a restricted capability set to. Override
	 * our own capabilities and then use those below.
	 */
	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
	    !test_sta_flag(link_sta->sta, WLAN_STA_TDLS_PEER))
		ieee80211_apply_vhtcap_overrides(sdata, &own_cap);

	/* take some capabilities as-is */
	cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info);
	vht_cap->cap = cap_info;
	vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_VHT_TXOP_PS |
			IEEE80211_VHT_CAP_HTC_VHT |
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
			IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB |
			IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;

	vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK,
			      own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK);

	/* and some based on our own capabilities */
	switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
	case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
		vht_cap->cap |= cap_info &
				IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
		break;
	case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
		vht_cap->cap |= cap_info &
				IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
		break;
	default:
		/* nothing */
		break;
	}

	/* symmetric capabilities */
	vht_cap->cap |= cap_info & own_cap.cap &
			(IEEE80211_VHT_CAP_SHORT_GI_80 |
			 IEEE80211_VHT_CAP_SHORT_GI_160);

	/* remaining ones */
	if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
		vht_cap->cap |= cap_info &
				(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
				 IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);

	if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
		vht_cap->cap |= cap_info &
				(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
				 IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);

	if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
		vht_cap->cap |= cap_info &
				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;

	if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
		vht_cap->cap |= cap_info &
				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;

	if (own_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
		vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_RXSTBC_MASK;

	if (own_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
		vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_TXSTBC;

	/* Copy peer MCS info, the driver might need them. */
	memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs,
	       sizeof(struct ieee80211_vht_mcs_info));

	/* copy EXT_NSS_BW Support value or remove the capability */
	if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->cap |= (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK);
	else
		vht_cap->vht_mcs.tx_highest &=
			~cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);

	/* but also restrict MCSes */
	for (i = 0; i < 8; i++) {
		u16 own_rx, own_tx, peer_rx, peer_tx;

		own_rx = le16_to_cpu(own_cap.vht_mcs.rx_mcs_map);
		own_rx = (own_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;

		own_tx = le16_to_cpu(own_cap.vht_mcs.tx_mcs_map);
		own_tx = (own_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;

		peer_rx = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
		peer_rx = (peer_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;

		peer_tx = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
		peer_tx = (peer_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;

		if (peer_tx != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
			if (own_rx == IEEE80211_VHT_MCS_NOT_SUPPORTED)
				peer_tx = IEEE80211_VHT_MCS_NOT_SUPPORTED;
			else if (own_rx < peer_tx)
				peer_tx = own_rx;
		}

		if (peer_rx != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
			if (own_tx == IEEE80211_VHT_MCS_NOT_SUPPORTED)
				peer_rx = IEEE80211_VHT_MCS_NOT_SUPPORTED;
			else if (own_tx < peer_rx)
				peer_rx = own_tx;
		}

		vht_cap->vht_mcs.rx_mcs_map &=
			~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2);
		vht_cap->vht_mcs.rx_mcs_map |= cpu_to_le16(peer_rx << i * 2);

		vht_cap->vht_mcs.tx_mcs_map &=
			~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2);
		vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2);
	}

	/*
	 * This is a workaround for VHT-enabled STAs which break the spec
	 * and have the VHT-MCS Rx map filled in with value 3 for all eight
	 * spatial streams, an example is AR9462.
	 *
	 * As per spec, in section 22.1.1 Introduction to the VHT PHY
	 * A VHT STA shall support at least single spatial stream VHT-MCSs
	 * 0 to 7 (transmit and receive) in all supported channel widths.
	 */
	if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) {
		vht_cap->vht_supported = false;
		sdata_info(sdata,
			   "Ignoring VHT IE from %pM (link:%pM) due to invalid rx_mcs_map\n",
			   link_sta->sta->addr, link_sta->addr);
		return;
	}

	/* finally set up the bandwidth */
	switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
	case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
	case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
		link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
		break;
	default:
		link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;

		if (!(vht_cap->vht_mcs.tx_highest &
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE)))
			break;

		/*
		 * If this is non-zero, then it does support 160 MHz after all,
		 * in one form or the other. We don't distinguish here (or even
		 * above) between 160 and 80+80 yet.
		 */
		if (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)
			link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
	}

	link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);

	/*
	 * Work around the Cisco 9115 FW 17.3 bug by taking the min of
	 * both reported MPDU lengths.
	 */
	mpdu_len = vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK;
	if (vht_cap_ie2)
		mpdu_len = min_t(u32, mpdu_len,
				 le32_get_bits(vht_cap_ie2->vht_cap_info,
					       IEEE80211_VHT_CAP_MAX_MPDU_MASK));

	/*
	 * FIXME - should the amsdu len be per link? store per link
	 * and maintain a minimum?
	 */
	switch (mpdu_len) {
	case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
		link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
		break;
	case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
		link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_7991;
		break;
	case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
	default:
		link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_3895;
		break;
	}

	ieee80211_sta_recalc_aggregates(&link_sta->sta->sta);
}

/* FIXME: move this to some better location - parses HE/EHT now */
static enum ieee80211_sta_rx_bandwidth
__ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta,
			  struct cfg80211_chan_def *chandef)
{
	unsigned int link_id = link_sta->link_id;
	struct ieee80211_sub_if_data *sdata = link_sta->sta->sdata;
	struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
	struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap;
	struct ieee80211_sta_eht_cap *eht_cap = &link_sta->pub->eht_cap;
	u32 cap_width;

	if (he_cap->has_he) {
		enum nl80211_band band;
		u8 info;

		if (chandef) {
			band = chandef->chan->band;
		} else {
			struct ieee80211_bss_conf *link_conf;

			rcu_read_lock();
			link_conf = rcu_dereference(sdata->vif.link_conf[link_id]);
			band = link_conf->chanreq.oper.chan->band;
			rcu_read_unlock();
		}

		if (eht_cap->has_eht && band == NL80211_BAND_6GHZ) {
			info = eht_cap->eht_cap_elem.phy_cap_info[0];

			if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
				return IEEE80211_STA_RX_BW_320;
		}

		info = he_cap->he_cap_elem.phy_cap_info[0];

		if (band == NL80211_BAND_2GHZ) {
			if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
				return IEEE80211_STA_RX_BW_40;
			return IEEE80211_STA_RX_BW_20;
		}

		if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G ||
		    info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
			return IEEE80211_STA_RX_BW_160;

		if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
			return IEEE80211_STA_RX_BW_80;

		return IEEE80211_STA_RX_BW_20;
	}

	if (!vht_cap->vht_supported)
		return link_sta->pub->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
				IEEE80211_STA_RX_BW_40 :
				IEEE80211_STA_RX_BW_20;

	cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;

	if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
	    cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
		return IEEE80211_STA_RX_BW_160;

	/*
	 * If this is non-zero, then it does support 160 MHz after all,
	 * in one form or the other. We don't distinguish here (or even
	 * above) between 160 and 80+80 yet.
	 */
	if (vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)
		return IEEE80211_STA_RX_BW_160;

	return IEEE80211_STA_RX_BW_80;
}

enum ieee80211_sta_rx_bandwidth
_ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta,
			 struct cfg80211_chan_def *chandef)
{
	/*
	 * With RX OMI, also pretend that the STA's capability changed.
	 * Of course this isn't really true, it didn't change, only our
	 * RX capability was changed by notifying RX OMI to the STA.
	 * The purpose, however, is to save power, and that requires
	 * changing also transmissions to the AP and the chanctx. The
	 * transmissions depend on link_sta->bandwidth which is set in
	 * _ieee80211_sta_cur_vht_bw() below, but the chanctx depends
	 * on the result of this function which is also called by
	 * _ieee80211_sta_cur_vht_bw(), so we need to do that here as
	 * well. This is sufficient for the steady state, but during
	 * the transition we already need to change TX/RX separately,
	 * so _ieee80211_sta_cur_vht_bw() below applies the _tx one.
	 */
	return min(__ieee80211_sta_cap_rx_bw(link_sta, chandef),
		   link_sta->rx_omi_bw_rx);
}

enum nl80211_chan_width
ieee80211_sta_cap_chan_bw(struct link_sta_info *link_sta)
{
	struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
	u32 cap_width;

	if (!vht_cap->vht_supported) {
		if (!link_sta->pub->ht_cap.ht_supported)
			return NL80211_CHAN_WIDTH_20_NOHT;

		return link_sta->pub->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
				NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20;
	}

	cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;

	if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)
		return NL80211_CHAN_WIDTH_160;
	else if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
		return NL80211_CHAN_WIDTH_80P80;

	return NL80211_CHAN_WIDTH_80;
}

enum nl80211_chan_width
ieee80211_sta_rx_bw_to_chan_width(struct link_sta_info *link_sta)
{
	enum ieee80211_sta_rx_bandwidth cur_bw = link_sta->pub->bandwidth;
	struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
	u32 cap_width;

	switch (cur_bw) {
	case IEEE80211_STA_RX_BW_20:
		if (!link_sta->pub->ht_cap.ht_supported)
			return NL80211_CHAN_WIDTH_20_NOHT;
		else
			return NL80211_CHAN_WIDTH_20;
	case IEEE80211_STA_RX_BW_40:
		return NL80211_CHAN_WIDTH_40;
	case IEEE80211_STA_RX_BW_80:
		return NL80211_CHAN_WIDTH_80;
	case IEEE80211_STA_RX_BW_160:
		cap_width =
			vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;

		if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)
			return NL80211_CHAN_WIDTH_160;

		return NL80211_CHAN_WIDTH_80P80;
	default:
		return NL80211_CHAN_WIDTH_20;
	}
}

/* FIXME: rename/move - this deals with everything not just VHT */
enum ieee80211_sta_rx_bandwidth
_ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta,
			  struct cfg80211_chan_def *chandef)
{
	struct sta_info *sta = link_sta->sta;
	enum nl80211_chan_width bss_width;
	enum ieee80211_sta_rx_bandwidth bw;

	if (chandef) {
		bss_width = chandef->width;
	} else {
		struct ieee80211_bss_conf *link_conf;

		rcu_read_lock();
		link_conf = rcu_dereference(sta->sdata->vif.link_conf[link_sta->link_id]);
		if (WARN_ON_ONCE(!link_conf)) {
			rcu_read_unlock();
			return IEEE80211_STA_RX_BW_20;
		}
		bss_width = link_conf->chanreq.oper.width;
		rcu_read_unlock();
	}

	/* intentionally do not take rx_bw_omi_rx into account */
	bw = __ieee80211_sta_cap_rx_bw(link_sta, chandef);
	bw = min(bw, link_sta->cur_max_bandwidth);
	/* but do apply rx_omi_bw_tx */
	bw = min(bw, link_sta->rx_omi_bw_tx);

	/* Don't consider AP's bandwidth for TDLS peers, section 11.23.1 of
	 * IEEE80211-2016 specification makes higher bandwidth operation
	 * possible on the TDLS link if the peers have wider bandwidth
	 * capability.
	 *
	 * However, in this case, and only if the TDLS peer is authorized,
	 * limit to the tdls_chandef so that the configuration here isn't
	 * wider than what's actually requested on the channel context.
	 */
	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
	    test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) &&
	    test_sta_flag(sta, WLAN_STA_AUTHORIZED) &&
	    sta->tdls_chandef.chan)
		bw = min(bw, ieee80211_chan_width_to_rx_bw(sta->tdls_chandef.width));
	else
		bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));

	return bw;
}

void ieee80211_sta_init_nss(struct link_sta_info *link_sta)
{
	u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, eht_rx_nss = 0, rx_nss;
	bool support_160;

	if (link_sta->pub->eht_cap.has_eht) {
		int i;
		const u8 *rx_nss_mcs = (void *)&link_sta->pub->eht_cap.eht_mcs_nss_supp;

		/* get the max nss for EHT over all possible bandwidths and mcs */
		for (i = 0; i < sizeof(struct ieee80211_eht_mcs_nss_supp); i++)
			eht_rx_nss = max_t(u8, eht_rx_nss,
					   u8_get_bits(rx_nss_mcs[i],
						       IEEE80211_EHT_MCS_NSS_RX));
	}

	if (link_sta->pub->he_cap.has_he) {
		int i;
		u8 rx_mcs_80 = 0, rx_mcs_160 = 0;
		const struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap;
		u16 mcs_160_map =
			le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
		u16 mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);

		for (i = 7; i >= 0; i--) {
			u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;

			if (mcs_160 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
				rx_mcs_160 = i + 1;
				break;
			}
		}
		for (i = 7; i >= 0; i--) {
			u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;

			if (mcs_80 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
				rx_mcs_80 = i + 1;
				break;
			}
		}

		support_160 = he_cap->he_cap_elem.phy_cap_info[0] &
			      IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;

		if (support_160)
			he_rx_nss = min(rx_mcs_80, rx_mcs_160);
		else
			he_rx_nss = rx_mcs_80;
	}

	if (link_sta->pub->ht_cap.ht_supported) {
		if (link_sta->pub->ht_cap.mcs.rx_mask[0])
			ht_rx_nss++;
		if (link_sta->pub->ht_cap.mcs.rx_mask[1])
			ht_rx_nss++;
		if (link_sta->pub->ht_cap.mcs.rx_mask[2])
			ht_rx_nss++;
		if (link_sta->pub->ht_cap.mcs.rx_mask[3])
			ht_rx_nss++;
		/* FIXME: consider rx_highest? */
	}

	if (link_sta->pub->vht_cap.vht_supported) {
		int i;
		u16 rx_mcs_map;

		rx_mcs_map = le16_to_cpu(link_sta->pub->vht_cap.vht_mcs.rx_mcs_map);

		for (i = 7; i >= 0; i--) {
			u8 mcs = (rx_mcs_map >> (2 * i)) & 3;

			if (mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
				vht_rx_nss = i + 1;
				break;
			}
		}
		/* FIXME: consider rx_highest? */
	}

	rx_nss = max(vht_rx_nss, ht_rx_nss);
	rx_nss = max(he_rx_nss, rx_nss);
	rx_nss = max(eht_rx_nss, rx_nss);
	rx_nss = max_t(u8, 1, rx_nss);
	link_sta->capa_nss = rx_nss;

	/* that shouldn't be set yet, but we can handle it anyway */
	if (link_sta->op_mode_nss)
		link_sta->pub->rx_nss =
			min_t(u8, rx_nss, link_sta->op_mode_nss);
	else
		link_sta->pub->rx_nss = rx_nss;
}

u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
				  struct link_sta_info *link_sta,
				  u8 opmode, enum nl80211_band band)
{
	enum ieee80211_sta_rx_bandwidth new_bw;
	struct sta_opmode_info sta_opmode = {};
	u32 changed = 0;
	u8 nss;

	/* ignore - no support for BF yet */
	if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
		return 0;

	nss = opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
	nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
	nss += 1;

	if (link_sta->op_mode_nss != nss) {
		if (nss <= link_sta->capa_nss) {
			link_sta->op_mode_nss = nss;

			if (nss != link_sta->pub->rx_nss) {
				link_sta->pub->rx_nss = nss;
				changed |= IEEE80211_RC_NSS_CHANGED;
				sta_opmode.rx_nss = link_sta->pub->rx_nss;
				sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
			}
		} else {
			pr_warn_ratelimited("Ignoring NSS change in VHT Operating Mode Notification from %pM with invalid nss %d",
					    link_sta->pub->addr, nss);
		}
	}

	switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
	case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ:
		/* ignored: IEEE80211_OPMODE_NOTIF_BW_160_80P80 must not be set here */
		link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20;
		break;
	case IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ:
		/* ignored: IEEE80211_OPMODE_NOTIF_BW_160_80P80 must not be set here */
		link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_40;
		break;
	case IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ:
		if (opmode & IEEE80211_OPMODE_NOTIF_BW_160_80P80)
			link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
		else
			link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
		break;
	case IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ:
		/* legacy only, no longer used by newer spec */
		link_sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
		break;
	}

	new_bw = ieee80211_sta_cur_vht_bw(link_sta);
	if (new_bw != link_sta->pub->bandwidth) {
		link_sta->pub->bandwidth = new_bw;
		sta_opmode.bw = ieee80211_sta_rx_bw_to_chan_width(link_sta);
		changed |= IEEE80211_RC_BW_CHANGED;
		sta_opmode.changed |= STA_OPMODE_MAX_BW_CHANGED;
	}

	if (sta_opmode.changed)
		cfg80211_sta_opmode_change_notify(sdata->dev, link_sta->addr,
						  &sta_opmode, GFP_KERNEL);

	return changed;
}

void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
				 struct ieee80211_link_data *link,
				 struct ieee80211_mgmt *mgmt)
{
	struct ieee80211_bss_conf *link_conf = link->conf;

	if (!link_conf->mu_mimo_owner)
		return;

	if (!memcmp(mgmt->u.action.u.vht_group_notif.position,
		    link_conf->mu_group.position, WLAN_USER_POSITION_LEN) &&
	    !memcmp(mgmt->u.action.u.vht_group_notif.membership,
		    link_conf->mu_group.membership, WLAN_MEMBERSHIP_LEN))
		return;

	memcpy(link_conf->mu_group.membership,
	       mgmt->u.action.u.vht_group_notif.membership,
	       WLAN_MEMBERSHIP_LEN);
	memcpy(link_conf->mu_group.position,
	       mgmt->u.action.u.vht_group_notif.position,
	       WLAN_USER_POSITION_LEN);

	ieee80211_link_info_change_notify(sdata, link, BSS_CHANGED_MU_GROUPS);
}

void ieee80211_update_mu_groups(struct ieee80211_vif *vif, unsigned int link_id,
				const u8 *membership, const u8 *position)
{
	struct ieee80211_bss_conf *link_conf;

	rcu_read_lock();
	link_conf = rcu_dereference(vif->link_conf[link_id]);

	if (!WARN_ON_ONCE(!link_conf || !link_conf->mu_mimo_owner)) {
		memcpy(link_conf->mu_group.membership, membership,
		       WLAN_MEMBERSHIP_LEN);
		memcpy(link_conf->mu_group.position, position,
		       WLAN_USER_POSITION_LEN);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_update_mu_groups);

void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
				 struct link_sta_info *link_sta,
				 u8 opmode, enum nl80211_band band)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
	u32 changed = __ieee80211_vht_handle_opmode(sdata, link_sta,
						    opmode, band);

	if (changed > 0) {
		ieee80211_recalc_min_chandef(sdata, link_sta->link_id);
		rate_control_rate_update(local, sband, link_sta, changed);
	}
}

void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
				     u16 vht_mask[NL80211_VHT_NSS_MAX])
{
	int i;
	u16 mask, cap = le16_to_cpu(vht_cap);

	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
		mask = (cap >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
		switch (mask) {
		case IEEE80211_VHT_MCS_SUPPORT_0_7:
			vht_mask[i] = 0x00FF;
			break;
		case IEEE80211_VHT_MCS_SUPPORT_0_8:
			vht_mask[i] = 0x01FF;
			break;
		case IEEE80211_VHT_MCS_SUPPORT_0_9:
			vht_mask[i] = 0x03FF;
			break;
		case IEEE80211_VHT_MCS_NOT_SUPPORTED:
		default:
			vht_mask[i] = 0;
			break;
		}
	}
}
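/*
 * Illustrative sketch only (not part of net/mac80211/vht.c): the VHT MCS maps
 * manipulated throughout the file above pack one 2-bit support field per
 * spatial stream into a 16-bit word. This standalone decoder mirrors the
 * shift-and-mask loops above; the local macro names and the sample value
 * 0xfff6 are assumptions for illustration only.
 */
#include <stdio.h>

#define VHT_MCS_SUPPORT_0_7	0	/* same encoding as IEEE80211_VHT_MCS_SUPPORT_0_7 */
#define VHT_MCS_SUPPORT_0_8	1
#define VHT_MCS_SUPPORT_0_9	2
#define VHT_MCS_NOT_SUPPORTED	3	/* same encoding as IEEE80211_VHT_MCS_NOT_SUPPORTED */

int main(void)
{
	/* example map: NSS 1 -> MCS 0-9, NSS 2 -> MCS 0-8, NSS 3-8 unsupported */
	unsigned int mcs_map = 0xfff6;
	int nss;

	for (nss = 1; nss <= 8; nss++) {
		unsigned int v = (mcs_map >> (2 * (nss - 1))) & 3;

		printf("NSS %d: %s\n", nss,
		       v == VHT_MCS_NOT_SUPPORTED ? "not supported" :
		       v == VHT_MCS_SUPPORT_0_9 ? "MCS 0-9" :
		       v == VHT_MCS_SUPPORT_0_8 ? "MCS 0-8" : "MCS 0-7");
	}
	return 0;
}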
// SPDX-License-Identifier: GPL-2.0+
/*
 * PlayStation 2 Trance Vibrator driver
 *
 * Copyright (C) 2006 Sam Hocevar <sam@zoy.org>
 */

/* Standard include files */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>

#define DRIVER_AUTHOR "Sam Hocevar, sam@zoy.org"
#define DRIVER_DESC "PlayStation 2 Trance Vibrator driver"

#define TRANCEVIBRATOR_VENDOR_ID	0x0b49	/* ASCII Corporation */
#define TRANCEVIBRATOR_PRODUCT_ID	0x064f	/* Trance Vibrator */

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

/* Driver-local specific stuff */
struct trancevibrator {
	struct usb_device *udev;
	unsigned int speed;
};

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct trancevibrator *tv = usb_get_intfdata(intf);

	return sprintf(buf, "%d\n", tv->speed);
}

static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct trancevibrator *tv = usb_get_intfdata(intf);
	int temp, retval, old;

	retval = kstrtoint(buf, 10, &temp);
	if (retval)
		return retval;
	if (temp > 255)
		temp = 255;
	else if (temp < 0)
		temp = 0;
	old = tv->speed;
	tv->speed = temp;

	dev_dbg(&tv->udev->dev, "speed = %d\n", tv->speed);

	/* Set speed */
	retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0),
				 0x01, /* vendor request: set speed */
				 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
				 tv->speed, /* speed value */
				 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
	if (retval) {
		tv->speed = old;
		dev_dbg(&tv->udev->dev, "retval = %d\n", retval);
		return retval;
	}
	return count;
}
static DEVICE_ATTR_RW(speed);

static struct attribute *tv_attrs[] = {
	&dev_attr_speed.attr,
	NULL,
};
ATTRIBUTE_GROUPS(tv);

static int tv_probe(struct usb_interface *interface,
		    const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(interface);
	struct trancevibrator *dev;
	int retval;

	dev = kzalloc(sizeof(struct trancevibrator), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	dev->udev = usb_get_dev(udev);
	usb_set_intfdata(interface, dev);

	return 0;

error:
	kfree(dev);
	return retval;
}

static void tv_disconnect(struct usb_interface *interface)
{
	struct trancevibrator *dev;

	dev = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);
	usb_put_dev(dev->udev);
	kfree(dev);
}

/* USB subsystem object */
static struct usb_driver tv_driver = {
	.name =		"trancevibrator",
	.probe =	tv_probe,
	.disconnect =	tv_disconnect,
	.id_table =	id_table,
	.dev_groups =	tv_groups,
};

module_usb_driver(tv_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
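/*
 * Illustrative sketch only (not part of the driver above): the "speed"
 * attribute created by DEVICE_ATTR_RW(speed) is driven from userspace
 * through sysfs. The device path below is a made-up example - the real
 * path depends on the USB bus topology where the interface is bound.
 */
#include <stdio.h>

int main(void)
{
	/* hypothetical sysfs path for a bound interface */
	FILE *f = fopen("/sys/bus/usb/drivers/trancevibrator/2-1:1.0/speed", "w");

	if (!f)
		return 1;
	fprintf(f, "%d\n", 128);	/* 0-255; speed_store() clamps the value */
	return fclose(f);
}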
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Force feedback support for Mayflash game controller adapters.
 *
 * These devices are manufactured by Mayflash but identify themselves
 * using the vendor ID of DragonRise Inc.
 *
 * Tested with:
 * 0079:1801 "DragonRise Inc. Mayflash PS3 Game Controller Adapter"
 * 0079:1803 "DragonRise Inc. Mayflash Wireless Sensor DolphinBar"
 * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter"
 * 0079:1844 "DragonRise Inc. Mayflash GameCube Game Controller Adapter (v04)"
 *
 * The following adapters probably work too, but need to be tested:
 * 0079:1800 "DragonRise Inc. Mayflash WIIU Game Controller Adapter"
 *
 * Copyright (c) 2016-2017 Marcel Hasler <mahasler@gmail.com>
 */

/*
 */

#include <linux/input.h>
#include <linux/slab.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

struct mf_device {
	struct hid_report *report;
};

static int mf_play(struct input_dev *dev, void *data, struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct mf_device *mf = data;
	int strong, weak;

	strong = effect->u.rumble.strong_magnitude;
	weak = effect->u.rumble.weak_magnitude;

	dbg_hid("Called with 0x%04x 0x%04x.\n", strong, weak);

	strong = strong * 0xff / 0xffff;
	weak = weak * 0xff / 0xffff;

	dbg_hid("Running with 0x%02x 0x%02x.\n", strong, weak);

	mf->report->field[0]->value[0] = weak;
	mf->report->field[0]->value[1] = strong;
	hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);

	return 0;
}

static int mf_init(struct hid_device *hid)
{
	struct mf_device *mf;

	struct list_head *report_list =
			&hid->report_enum[HID_OUTPUT_REPORT].report_list;

	struct list_head *report_ptr;
	struct hid_report *report;

	struct list_head *input_ptr = &hid->inputs;
	struct hid_input *input;

	struct input_dev *dev;

	int error;

	/* Setup each of the four inputs */
	list_for_each(report_ptr, report_list) {
		report = list_entry(report_ptr, struct hid_report, list);

		if (report->maxfield < 1 || report->field[0]->report_count < 2) {
			hid_err(hid, "Invalid report, this should never happen!\n");
			return -ENODEV;
		}

		if (list_is_last(input_ptr, &hid->inputs)) {
			hid_err(hid, "Missing input, this should never happen!\n");
			return -ENODEV;
		}

		input_ptr = input_ptr->next;
		input = list_entry(input_ptr, struct hid_input, list);

		mf = kzalloc(sizeof(struct mf_device), GFP_KERNEL);
		if (!mf)
			return -ENOMEM;

		dev = input->input;
		set_bit(FF_RUMBLE, dev->ffbit);

		error = input_ff_create_memless(dev, mf, mf_play);
		if (error) {
			kfree(mf);
			return error;
		}

		mf->report = report;
		mf->report->field[0]->value[0] = 0x00;
		mf->report->field[0]->value[1] = 0x00;
		hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);
	}

	hid_info(hid, "Force feedback for HJZ Mayflash game controller "
		      "adapters by Marcel Hasler <mahasler@gmail.com>\n");

	return 0;
}

static int mf_probe(struct hid_device *hid, const struct hid_device_id *id)
{
	int error;

	dev_dbg(&hid->dev, "Mayflash HID hardware probe...\n");

	/* Apply quirks as needed */
	hid->quirks |= id->driver_data;

	error = hid_parse(hid);
	if (error) {
		hid_err(hid, "HID parse failed.\n");
		return error;
	}

	error = hid_hw_start(hid, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (error) {
		hid_err(hid, "HID hw start failed\n");
		return error;
	}

	error = mf_init(hid);
	if (error) {
		hid_err(hid, "Force feedback init failed.\n");
		hid_hw_stop(hid);
		return error;
	}

	return 0;
}

static const struct hid_device_id mf_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3),
		.driver_data = HID_QUIRK_MULTI_INPUT },
	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR),
		.driver_data = HID_QUIRK_MULTI_INPUT },
	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1),
		.driver_data = HID_QUIRK_MULTI_INPUT },
	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2),
		.driver_data = 0 }, /* No quirk required */
	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE3),
		.driver_data = HID_QUIRK_MULTI_INPUT },
	{ }
};
MODULE_DEVICE_TABLE(hid, mf_devices);

static struct hid_driver mf_driver = {
	.name = "hid_mf",
	.id_table = mf_devices,
	.probe = mf_probe,
};
module_hid_driver(mf_driver);

MODULE_DESCRIPTION("Force feedback support for Mayflash game controller adapters.");
MODULE_LICENSE("GPL");
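/*
 * Illustrative sketch only (not part of the driver above): mf_play() rescales
 * the 16-bit force-feedback magnitudes (0-0xffff) to the 8-bit values
 * (0-0xff) the adapter expects via x * 0xff / 0xffff. This standalone check
 * shows the mapping is exact at both endpoints and monotonic in between.
 */
#include <assert.h>

int main(void)
{
	unsigned int prev = 0, x;

	assert(0x0000 * 0xff / 0xffff == 0x00);	/* silence maps to 0 */
	assert(0xffff * 0xff / 0xffff == 0xff);	/* full magnitude maps to 255 */
	for (x = 0; x <= 0xffff; x++) {
		unsigned int scaled = x * 0xff / 0xffff;

		assert(scaled >= prev && scaled <= 0xff);
		prev = scaled;
	}
	return 0;
}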
// SPDX-License-Identifier: GPL-2.0-or-later /* */ #include <linux/init.h> #include <linux/usb.h> #include <sound/core.h> #include <sound/info.h> #include <sound/pcm.h> #include "usbaudio.h" #include "helper.h" #include "card.h" #include "endpoint.h" #include "proc.h" /* convert our full speed USB rate into sampling rate in Hz */ static inline unsigned get_full_speed_hz(unsigned int usb_rate) { return (usb_rate * 125 + (1 << 12)) >> 13; } /* convert our high speed USB rate into sampling rate in Hz */ static inline unsigned get_high_speed_hz(unsigned int usb_rate) { return (usb_rate * 125 + (1 << 9)) >> 10; } /* * common proc files to show the usb device info */ static void proc_audio_usbbus_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_usb_audio *chip = entry->private_data; if (!atomic_read(&chip->shutdown)) snd_iprintf(buffer, "%03d/%03d\n", chip->dev->bus->busnum, chip->dev->devnum); } static void proc_audio_usbid_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_usb_audio *chip = entry->private_data; if (!atomic_read(&chip->shutdown)) snd_iprintf(buffer, "%04x:%04x\n", USB_ID_VENDOR(chip->usb_id), USB_ID_PRODUCT(chip->usb_id)); } void snd_usb_audio_create_proc(struct snd_usb_audio *chip) { snd_card_ro_proc_new(chip->card, "usbbus", chip, proc_audio_usbbus_read); snd_card_ro_proc_new(chip->card, "usbid", chip, proc_audio_usbid_read); } static const char * const channel_labels[] = { [SNDRV_CHMAP_NA] = "N/A", [SNDRV_CHMAP_MONO] = "MONO", [SNDRV_CHMAP_FL] = "FL", [SNDRV_CHMAP_FR] = "FR", [SNDRV_CHMAP_FC] = "FC", [SNDRV_CHMAP_LFE] = "LFE", [SNDRV_CHMAP_RL] = "RL", [SNDRV_CHMAP_RR] = "RR", [SNDRV_CHMAP_FLC] = "FLC", [SNDRV_CHMAP_FRC] = "FRC", [SNDRV_CHMAP_RC] = "RC", [SNDRV_CHMAP_SL] = "SL", [SNDRV_CHMAP_SR] = "SR", [SNDRV_CHMAP_TC] = "TC", [SNDRV_CHMAP_TFL] = "TFL", [SNDRV_CHMAP_TFC] = "TFC", [SNDRV_CHMAP_TFR] = "TFR", [SNDRV_CHMAP_TRL] = "TRL", [SNDRV_CHMAP_TRC] = "TRC", [SNDRV_CHMAP_TRR] = "TRR", [SNDRV_CHMAP_TFLC] = "TFLC", [SNDRV_CHMAP_TFRC] = "TFRC", [SNDRV_CHMAP_LLFE] = "LLFE", [SNDRV_CHMAP_RLFE] = "RLFE", [SNDRV_CHMAP_TSL] = "TSL", [SNDRV_CHMAP_TSR] = "TSR", [SNDRV_CHMAP_BC] = "BC", [SNDRV_CHMAP_RLC] = "RLC", [SNDRV_CHMAP_RRC] = "RRC", }; /* * proc interface for listing the supported pcm formats */ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct snd_info_buffer *buffer) { struct audioformat *fp; static const char * const sync_types[4] = { "NONE", "ASYNC", "ADAPTIVE", "SYNC" }; list_for_each_entry(fp, &subs->fmt_list, list) { snd_pcm_format_t fmt; snd_iprintf(buffer, " Interface %d\n", fp->iface); snd_iprintf(buffer, " Altset %d\n", fp->altsetting);
snd_iprintf(buffer, " Format:"); pcm_for_each_format(fmt) if (fp->formats & pcm_format_to_bits(fmt)) snd_iprintf(buffer, " %s", snd_pcm_format_name(fmt)); snd_iprintf(buffer, "\n"); snd_iprintf(buffer, " Channels: %d\n", fp->channels); snd_iprintf(buffer, " Endpoint: 0x%02x (%d %s) (%s)\n", fp->endpoint, fp->endpoint & USB_ENDPOINT_NUMBER_MASK, fp->endpoint & USB_DIR_IN ? "IN" : "OUT", sync_types[(fp->ep_attr & USB_ENDPOINT_SYNCTYPE) >> 2]); if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS) { snd_iprintf(buffer, " Rates: %d - %d (continuous)\n", fp->rate_min, fp->rate_max); } else { unsigned int i; snd_iprintf(buffer, " Rates: "); for (i = 0; i < fp->nr_rates; i++) { if (i > 0) snd_iprintf(buffer, ", "); snd_iprintf(buffer, "%d", fp->rate_table[i]); } snd_iprintf(buffer, "\n"); } if (subs->speed != USB_SPEED_FULL) snd_iprintf(buffer, " Data packet interval: %d us\n", 125 * (1 << fp->datainterval)); snd_iprintf(buffer, " Bits: %d\n", fp->fmt_bits); if (fp->dsd_raw) snd_iprintf(buffer, " DSD raw: DOP=%d, bitrev=%d\n", fp->dsd_dop, fp->dsd_bitrev); if (fp->chmap) { const struct snd_pcm_chmap_elem *map = fp->chmap; int c; snd_iprintf(buffer, " Channel map:"); for (c = 0; c < map->channels; c++) { if (map->map[c] >= ARRAY_SIZE(channel_labels) || !channel_labels[map->map[c]]) snd_iprintf(buffer, " --"); else snd_iprintf(buffer, " %s", channel_labels[map->map[c]]); } snd_iprintf(buffer, "\n"); } if (fp->sync_ep) { snd_iprintf(buffer, " Sync Endpoint: 0x%02x (%d %s)\n", fp->sync_ep, fp->sync_ep & USB_ENDPOINT_NUMBER_MASK, fp->sync_ep & USB_DIR_IN ? "IN" : "OUT"); snd_iprintf(buffer, " Sync EP Interface: %d\n", fp->sync_iface); snd_iprintf(buffer, " Sync EP Altset: %d\n", fp->sync_altsetting); snd_iprintf(buffer, " Implicit Feedback Mode: %s\n", fp->implicit_fb ? "Yes" : "No"); } // snd_iprintf(buffer, " Max Packet Size = %d\n", fp->maxpacksize); // snd_iprintf(buffer, " EP Attribute = %#x\n", fp->attributes); } } static void proc_dump_ep_status(struct snd_usb_substream *subs, struct snd_usb_endpoint *data_ep, struct snd_usb_endpoint *sync_ep, struct snd_info_buffer *buffer) { if (!data_ep) return; snd_iprintf(buffer, " Packet Size = %d\n", data_ep->curpacksize); snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n", subs->speed == USB_SPEED_FULL ? get_full_speed_hz(data_ep->freqm) : get_high_speed_hz(data_ep->freqm), data_ep->freqm >> 16, data_ep->freqm & 0xffff); if (sync_ep && data_ep->freqshift != INT_MIN) { int res = 16 - data_ep->freqshift; snd_iprintf(buffer, " Feedback Format = %d.%d\n", (sync_ep->syncmaxsize > 3 ? 
32 : 24) - res, res); } } static void proc_dump_substream_status(struct snd_usb_audio *chip, struct snd_usb_substream *subs, struct snd_info_buffer *buffer) { mutex_lock(&chip->mutex); if (subs->running) { snd_iprintf(buffer, " Status: Running\n"); if (subs->cur_audiofmt) { snd_iprintf(buffer, " Interface = %d\n", subs->cur_audiofmt->iface); snd_iprintf(buffer, " Altset = %d\n", subs->cur_audiofmt->altsetting); } proc_dump_ep_status(subs, subs->data_endpoint, subs->sync_endpoint, buffer); } else { snd_iprintf(buffer, " Status: Stop\n"); } mutex_unlock(&chip->mutex); } static void proc_pcm_format_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_usb_stream *stream = entry->private_data; struct snd_usb_audio *chip = stream->chip; snd_iprintf(buffer, "%s : %s\n", chip->card->longname, stream->pcm->name); if (stream->substream[SNDRV_PCM_STREAM_PLAYBACK].num_formats) { snd_iprintf(buffer, "\nPlayback:\n"); proc_dump_substream_status(chip, &stream->substream[SNDRV_PCM_STREAM_PLAYBACK], buffer); proc_dump_substream_formats(&stream->substream[SNDRV_PCM_STREAM_PLAYBACK], buffer); } if (stream->substream[SNDRV_PCM_STREAM_CAPTURE].num_formats) { snd_iprintf(buffer, "\nCapture:\n"); proc_dump_substream_status(chip, &stream->substream[SNDRV_PCM_STREAM_CAPTURE], buffer); proc_dump_substream_formats(&stream->substream[SNDRV_PCM_STREAM_CAPTURE], buffer); } } void snd_usb_proc_pcm_format_add(struct snd_usb_stream *stream) { char name[32]; struct snd_card *card = stream->chip->card; sprintf(name, "stream%d", stream->pcm_index); snd_card_ro_proc_new(card, name, stream, proc_pcm_format_read); }
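The two rate helpers at the top of this file treat the endpoint's measured feedback value (freqm) as a Q16.16 fixed-point count of samples per frame, which is what the "%#x.%04x" dump above implies: full speed uses 1 ms frames (Hz = freqm * 1000 / 65536) and high speed uses 125 us microframes (Hz = freqm * 8000 / 65536), and the added constants (1 << 12 and 1 << 9) are half the divisor, giving round-to-nearest. A standalone sketch of the same arithmetic with illustrative values:

/* Standalone illustration of the Q16.16 feedback-rate math above. */
#include <stdio.h>

static unsigned int full_speed_hz(unsigned int usb_rate)
{
    return (usb_rate * 125 + (1 << 12)) >> 13;  /* = rate * 1000 / 65536, rounded */
}

static unsigned int high_speed_hz(unsigned int usb_rate)
{
    return (usb_rate * 125 + (1 << 9)) >> 10;   /* = rate * 8000 / 65536, rounded */
}

int main(void)
{
    /* 44.1 samples per 1 ms frame in 16.16 fixed point */
    unsigned int fs = (unsigned int)(44.1 * 65536);
    /* 6 samples per 125 us microframe, i.e. 48 kHz */
    unsigned int hs = 6 << 16;

    printf("full speed:  %u Hz\n", full_speed_hz(fs)); /* 44100 */
    printf("high speed: %u Hz\n", high_speed_hz(hs));  /* 48000 */
    return 0;
}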
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018, Intel Corporation. */ /* A common module to handle registrations and notifications for paravirtual * drivers to enable accelerated datapath and support VF live migration. * * The notifier and event handling code is based on netvsc driver. */ #include <linux/module.h> #include <linux/etherdevice.h> #include <uapi/linux/if_arp.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> #include <net/failover.h> static LIST_HEAD(failover_list); static DEFINE_SPINLOCK(failover_lock); static struct net_device *failover_get_bymac(u8 *mac, struct failover_ops **ops) { struct net_device *failover_dev; struct failover *failover; spin_lock(&failover_lock); list_for_each_entry(failover, &failover_list, list) { failover_dev = rtnl_dereference(failover->failover_dev); if (ether_addr_equal(failover_dev->perm_addr, mac)) { *ops = rtnl_dereference(failover->ops); spin_unlock(&failover_lock); return failover_dev; } } spin_unlock(&failover_lock); return NULL; } /** * failover_slave_register - Register a slave netdev * * @slave_dev: slave netdev that is being registered * * Registers a slave device to a failover instance. Only ethernet devices * are supported.
*/ static int failover_slave_register(struct net_device *slave_dev) { struct netdev_lag_upper_info lag_upper_info; struct net_device *failover_dev; struct failover_ops *fops; int err; if (slave_dev->type != ARPHRD_ETHER) goto done; ASSERT_RTNL(); failover_dev = failover_get_bymac(slave_dev->perm_addr, &fops); if (!failover_dev) goto done; if (fops && fops->slave_pre_register && fops->slave_pre_register(slave_dev, failover_dev)) goto done; err = netdev_rx_handler_register(slave_dev, fops->slave_handle_frame, failover_dev); if (err) { netdev_err(slave_dev, "can not register failover rx handler (err = %d)\n", err); goto done; } lag_upper_info.tx_type = NETDEV_LAG_TX_TYPE_ACTIVEBACKUP; err = netdev_master_upper_dev_link(slave_dev, failover_dev, NULL, &lag_upper_info, NULL); if (err) { netdev_err(slave_dev, "can not set failover device %s (err = %d)\n", failover_dev->name, err); goto err_upper_link; } slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_NO_ADDRCONF); if (fops && fops->slave_register && !fops->slave_register(slave_dev, failover_dev)) return NOTIFY_OK; netdev_upper_dev_unlink(slave_dev, failover_dev); slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_NO_ADDRCONF); err_upper_link: netdev_rx_handler_unregister(slave_dev); done: return NOTIFY_DONE; } /** * failover_slave_unregister - Unregister a slave netdev * * @slave_dev: slave netdev that is being unregistered * * Unregisters a slave device from a failover instance. */ int failover_slave_unregister(struct net_device *slave_dev) { struct net_device *failover_dev; struct failover_ops *fops; if (!netif_is_failover_slave(slave_dev)) goto done; ASSERT_RTNL(); failover_dev = failover_get_bymac(slave_dev->perm_addr, &fops); if (!failover_dev) goto done; if (fops && fops->slave_pre_unregister && fops->slave_pre_unregister(slave_dev, failover_dev)) goto done; netdev_rx_handler_unregister(slave_dev); netdev_upper_dev_unlink(slave_dev, failover_dev); slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_NO_ADDRCONF); if (fops && fops->slave_unregister && !fops->slave_unregister(slave_dev, failover_dev)) return NOTIFY_OK; done: return NOTIFY_DONE; } EXPORT_SYMBOL_GPL(failover_slave_unregister); static int failover_slave_link_change(struct net_device *slave_dev) { struct net_device *failover_dev; struct failover_ops *fops; if (!netif_is_failover_slave(slave_dev)) goto done; ASSERT_RTNL(); failover_dev = failover_get_bymac(slave_dev->perm_addr, &fops); if (!failover_dev) goto done; if (!netif_running(failover_dev)) goto done; if (fops && fops->slave_link_change && !fops->slave_link_change(slave_dev, failover_dev)) return NOTIFY_OK; done: return NOTIFY_DONE; } static int failover_slave_name_change(struct net_device *slave_dev) { struct net_device *failover_dev; struct failover_ops *fops; if (!netif_is_failover_slave(slave_dev)) goto done; ASSERT_RTNL(); failover_dev = failover_get_bymac(slave_dev->perm_addr, &fops); if (!failover_dev) goto done; if (!netif_running(failover_dev)) goto done; if (fops && fops->slave_name_change && !fops->slave_name_change(slave_dev, failover_dev)) return NOTIFY_OK; done: return NOTIFY_DONE; } static int failover_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); /* Skip parent events */ if (netif_is_failover(event_dev)) return NOTIFY_DONE; switch (event) { case NETDEV_REGISTER: return failover_slave_register(event_dev); case NETDEV_UNREGISTER: return failover_slave_unregister(event_dev); case NETDEV_UP: case NETDEV_DOWN: case 
NETDEV_CHANGE: return failover_slave_link_change(event_dev); case NETDEV_CHANGENAME: return failover_slave_name_change(event_dev); default: return NOTIFY_DONE; } } static struct notifier_block failover_notifier = { .notifier_call = failover_event, }; static void failover_existing_slave_register(struct net_device *failover_dev) { struct net *net = dev_net(failover_dev); struct net_device *dev; rtnl_lock(); for_each_netdev(net, dev) { if (netif_is_failover(dev)) continue; if (ether_addr_equal(failover_dev->perm_addr, dev->perm_addr)) failover_slave_register(dev); } rtnl_unlock(); } /** * failover_register - Register a failover instance * * @dev: failover netdev * @ops: failover ops * * Allocate and register a failover instance for a failover netdev. ops * provides handlers for slave device register/unregister/link change/ * name change events. * * Return: pointer to failover instance */ struct failover *failover_register(struct net_device *dev, struct failover_ops *ops) { struct failover *failover; if (dev->type != ARPHRD_ETHER) return ERR_PTR(-EINVAL); failover = kzalloc(sizeof(*failover), GFP_KERNEL); if (!failover) return ERR_PTR(-ENOMEM); rcu_assign_pointer(failover->ops, ops); netdev_hold(dev, &failover->dev_tracker, GFP_KERNEL); dev->priv_flags |= IFF_FAILOVER; rcu_assign_pointer(failover->failover_dev, dev); spin_lock(&failover_lock); list_add_tail(&failover->list, &failover_list); spin_unlock(&failover_lock); netdev_info(dev, "failover master:%s registered\n", dev->name); failover_existing_slave_register(dev); return failover; } EXPORT_SYMBOL_GPL(failover_register); /** * failover_unregister - Unregister a failover instance * * @failover: pointer to failover instance * * Unregisters and frees a failover instance. */ void failover_unregister(struct failover *failover) { struct net_device *failover_dev; failover_dev = rcu_dereference(failover->failover_dev); netdev_info(failover_dev, "failover master:%s unregistered\n", failover_dev->name); failover_dev->priv_flags &= ~IFF_FAILOVER; netdev_put(failover_dev, &failover->dev_tracker); spin_lock(&failover_lock); list_del(&failover->list); spin_unlock(&failover_lock); kfree(failover); } EXPORT_SYMBOL_GPL(failover_unregister); static __init int failover_init(void) { register_netdevice_notifier(&failover_notifier); return 0; } module_init(failover_init); static __exit void failover_exit(void) { unregister_netdevice_notifier(&failover_notifier); } module_exit(failover_exit); MODULE_DESCRIPTION("Generic failover infrastructure/interface"); MODULE_LICENSE("GPL v2");
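For context, a consumer of this API registers its master netdev together with a failover_ops table and then lets the notifier above attach any slave whose permanent MAC matches. A hedged sketch, loosely modeled on the net_failover driver, follows; the my_*() names are hypothetical and only two of the optional callbacks are filled in:

/* Hypothetical consumer sketch of the failover API above. */
#include <linux/err.h>
#include <net/failover.h>

static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
{
    /* a real driver would retarget the skb at the failover master,
     * e.g. (*pskb)->dev = ...; here we just pass it up unchanged */
    return RX_HANDLER_PASS;
}

static int my_slave_register(struct net_device *slave_dev,
                             struct net_device *failover_dev)
{
    netdev_info(failover_dev, "slave %s attached\n", slave_dev->name);
    return 0;   /* 0 makes failover_slave_register() return NOTIFY_OK */
}

static struct failover_ops my_failover_ops = {
    .slave_register     = my_slave_register,
    .slave_handle_frame = my_handle_frame,
};

static struct failover *my_failover;

static int my_setup(struct net_device *master)
{
    /* master must be ARPHRD_ETHER; slaves are matched by perm_addr */
    my_failover = failover_register(master, &my_failover_ops);
    if (IS_ERR(my_failover))
        return PTR_ERR(my_failover);
    return 0;
}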
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth kernel library. */ #define pr_fmt(fmt) "Bluetooth: " fmt #include <linux/export.h> #include <net/bluetooth/bluetooth.h> /** * baswap() - Swaps the order of a bd address * @dst: Pointer to a bdaddr_t struct that will store the swapped * bd address. * @src: Pointer to the bdaddr_t struct to be swapped. * * This function reverses the byte order of a Bluetooth device * address. */ void baswap(bdaddr_t *dst, const bdaddr_t *src) { const unsigned char *s = (const unsigned char *)src; unsigned char *d = (unsigned char *)dst; unsigned int i; for (i = 0; i < 6; i++) d[i] = s[5 - i]; } EXPORT_SYMBOL(baswap); /** * bt_to_errno() - Bluetooth error codes to standard errno * @code: Bluetooth error code to be converted * * This function takes a Bluetooth error code as input and converts * it to an equivalent Unix/standard errno value. * * Return: * * If the bt error code is known, an equivalent Unix errno value * is returned. * If the given bt error code is not known, ENOSYS is returned.
*/ int bt_to_errno(__u16 code) { switch (code) { case 0: return 0; case 0x01: return EBADRQC; case 0x02: return ENOTCONN; case 0x03: return EIO; case 0x04: case 0x3c: return EHOSTDOWN; case 0x05: return EACCES; case 0x06: return EBADE; case 0x07: return ENOMEM; case 0x08: return ETIMEDOUT; case 0x09: return EMLINK; case 0x0a: return EMLINK; case 0x0b: return EALREADY; case 0x0c: return EBUSY; case 0x0d: case 0x0e: case 0x0f: return ECONNREFUSED; case 0x10: return ETIMEDOUT; case 0x11: case 0x27: case 0x29: case 0x20: return EOPNOTSUPP; case 0x12: return EINVAL; case 0x13: case 0x14: case 0x15: return ECONNRESET; case 0x16: return ECONNABORTED; case 0x17: return ELOOP; case 0x18: return EACCES; case 0x1a: return EPROTONOSUPPORT; case 0x1b: return ECONNREFUSED; case 0x19: case 0x1e: case 0x23: case 0x24: case 0x25: return EPROTO; default: return ENOSYS; } } EXPORT_SYMBOL(bt_to_errno); /** * bt_status() - Standard errno value to Bluetooth error code * @err: Unix/standard errno value to be converted * * This function converts a standard/Unix errno value to an * equivalent Bluetooth error code. * * Return: Bluetooth error code. * * If the given errno is not found, 0x1f is returned by default * which indicates an unspecified error. * For err >= 0, no conversion is performed, and the same value * is immediately returned. */ __u8 bt_status(int err) { if (err >= 0) return err; switch (err) { case -EBADRQC: return 0x01; case -ENOTCONN: return 0x02; case -EIO: return 0x03; case -EHOSTDOWN: return 0x04; case -EACCES: return 0x05; case -EBADE: return 0x06; case -ENOMEM: return 0x07; case -ETIMEDOUT: return 0x08; case -EMLINK: return 0x09; case -EALREADY: return 0x0b; case -EBUSY: return 0x0c; case -ECONNREFUSED: return 0x0d; case -EOPNOTSUPP: return 0x11; case -EINVAL: return 0x12; case -ECONNRESET: return 0x13; case -ECONNABORTED: return 0x16; case -ELOOP: return 0x17; case -EPROTONOSUPPORT: return 0x1a; case -EPROTO: return 0x19; default: return 0x1f; } } EXPORT_SYMBOL(bt_status); /** * bt_info() - Log Bluetooth information message * @format: Message's format string */ void bt_info(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_info("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_info); /** * bt_warn() - Log Bluetooth warning message * @format: Message's format string */ void bt_warn(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_warn("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_warn); /** * bt_err() - Log Bluetooth error message * @format: Message's format string */ void bt_err(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_err("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_err); #ifdef CONFIG_BT_FEATURE_DEBUG static bool debug_enable; void bt_dbg_set(bool enable) { debug_enable = enable; } bool bt_dbg_get(void) { return debug_enable; } /** * bt_dbg() - Log Bluetooth debugging message * @format: Message's format string */ void bt_dbg(const char *format, ...) 
{ struct va_format vaf; va_list args; if (likely(!debug_enable)) return; va_start(args, format); vaf.fmt = format; vaf.va = &args; printk(KERN_DEBUG pr_fmt("%pV"), &vaf); va_end(args); } EXPORT_SYMBOL(bt_dbg); #endif /** * bt_warn_ratelimited() - Log rate-limited Bluetooth warning message * @format: Message's format string * * This function works like bt_warn, but it uses rate limiting * to prevent the message from being logged too often. */ void bt_warn_ratelimited(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_warn_ratelimited("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_warn_ratelimited); /** * bt_err_ratelimited() - Log rate-limited Bluetooth error message * @format: Message's format string * * This function works like bt_err, but it uses rate limiting * to prevent the message from being logged too often. */ void bt_err_ratelimited(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_err_ratelimited("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_err_ratelimited);
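A small illustration of how the two mappings above compose. Note that the conversion is lossy: several HCI codes collapse onto one errno (for example both 0x08 and 0x10 become ETIMEDOUT), so a round trip through bt_status() yields the first code listed for that errno, not necessarily the original:

/* Illustration only: the HCI <-> errno mapping is not a bijection. */
#include <linux/errno.h>
#include <net/bluetooth/bluetooth.h>

static void demo_status_mapping(void)
{
    int err = bt_to_errno(0x08);    /* connection timeout -> ETIMEDOUT */
    __u8 status = bt_status(-err);  /* negative errno -> back to 0x08 */

    pr_info("0x08 -> %d -> 0x%02x\n", err, status);
    /* 0x10 also maps to ETIMEDOUT, so it round-trips to 0x08, not 0x10 */
    pr_info("0x10 -> %d -> 0x%02x\n", bt_to_errno(0x10),
            bt_status(-bt_to_errno(0x10)));
}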
// SPDX-License-Identifier: GPL-2.0 /* * bus.c - bus driver management * * Copyright (c) 2002-3 Patrick Mochel * Copyright (c) 2002-3 Open Source Development Labs * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (c) 2007 Novell Inc.
* Copyright (c) 2023 Greg Kroah-Hartman <gregkh@linuxfoundation.org> */ #include <linux/async.h> #include <linux/device/bus.h> #include <linux/device.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/string.h> #include <linux/mutex.h> #include <linux/sysfs.h> #include "base.h" #include "power/power.h" /* /sys/devices/system */ static struct kset *system_kset; /* /sys/bus */ static struct kset *bus_kset; #define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr) /* * sysfs bindings for drivers */ #define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr) #define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ struct driver_attribute driver_attr_##_name = \ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) static int __must_check bus_rescan_devices_helper(struct device *dev, void *data); /** * bus_to_subsys - Turn a struct bus_type into a struct subsys_private * * @bus: pointer to the struct bus_type to look up * * The driver core internals need to work on the subsys_private structure, not * the external struct bus_type pointer. This function walks the list of * registered busses in the system and finds the matching one and returns the * internal struct subsys_private that relates to that bus. * * Note, the reference count of the return value is INCREMENTED if it is not * NULL. A call to subsys_put() must be done when finished with the pointer in * order for it to be properly freed. */ static struct subsys_private *bus_to_subsys(const struct bus_type *bus) { struct subsys_private *sp = NULL; struct kobject *kobj; if (!bus || !bus_kset) return NULL; spin_lock(&bus_kset->list_lock); if (list_empty(&bus_kset->list)) goto done; list_for_each_entry(kobj, &bus_kset->list, entry) { struct kset *kset = container_of(kobj, struct kset, kobj); sp = container_of_const(kset, struct subsys_private, subsys); if (sp->bus == bus) goto done; } sp = NULL; done: sp = subsys_get(sp); spin_unlock(&bus_kset->list_lock); return sp; } static const struct bus_type *bus_get(const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); if (sp) return bus; return NULL; } static void bus_put(const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); /* two puts are required as the call to bus_to_subsys incremented it again */ subsys_put(sp); subsys_put(sp); } static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct driver_attribute *drv_attr = to_drv_attr(attr); struct driver_private *drv_priv = to_driver(kobj); ssize_t ret = -EIO; if (drv_attr->show) ret = drv_attr->show(drv_priv->driver, buf); return ret; } static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct driver_attribute *drv_attr = to_drv_attr(attr); struct driver_private *drv_priv = to_driver(kobj); ssize_t ret = -EIO; if (drv_attr->store) ret = drv_attr->store(drv_priv->driver, buf, count); return ret; } static const struct sysfs_ops driver_sysfs_ops = { .show = drv_attr_show, .store = drv_attr_store, }; static void driver_release(struct kobject *kobj) { struct driver_private *drv_priv = to_driver(kobj); pr_debug("driver: '%s': %s\n", kobject_name(kobj), __func__); kfree(drv_priv); } static const struct kobj_type driver_ktype = { .sysfs_ops = &driver_sysfs_ops, .release = driver_release, }; /* * sysfs bindings for buses */ static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) {
struct bus_attribute *bus_attr = to_bus_attr(attr); struct subsys_private *subsys_priv = to_subsys_private(kobj); /* return -EIO for reading a bus attribute without show() */ ssize_t ret = -EIO; if (bus_attr->show) ret = bus_attr->show(subsys_priv->bus, buf); return ret; } static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct bus_attribute *bus_attr = to_bus_attr(attr); struct subsys_private *subsys_priv = to_subsys_private(kobj); /* return -EIO for writing a bus attribute without store() */ ssize_t ret = -EIO; if (bus_attr->store) ret = bus_attr->store(subsys_priv->bus, buf, count); return ret; } static const struct sysfs_ops bus_sysfs_ops = { .show = bus_attr_show, .store = bus_attr_store, }; int bus_create_file(const struct bus_type *bus, struct bus_attribute *attr) { struct subsys_private *sp = bus_to_subsys(bus); int error; if (!sp) return -EINVAL; error = sysfs_create_file(&sp->subsys.kobj, &attr->attr); subsys_put(sp); return error; } EXPORT_SYMBOL_GPL(bus_create_file); void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr) { struct subsys_private *sp = bus_to_subsys(bus); if (!sp) return; sysfs_remove_file(&sp->subsys.kobj, &attr->attr); subsys_put(sp); } EXPORT_SYMBOL_GPL(bus_remove_file); static void bus_release(struct kobject *kobj) { struct subsys_private *priv = to_subsys_private(kobj); lockdep_unregister_key(&priv->lock_key); kfree(priv); } static const struct kobj_type bus_ktype = { .sysfs_ops = &bus_sysfs_ops, .release = bus_release, }; static int bus_uevent_filter(const struct kobject *kobj) { const struct kobj_type *ktype = get_ktype(kobj); if (ktype == &bus_ktype) return 1; return 0; } static const struct kset_uevent_ops bus_uevent_ops = { .filter = bus_uevent_filter, }; /* Manually detach a device from its associated driver. */ static ssize_t unbind_store(struct device_driver *drv, const char *buf, size_t count) { const struct bus_type *bus = bus_get(drv->bus); struct device *dev; int err = -ENODEV; dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == drv) { device_driver_detach(dev); err = count; } put_device(dev); bus_put(bus); return err; } static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store); /* * Manually attach a device to a driver. * Note: the driver must want to bind to the device, * it is not possible to override the driver's id table. 
*/ static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t count) { const struct bus_type *bus = bus_get(drv->bus); struct device *dev; int err = -ENODEV; dev = bus_find_device_by_name(bus, NULL, buf); if (dev && driver_match_device(drv, dev)) { err = device_driver_attach(drv, dev); if (!err) { /* success */ err = count; } } put_device(dev); bus_put(bus); return err; } static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store); static ssize_t drivers_autoprobe_show(const struct bus_type *bus, char *buf) { struct subsys_private *sp = bus_to_subsys(bus); int ret; if (!sp) return -EINVAL; ret = sysfs_emit(buf, "%d\n", sp->drivers_autoprobe); subsys_put(sp); return ret; } static ssize_t drivers_autoprobe_store(const struct bus_type *bus, const char *buf, size_t count) { struct subsys_private *sp = bus_to_subsys(bus); if (!sp) return -EINVAL; if (buf[0] == '0') sp->drivers_autoprobe = 0; else sp->drivers_autoprobe = 1; subsys_put(sp); return count; } static ssize_t drivers_probe_store(const struct bus_type *bus, const char *buf, size_t count) { struct device *dev; int err = -EINVAL; dev = bus_find_device_by_name(bus, NULL, buf); if (!dev) return -ENODEV; if (bus_rescan_devices_helper(dev, NULL) == 0) err = count; put_device(dev); return err; } static struct device *next_device(struct klist_iter *i) { struct klist_node *n = klist_next(i); struct device *dev = NULL; struct device_private *dev_prv; if (n) { dev_prv = to_device_private_bus(n); dev = dev_prv->device; } return dev; } /** * bus_for_each_dev - device iterator. * @bus: bus type. * @start: device to start iterating from. * @data: data for the callback. * @fn: function to be called for each device. * * Iterate over @bus's list of devices, and call @fn for each, * passing it @data. If @start is not NULL, we use that device to * begin iterating from. * * We check the return of @fn each time. If it returns anything * other than 0, we break out and return that value. * * NOTE: The device that returns a non-zero value is not retained * in any way, nor is its refcount incremented. If the caller needs * to retain this data, it should do so, and increment the reference * count in the supplied callback. */ int bus_for_each_dev(const struct bus_type *bus, struct device *start, void *data, device_iter_t fn) { struct subsys_private *sp = bus_to_subsys(bus); struct klist_iter i; struct device *dev; int error = 0; if (!sp) return -EINVAL; klist_iter_init_node(&sp->klist_devices, &i, (start ? &start->p->knode_bus : NULL)); while (!error && (dev = next_device(&i))) error = fn(dev, data); klist_iter_exit(&i); subsys_put(sp); return error; } EXPORT_SYMBOL_GPL(bus_for_each_dev); /** * bus_find_device - device iterator for locating a particular device. * @bus: bus type * @start: Device to begin with * @data: Data to pass to match function * @match: Callback function to check device * * This is similar to the bus_for_each_dev() function above, but it * returns a reference to a device that is 'found' for later use, as * determined by the @match callback. * * The callback should return 0 if the device doesn't match and non-zero * if it does. If the callback returns non-zero, this function will * return to the caller and not iterate over any more devices. 
*/ struct device *bus_find_device(const struct bus_type *bus, struct device *start, const void *data, device_match_t match) { struct subsys_private *sp = bus_to_subsys(bus); struct klist_iter i; struct device *dev; if (!sp) return NULL; klist_iter_init_node(&sp->klist_devices, &i, (start ? &start->p->knode_bus : NULL)); while ((dev = next_device(&i))) { if (match(dev, data)) { get_device(dev); break; } } klist_iter_exit(&i); subsys_put(sp); return dev; } EXPORT_SYMBOL_GPL(bus_find_device); static struct device_driver *next_driver(struct klist_iter *i) { struct klist_node *n = klist_next(i); struct driver_private *drv_priv; if (n) { drv_priv = container_of(n, struct driver_private, knode_bus); return drv_priv->driver; } return NULL; } /** * bus_for_each_drv - driver iterator * @bus: bus we're dealing with. * @start: driver to start iterating on. * @data: data to pass to the callback. * @fn: function to call for each driver. * * This is nearly identical to the device iterator above. * We iterate over each driver that belongs to @bus, and call * @fn for each. If @fn returns anything but 0, we break out * and return it. If @start is not NULL, we use it as the head * of the list. * * NOTE: we don't return the driver that returns a non-zero * value, nor do we leave the reference count incremented for that * driver. If the caller needs to know that info, it must set it * in the callback. It must also be sure to increment the refcount * so it doesn't disappear before returning to the caller. */ int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start, void *data, int (*fn)(struct device_driver *, void *)) { struct subsys_private *sp = bus_to_subsys(bus); struct klist_iter i; struct device_driver *drv; int error = 0; if (!sp) return -EINVAL; klist_iter_init_node(&sp->klist_drivers, &i, start ? &start->p->knode_bus : NULL); while ((drv = next_driver(&i)) && !error) error = fn(drv, data); klist_iter_exit(&i); subsys_put(sp); return error; } EXPORT_SYMBOL_GPL(bus_for_each_drv); /** * bus_add_device - add device to bus * @dev: device being added * * - Add device's bus attributes. * - Create links to device's bus. * - Add the device to its bus's list of devices. */ int bus_add_device(struct device *dev) { struct subsys_private *sp = bus_to_subsys(dev->bus); int error; if (!sp) { /* * This is a normal operation for many devices that do not * have a bus assigned to them, just say that all went * well. */ return 0; } /* * Reference in sp is now incremented and will be dropped when * the device is removed from the bus */ pr_debug("bus: '%s': add device %s\n", sp->bus->name, dev_name(dev)); error = device_add_groups(dev, sp->bus->dev_groups); if (error) goto out_put; error = sysfs_create_link(&sp->devices_kset->kobj, &dev->kobj, dev_name(dev)); if (error) goto out_groups; error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem"); if (error) goto out_subsys; klist_add_tail(&dev->p->knode_bus, &sp->klist_devices); return 0; out_subsys: sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev)); out_groups: device_remove_groups(dev, sp->bus->dev_groups); out_put: subsys_put(sp); return error; } /** * bus_probe_device - probe drivers for a new device * @dev: device to probe * * - Automatically probe for a driver if the bus allows it. 
*/ void bus_probe_device(struct device *dev) { struct subsys_private *sp = bus_to_subsys(dev->bus); struct subsys_interface *sif; if (!sp) return; if (sp->drivers_autoprobe) device_initial_probe(dev); mutex_lock(&sp->mutex); list_for_each_entry(sif, &sp->interfaces, node) if (sif->add_dev) sif->add_dev(dev, sif); mutex_unlock(&sp->mutex); subsys_put(sp); } /** * bus_remove_device - remove device from bus * @dev: device to be removed * * - Remove device from all interfaces. * - Remove symlink from bus' directory. * - Delete device from bus's list. * - Detach from its driver. * - Drop reference taken in bus_add_device(). */ void bus_remove_device(struct device *dev) { struct subsys_private *sp = bus_to_subsys(dev->bus); struct subsys_interface *sif; if (!sp) return; mutex_lock(&sp->mutex); list_for_each_entry(sif, &sp->interfaces, node) if (sif->remove_dev) sif->remove_dev(dev, sif); mutex_unlock(&sp->mutex); sysfs_remove_link(&dev->kobj, "subsystem"); sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev)); device_remove_groups(dev, dev->bus->dev_groups); if (klist_node_attached(&dev->p->knode_bus)) klist_del(&dev->p->knode_bus); pr_debug("bus: '%s': remove device %s\n", dev->bus->name, dev_name(dev)); device_release_driver(dev); /* * Decrement the reference count twice, once for the bus_to_subsys() * call in the start of this function, and the second one from the * reference increment in bus_add_device() */ subsys_put(sp); subsys_put(sp); } static int __must_check add_bind_files(struct device_driver *drv) { int ret; ret = driver_create_file(drv, &driver_attr_unbind); if (ret == 0) { ret = driver_create_file(drv, &driver_attr_bind); if (ret) driver_remove_file(drv, &driver_attr_unbind); } return ret; } static void remove_bind_files(struct device_driver *drv) { driver_remove_file(drv, &driver_attr_bind); driver_remove_file(drv, &driver_attr_unbind); } static BUS_ATTR_WO(drivers_probe); static BUS_ATTR_RW(drivers_autoprobe); static int add_probe_files(const struct bus_type *bus) { int retval; retval = bus_create_file(bus, &bus_attr_drivers_probe); if (retval) goto out; retval = bus_create_file(bus, &bus_attr_drivers_autoprobe); if (retval) bus_remove_file(bus, &bus_attr_drivers_probe); out: return retval; } static void remove_probe_files(const struct bus_type *bus) { bus_remove_file(bus, &bus_attr_drivers_autoprobe); bus_remove_file(bus, &bus_attr_drivers_probe); } static ssize_t uevent_store(struct device_driver *drv, const char *buf, size_t count) { int rc; rc = kobject_synth_uevent(&drv->p->kobj, buf, count); return rc ? rc : count; } static DRIVER_ATTR_WO(uevent); /** * bus_add_driver - Add a driver to the bus. * @drv: driver. 
*/ int bus_add_driver(struct device_driver *drv) { struct subsys_private *sp = bus_to_subsys(drv->bus); struct driver_private *priv; int error = 0; if (!sp) return -EINVAL; /* * Reference in sp is now incremented and will be dropped when * the driver is removed from the bus */ pr_debug("bus: '%s': add driver %s\n", sp->bus->name, drv->name); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { error = -ENOMEM; goto out_put_bus; } klist_init(&priv->klist_devices, NULL, NULL); priv->driver = drv; drv->p = priv; priv->kobj.kset = sp->drivers_kset; error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL, "%s", drv->name); if (error) goto out_unregister; klist_add_tail(&priv->knode_bus, &sp->klist_drivers); if (sp->drivers_autoprobe) { error = driver_attach(drv); if (error) goto out_del_list; } error = module_add_driver(drv->owner, drv); if (error) { printk(KERN_ERR "%s: failed to create module links for %s\n", __func__, drv->name); goto out_detach; } error = driver_create_file(drv, &driver_attr_uevent); if (error) { printk(KERN_ERR "%s: uevent attr (%s) failed\n", __func__, drv->name); } error = driver_add_groups(drv, sp->bus->drv_groups); if (error) { /* How the hell do we get out of this pickle? Give up */ printk(KERN_ERR "%s: driver_add_groups(%s) failed\n", __func__, drv->name); } if (!drv->suppress_bind_attrs) { error = add_bind_files(drv); if (error) { /* Ditto */ printk(KERN_ERR "%s: add_bind_files(%s) failed\n", __func__, drv->name); } } return 0; out_detach: driver_detach(drv); out_del_list: klist_del(&priv->knode_bus); out_unregister: kobject_put(&priv->kobj); /* drv->p is freed in driver_release() */ drv->p = NULL; out_put_bus: subsys_put(sp); return error; } /** * bus_remove_driver - delete driver from bus's knowledge. * @drv: driver. * * Detach the driver from the devices it controls, and remove * it from its bus's list of drivers. Finally, we drop the reference * to the bus we took in bus_add_driver(). */ void bus_remove_driver(struct device_driver *drv) { struct subsys_private *sp = bus_to_subsys(drv->bus); if (!sp) return; pr_debug("bus: '%s': remove driver %s\n", sp->bus->name, drv->name); if (!drv->suppress_bind_attrs) remove_bind_files(drv); driver_remove_groups(drv, sp->bus->drv_groups); driver_remove_file(drv, &driver_attr_uevent); klist_remove(&drv->p->knode_bus); driver_detach(drv); module_remove_driver(drv); kobject_put(&drv->p->kobj); /* * Decrement the reference count twice, once for the bus_to_subsys() * call in the start of this function, and the second one from the * reference increment in bus_add_driver() */ subsys_put(sp); subsys_put(sp); } /* Helper for bus_rescan_devices's iter */ static int __must_check bus_rescan_devices_helper(struct device *dev, void *data) { int ret = 0; if (!dev->driver) { if (dev->parent && dev->bus->need_parent_lock) device_lock(dev->parent); ret = device_attach(dev); if (dev->parent && dev->bus->need_parent_lock) device_unlock(dev->parent); } return ret < 0 ? ret : 0; } /** * bus_rescan_devices - rescan devices on the bus for possible drivers * @bus: the bus to scan. * * This function will look for devices on the bus with no driver * attached and rescan it against existing drivers to see if it matches * any by calling device_attach() for the unbound devices. 
*/ int bus_rescan_devices(const struct bus_type *bus) { return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper); } EXPORT_SYMBOL_GPL(bus_rescan_devices); /** * device_reprobe - remove driver for a device and probe for a new driver * @dev: the device to reprobe * * This function detaches the attached driver (if any) for the given * device and restarts the driver probing process. It is intended * to be used if probing criteria change during a device's lifetime and * driver attachment should change accordingly. */ int device_reprobe(struct device *dev) { if (dev->driver) device_driver_detach(dev); return bus_rescan_devices_helper(dev, NULL); } EXPORT_SYMBOL_GPL(device_reprobe); static void klist_devices_get(struct klist_node *n) { struct device_private *dev_prv = to_device_private_bus(n); struct device *dev = dev_prv->device; get_device(dev); } static void klist_devices_put(struct klist_node *n) { struct device_private *dev_prv = to_device_private_bus(n); struct device *dev = dev_prv->device; put_device(dev); } static ssize_t bus_uevent_store(const struct bus_type *bus, const char *buf, size_t count) { struct subsys_private *sp = bus_to_subsys(bus); int ret; if (!sp) return -EINVAL; ret = kobject_synth_uevent(&sp->subsys.kobj, buf, count); subsys_put(sp); if (ret) return ret; return count; } /* * "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO() * here, but cannot use it as earlier in the file we have * DEVICE_ATTR_WO(uevent), which would cause a clash with the store * function name. */ static struct bus_attribute bus_attr_uevent = __ATTR(uevent, 0200, NULL, bus_uevent_store); /** * bus_register - register a driver-core subsystem * @bus: bus to register * * Once we have that, we register the bus with the kobject * infrastructure, then register the children subsystems it has: * the devices and drivers that belong to the subsystem.
*/ int bus_register(const struct bus_type *bus) { int retval; struct subsys_private *priv; struct kobject *bus_kobj; struct lock_class_key *key; priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL); if (!priv) return -ENOMEM; priv->bus = bus; BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier); bus_kobj = &priv->subsys.kobj; retval = kobject_set_name(bus_kobj, "%s", bus->name); if (retval) goto out; bus_kobj->kset = bus_kset; bus_kobj->ktype = &bus_ktype; priv->drivers_autoprobe = 1; retval = kset_register(&priv->subsys); if (retval) goto out; retval = bus_create_file(bus, &bus_attr_uevent); if (retval) goto bus_uevent_fail; priv->devices_kset = kset_create_and_add("devices", NULL, bus_kobj); if (!priv->devices_kset) { retval = -ENOMEM; goto bus_devices_fail; } priv->drivers_kset = kset_create_and_add("drivers", NULL, bus_kobj); if (!priv->drivers_kset) { retval = -ENOMEM; goto bus_drivers_fail; } INIT_LIST_HEAD(&priv->interfaces); key = &priv->lock_key; lockdep_register_key(key); __mutex_init(&priv->mutex, "subsys mutex", key); klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put); klist_init(&priv->klist_drivers, NULL, NULL); retval = add_probe_files(bus); if (retval) goto bus_probe_files_fail; retval = sysfs_create_groups(bus_kobj, bus->bus_groups); if (retval) goto bus_groups_fail; pr_debug("bus: '%s': registered\n", bus->name); return 0; bus_groups_fail: remove_probe_files(bus); bus_probe_files_fail: kset_unregister(priv->drivers_kset); bus_drivers_fail: kset_unregister(priv->devices_kset); bus_devices_fail: bus_remove_file(bus, &bus_attr_uevent); bus_uevent_fail: kset_unregister(&priv->subsys); /* Above kset_unregister() will kfree @priv */ priv = NULL; out: kfree(priv); return retval; } EXPORT_SYMBOL_GPL(bus_register); /** * bus_unregister - remove a bus from the system * @bus: bus. * * Unregister the child subsystems and the bus itself. 
* Finally, we drop the reference taken in bus_to_subsys(). */ void bus_unregister(const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); struct kobject *bus_kobj; if (!sp) return; pr_debug("bus: '%s': unregistering\n", bus->name); if (sp->dev_root) device_unregister(sp->dev_root); bus_kobj = &sp->subsys.kobj; sysfs_remove_groups(bus_kobj, bus->bus_groups); remove_probe_files(bus); bus_remove_file(bus, &bus_attr_uevent); kset_unregister(sp->drivers_kset); kset_unregister(sp->devices_kset); kset_unregister(&sp->subsys); subsys_put(sp); } EXPORT_SYMBOL_GPL(bus_unregister); int bus_register_notifier(const struct bus_type *bus, struct notifier_block *nb) { struct subsys_private *sp = bus_to_subsys(bus); int retval; if (!sp) return -EINVAL; retval = blocking_notifier_chain_register(&sp->bus_notifier, nb); subsys_put(sp); return retval; } EXPORT_SYMBOL_GPL(bus_register_notifier); int bus_unregister_notifier(const struct bus_type *bus, struct notifier_block *nb) { struct subsys_private *sp = bus_to_subsys(bus); int retval; if (!sp) return -EINVAL; retval = blocking_notifier_chain_unregister(&sp->bus_notifier, nb); subsys_put(sp); return retval; } EXPORT_SYMBOL_GPL(bus_unregister_notifier); void bus_notify(struct device *dev, enum bus_notifier_event value) { struct subsys_private *sp = bus_to_subsys(dev->bus); if (!sp) return; blocking_notifier_call_chain(&sp->bus_notifier, value, dev); subsys_put(sp); } struct kset *bus_get_kset(const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); struct kset *kset; if (!sp) return NULL; kset = &sp->subsys; subsys_put(sp); return kset; } EXPORT_SYMBOL_GPL(bus_get_kset); /* * Yes, this forcibly breaks the klist abstraction temporarily. It * just wants to sort the klist, not change reference counts and * take/drop locks rapidly in the process. It does all this while * holding the lock for the list, so objects can't otherwise be * added/removed while we're swizzling. */ static void device_insertion_sort_klist(struct device *a, struct list_head *list, int (*compare)(const struct device *a, const struct device *b)) { struct klist_node *n; struct device_private *dev_prv; struct device *b; list_for_each_entry(n, list, n_node) { dev_prv = to_device_private_bus(n); b = dev_prv->device; if (compare(a, b) <= 0) { list_move_tail(&a->p->knode_bus.n_node, &b->p->knode_bus.n_node); return; } } list_move_tail(&a->p->knode_bus.n_node, list); } void bus_sort_breadthfirst(const struct bus_type *bus, int (*compare)(const struct device *a, const struct device *b)) { struct subsys_private *sp = bus_to_subsys(bus); LIST_HEAD(sorted_devices); struct klist_node *n, *tmp; struct device_private *dev_prv; struct device *dev; struct klist *device_klist; if (!sp) return; device_klist = &sp->klist_devices; spin_lock(&device_klist->k_lock); list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) { dev_prv = to_device_private_bus(n); dev = dev_prv->device; device_insertion_sort_klist(dev, &sorted_devices, compare); } list_splice(&sorted_devices, &device_klist->k_list); spin_unlock(&device_klist->k_lock); subsys_put(sp); } EXPORT_SYMBOL_GPL(bus_sort_breadthfirst); struct subsys_dev_iter { struct klist_iter ki; const struct device_type *type; }; /** * subsys_dev_iter_init - initialize subsys device iterator * @iter: subsys iterator to initialize * @sp: the subsys private (i.e.
bus) we want to iterate over * @start: the device to start iterating from, if any * @type: device_type of the devices to iterate over, NULL for all * * Initialize subsys iterator @iter such that it iterates over devices * of @sp. If @start is set, the list iteration will start there, * otherwise if it is NULL, the iteration starts at the beginning of * the list. */ static void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct subsys_private *sp, struct device *start, const struct device_type *type) { struct klist_node *start_knode = NULL; if (start) start_knode = &start->p->knode_bus; klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode); iter->type = type; } /** * subsys_dev_iter_next - iterate to the next device * @iter: subsys iterator to proceed * * Proceed @iter to the next device and return it. Returns NULL if * iteration is complete. * * The returned device is referenced and won't be released until the * iterator is advanced to the next device or exited. The caller is * free to do whatever it wants to do with the device including * calling back into subsys code. */ static struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter) { struct klist_node *knode; struct device *dev; for (;;) { knode = klist_next(&iter->ki); if (!knode) return NULL; dev = to_device_private_bus(knode)->device; if (!iter->type || iter->type == dev->type) return dev; } } /** * subsys_dev_iter_exit - finish iteration * @iter: subsys iterator to finish * * Finish an iteration. Always call this function after iteration is * complete whether the iteration ran till the end or not. */ static void subsys_dev_iter_exit(struct subsys_dev_iter *iter) { klist_iter_exit(&iter->ki); } int subsys_interface_register(struct subsys_interface *sif) { struct subsys_private *sp; struct subsys_dev_iter iter; struct device *dev; if (!sif || !sif->subsys) return -ENODEV; sp = bus_to_subsys(sif->subsys); if (!sp) return -EINVAL; /* * Reference in sp is now incremented and will be dropped when * the interface is removed from the bus */ mutex_lock(&sp->mutex); list_add_tail(&sif->node, &sp->interfaces); if (sif->add_dev) { subsys_dev_iter_init(&iter, sp, NULL, NULL); while ((dev = subsys_dev_iter_next(&iter))) sif->add_dev(dev, sif); subsys_dev_iter_exit(&iter); } mutex_unlock(&sp->mutex); return 0; } EXPORT_SYMBOL_GPL(subsys_interface_register); void subsys_interface_unregister(struct subsys_interface *sif) { struct subsys_private *sp; struct subsys_dev_iter iter; struct device *dev; if (!sif || !sif->subsys) return; sp = bus_to_subsys(sif->subsys); if (!sp) return; mutex_lock(&sp->mutex); list_del_init(&sif->node); if (sif->remove_dev) { subsys_dev_iter_init(&iter, sp, NULL, NULL); while ((dev = subsys_dev_iter_next(&iter))) sif->remove_dev(dev, sif); subsys_dev_iter_exit(&iter); } mutex_unlock(&sp->mutex); /* * Decrement the reference count twice, once for the bus_to_subsys() * call in the start of this function, and the second one from the * reference increment in subsys_interface_register() */ subsys_put(sp); subsys_put(sp); } EXPORT_SYMBOL_GPL(subsys_interface_unregister); static void system_root_device_release(struct device *dev) { kfree(dev); } static int subsys_register(const struct bus_type *subsys, const struct attribute_group **groups, struct kobject *parent_of_root) { struct subsys_private *sp; struct device *dev; int err; err = bus_register(subsys); if (err < 0) return err; sp = bus_to_subsys(subsys); if (!sp) { err = -EINVAL; goto err_sp; } dev = kzalloc(sizeof(struct device), GFP_KERNEL); if
(!dev) { err = -ENOMEM; goto err_dev; } err = dev_set_name(dev, "%s", subsys->name); if (err < 0) goto err_name; dev->kobj.parent = parent_of_root; dev->groups = groups; dev->release = system_root_device_release; err = device_register(dev); if (err < 0) goto err_dev_reg; sp->dev_root = dev; subsys_put(sp); return 0; err_dev_reg: put_device(dev); dev = NULL; err_name: kfree(dev); err_dev: subsys_put(sp); err_sp: bus_unregister(subsys); return err; } /** * subsys_system_register - register a subsystem at /sys/devices/system/ * @subsys: system subsystem * @groups: default attributes for the root device * * All 'system' subsystems have a /sys/devices/system/<name> root device * with the name of the subsystem. The root device can carry subsystem- * wide attributes. All registered devices are below this single root * device and are named after the subsystem with a simple enumeration * number appended. The registered devices are not explicitly named; * only 'id' in the device needs to be set. * * Do not use this interface for anything new, it exists for compatibility * with bad ideas only. New subsystems should use plain subsystems; * subsystem-wide attributes should be added to the subsystem directory * itself rather than to a fake root device placed in * /sys/devices/system/<name>. */ int subsys_system_register(const struct bus_type *subsys, const struct attribute_group **groups) { return subsys_register(subsys, groups, &system_kset->kobj); } EXPORT_SYMBOL_GPL(subsys_system_register); /** * subsys_virtual_register - register a subsystem at /sys/devices/virtual/ * @subsys: virtual subsystem * @groups: default attributes for the root device * * All 'virtual' subsystems have a /sys/devices/virtual/<name> root device * with the name of the subsystem. The root device can carry subsystem-wide * attributes. All registered devices are below this single root device. * There's no restriction on device naming. This is for kernel software * constructs which need sysfs interface. */ int subsys_virtual_register(const struct bus_type *subsys, const struct attribute_group **groups) { struct kobject *virtual_dir; virtual_dir = virtual_device_parent(); if (!virtual_dir) return -ENOMEM; return subsys_register(subsys, groups, virtual_dir); } EXPORT_SYMBOL_GPL(subsys_virtual_register); /** * driver_find - locate driver on a bus by its name. * @name: name of the driver. * @bus: bus to scan for the driver. * * Call kset_find_obj() to iterate over the list of drivers on * a bus and find the driver by name. Return the driver if found. * * This routine provides no locking to prevent the driver it returns * from being unregistered or unloaded while the caller is using it. * The caller is responsible for preventing this. */ struct device_driver *driver_find(const char *name, const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); struct kobject *k; struct driver_private *priv; if (!sp) return NULL; k = kset_find_obj(sp->drivers_kset, name); subsys_put(sp); if (!k) return NULL; priv = to_driver(k); /* Drop reference added by kset_find_obj() */ kobject_put(k); return priv->driver; } EXPORT_SYMBOL_GPL(driver_find); /* * Warning, the value could go to "removed" instantly after calling this function, so be very * careful when calling it...
*/ bool bus_is_registered(const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); bool is_initialized = false; if (sp) { is_initialized = true; subsys_put(sp); } return is_initialized; } /** * bus_get_dev_root - return a pointer to the "device root" of a bus * @bus: bus to return the device root of. * * If a bus has a "device root" structure, return it, WITH THE REFERENCE * COUNT INCREMENTED. * * Note, when finished with the device, a call to put_device() is required. * * If the device root is not present (or bus is not a valid pointer), NULL * will be returned. */ struct device *bus_get_dev_root(const struct bus_type *bus) { struct subsys_private *sp = bus_to_subsys(bus); struct device *dev_root; if (!sp) return NULL; dev_root = get_device(sp->dev_root); subsys_put(sp); return dev_root; } EXPORT_SYMBOL_GPL(bus_get_dev_root); int __init buses_init(void) { bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL); if (!bus_kset) return -ENOMEM; system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj); if (!system_kset) { /* Do error handling here as devices_init() does */ kset_unregister(bus_kset); bus_kset = NULL; pr_err("%s: failed to create and add kset 'system'\n", __func__); return -ENOMEM; } return 0; }
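To make the notifier API above concrete, here is a minimal client sketch. The callback and module names are hypothetical, and platform_bus_type is just one example of a bus to watch; only bus_register_notifier()/bus_unregister_notifier() and the BUS_NOTIFY_* events come from the code above.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>

/* Hypothetical callback; @action is a BUS_NOTIFY_* value passed by bus_notify() */
static int example_bus_cb(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE)
		dev_info(dev, "device added to bus\n");
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_bus_cb,
};

static int __init example_init(void)
{
	/* bus_register_notifier() takes and drops its own subsys reference */
	return bus_register_notifier(&platform_bus_type, &example_nb);
}

static void __exit example_exit(void)
{
	bus_unregister_notifier(&platform_bus_type, &example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");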
// SPDX-License-Identifier: GPL-2.0-only /* DVB USB compliant Linux driver for the * - GENPIX 8psk/qpsk/DCII USB2.0 DVB-S module * * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com) * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com) * * Thanks to GENPIX for the sample code used to implement this module. * * This module is based on the vp7045 and vp702x modules * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #include "gp8psk.h" #include "gp8psk-fe.h" /* debug */ static char bcm4500_firmware[] = "dvb-usb-gp8psk-02.fw"; int dvb_usb_gp8psk_debug; module_param_named(debug, dvb_usb_gp8psk_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); struct gp8psk_state { unsigned char data[80]; }; static int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { struct gp8psk_state *st = d->priv; int ret = 0, try = 0; if (blen > sizeof(st->data)) return -EIO; if ((ret = mutex_lock_interruptible(&d->usb_mutex))) return ret; while (ret >= 0 && ret != blen && try < 3) { ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev,0), req, USB_TYPE_VENDOR | USB_DIR_IN, value, index, st->data, blen, 2000); deb_info("reading number %d (ret: %d)\n",try,ret); try++; } if (ret < 0 || ret != blen) { warn("usb in %d operation failed.", req); ret = -EIO; } else { ret = 0; memcpy(b, st->data, blen); } deb_xfer("in: req. %x, val: %x, ind: %x, buffer: ",req,value,index); debug_dump(b,blen,deb_xfer); mutex_unlock(&d->usb_mutex); return ret; } static int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { struct gp8psk_state *st = d->priv; int ret; deb_xfer("out: req.
%x, val: %x, ind: %x, buffer: ",req,value,index); debug_dump(b,blen,deb_xfer); if (blen > sizeof(st->data)) return -EIO; if ((ret = mutex_lock_interruptible(&d->usb_mutex))) return ret; memcpy(st->data, b, blen); if (usb_control_msg(d->udev, usb_sndctrlpipe(d->udev,0), req, USB_TYPE_VENDOR | USB_DIR_OUT, value, index, st->data, blen, 2000) != blen) { warn("usb out operation failed."); ret = -EIO; } else ret = 0; mutex_unlock(&d->usb_mutex); return ret; } static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers) { return gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6); } static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers) { return gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1); } static void gp8psk_info(struct dvb_usb_device *d) { u8 fpga_vers, fw_vers[6]; if (!gp8psk_get_fw_version(d, fw_vers)) info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i", fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers), 2000 + fw_vers[5], fw_vers[4], fw_vers[3]); else info("failed to get FW version"); if (!gp8psk_get_fpga_version(d, &fpga_vers)) info("FPGA Version = %i", fpga_vers); else info("failed to get FPGA version"); } static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d) { int ret; const struct firmware *fw = NULL; const u8 *ptr; u8 *buf; if ((ret = request_firmware(&fw, bcm4500_firmware, &d->udev->dev)) != 0) { err("did not find the bcm4500 firmware file '%s' (status %d). You can use <kernel_dir>/scripts/get_dvb_firmware to get the firmware", bcm4500_firmware,ret); return ret; } ret = -EINVAL; if (gp8psk_usb_out_op(d, LOAD_BCM4500,1,0,NULL, 0)) goto out_rel_fw; info("downloading bcm4500 firmware from file '%s'",bcm4500_firmware); ptr = fw->data; buf = kmalloc(64, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto out_rel_fw; } while (ptr[0] != 0xff) { u16 buflen = ptr[0] + 4; if (ptr + buflen >= fw->data + fw->size) { err("failed to load bcm4500 firmware."); goto out_free; } if (buflen > 64) { err("firmware chunk size bigger than 64 bytes."); goto out_free; } memcpy(buf, ptr, buflen); if (dvb_usb_generic_write(d, buf, buflen)) { err("failed to load bcm4500 firmware."); goto out_free; } ptr += buflen; } ret = 0; out_free: kfree(buf); out_rel_fw: release_firmware(fw); return ret; } static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 status = 0, buf; int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct); if (onoff) { gp8psk_usb_in_op(d, GET_8PSK_CONFIG,0,0,&status,1); if (! (status & bm8pskStarted)) { /* started */ if(gp_product_id == USB_PID_GENPIX_SKYWALKER_CW3K) gp8psk_usb_out_op(d, CW3K_INIT, 1, 0, NULL, 0); if (gp8psk_usb_in_op(d, BOOT_8PSK, 1, 0, &buf, 1)) return -EINVAL; gp8psk_info(d); } if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) if (! (status & bm8pskFW_Loaded)) /* BCM4500 firmware loaded */ if(gp8psk_load_bcm4500fw(d)) return -EINVAL; if (! 
(status & bmIntersilOn)) /* LNB Power */ if (gp8psk_usb_in_op(d, START_INTERSIL, 1, 0, &buf, 1)) return -EINVAL; /* Set DVB mode to 1 */ if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) if (gp8psk_usb_out_op(d, SET_DVB_MODE, 1, 0, NULL, 0)) return -EINVAL; /* Abort possible TS (if previous tune crashed) */ if (gp8psk_usb_out_op(d, ARM_TRANSFER, 0, 0, NULL, 0)) return -EINVAL; } else { /* Turn off LNB power */ if (gp8psk_usb_in_op(d, START_INTERSIL, 0, 0, &buf, 1)) return -EINVAL; /* Turn off 8psk power */ if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1)) return -EINVAL; if(gp_product_id == USB_PID_GENPIX_SKYWALKER_CW3K) gp8psk_usb_out_op(d, CW3K_INIT, 0, 0, NULL, 0); } return 0; } static int gp8psk_bcm4500_reload(struct dvb_usb_device *d) { u8 buf; int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct); deb_xfer("reloading firmware\n"); /* Turn off 8psk power */ if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1)) return -EINVAL; /* Turn On 8psk power */ if (gp8psk_usb_in_op(d, BOOT_8PSK, 1, 0, &buf, 1)) return -EINVAL; /* load BCM4500 firmware */ if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM) if (gp8psk_load_bcm4500fw(d)) return -EINVAL; return 0; } static int gp8psk_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { return gp8psk_usb_out_op(adap->dev, ARM_TRANSFER, onoff, 0 , NULL, 0); } /* Callbacks for gp8psk-fe.c */ static int gp8psk_fe_in(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen) { struct dvb_usb_device *d = priv; return gp8psk_usb_in_op(d, req, value, index, b, blen); } static int gp8psk_fe_out(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen) { struct dvb_usb_device *d = priv; return gp8psk_usb_out_op(d, req, value, index, b, blen); } static int gp8psk_fe_reload(void *priv) { struct dvb_usb_device *d = priv; return gp8psk_bcm4500_reload(d); } static const struct gp8psk_fe_ops gp8psk_fe_ops = { .in = gp8psk_fe_in, .out = gp8psk_fe_out, .reload = gp8psk_fe_reload, }; static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap->dev; int id = le16_to_cpu(d->udev->descriptor.idProduct); int is_rev1; is_rev1 = id == USB_PID_GENPIX_8PSK_REV_1_WARM; adap->fe_adap[0].fe = dvb_attach(gp8psk_fe_attach, &gp8psk_fe_ops, d, is_rev1); return 0; } static struct dvb_usb_device_properties gp8psk_properties; static int gp8psk_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret; struct usb_device *udev = interface_to_usbdev(intf); ret = dvb_usb_device_init(intf, &gp8psk_properties, THIS_MODULE, NULL, adapter_nr); if (ret == 0) { info("found Genpix USB device pID = %x (hex)", le16_to_cpu(udev->descriptor.idProduct)); } return ret; } enum { GENPIX_8PSK_REV_1_COLD, GENPIX_8PSK_REV_1_WARM, GENPIX_8PSK_REV_2, GENPIX_SKYWALKER_1, GENPIX_SKYWALKER_2, GENPIX_SKYWALKER_CW3K, }; static const struct usb_device_id gp8psk_usb_table[] = { DVB_USB_DEV(GENPIX, GENPIX_8PSK_REV_1_COLD), DVB_USB_DEV(GENPIX, GENPIX_8PSK_REV_1_WARM), DVB_USB_DEV(GENPIX, GENPIX_8PSK_REV_2), DVB_USB_DEV(GENPIX, GENPIX_SKYWALKER_1), DVB_USB_DEV(GENPIX, GENPIX_SKYWALKER_2), DVB_USB_DEV(GENPIX, GENPIX_SKYWALKER_CW3K), { } }; MODULE_DEVICE_TABLE(usb, gp8psk_usb_table); static struct dvb_usb_device_properties gp8psk_properties = { .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-gp8psk-01.fw", .size_of_priv = sizeof(struct gp8psk_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = gp8psk_streaming_ctrl, .frontend_attach = gp8psk_frontend_attach, /* parameter for the MPEG2-data transfer */ .stream = 
{ .type = USB_BULK, .count = 7, .endpoint = 0x82, .u = { .bulk = { .buffersize = 8192, } } }, }}, } }, .power_ctrl = gp8psk_power_ctrl, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 4, .devices = { { .name = "Genpix 8PSK-to-USB2 Rev.1 DVB-S receiver", .cold_ids = { &gp8psk_usb_table[GENPIX_8PSK_REV_1_COLD], NULL }, .warm_ids = { &gp8psk_usb_table[GENPIX_8PSK_REV_1_WARM], NULL }, }, { .name = "Genpix 8PSK-to-USB2 Rev.2 DVB-S receiver", .cold_ids = { NULL }, .warm_ids = { &gp8psk_usb_table[GENPIX_8PSK_REV_2], NULL }, }, { .name = "Genpix SkyWalker-1 DVB-S receiver", .cold_ids = { NULL }, .warm_ids = { &gp8psk_usb_table[GENPIX_SKYWALKER_1], NULL }, }, { .name = "Genpix SkyWalker-2 DVB-S receiver", .cold_ids = { NULL }, .warm_ids = { &gp8psk_usb_table[GENPIX_SKYWALKER_2], NULL }, }, { NULL }, } }; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver gp8psk_usb_driver = { .name = "dvb_usb_gp8psk", .probe = gp8psk_usb_probe, .disconnect = dvb_usb_device_exit, .id_table = gp8psk_usb_table, }; module_usb_driver(gp8psk_usb_driver); MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>"); MODULE_DESCRIPTION("Driver for Genpix DVB-S"); MODULE_VERSION("1.1"); MODULE_LICENSE("GPL");
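The bcm4500 loader above walks the firmware image as a sequence of records, each occupying ptr[0] + 4 bytes and terminated by a 0xff length byte. A standalone sketch of that framing (a hypothetical helper in user-space C, not part of the driver) makes the bounds checks easier to see:

#include <stddef.h>
#include <stdint.h>

/*
 * Count the records in a bcm4500-style firmware image: each record is
 * ptr[0] + 4 bytes long, and the image ends with a 0xff length byte.
 * Returns -1 on a truncated or oversized record, mirroring the checks
 * in gp8psk_load_bcm4500fw() above.
 */
static int fw_count_records(const uint8_t *data, size_t size)
{
	const uint8_t *ptr = data;
	int records = 0;

	while (ptr < data + size && ptr[0] != 0xff) {
		size_t reclen = (size_t)ptr[0] + 4;

		/* too big for the driver's 64-byte buffer, or runs past the image */
		if (reclen > 64 || ptr + reclen >= data + size)
			return -1;
		ptr += reclen;
		records++;
	}
	return records;
}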
/************************************************************************** * * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA. * Copyright 2016 Intel Corporation * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * * **************************************************************************/ /* * Generic simple memory manager implementation. Intended to be used as a base * class implementation for more advanced memory managers. * * Note that the algorithm used is quite simple and there might be substantial * performance gains if a smarter free list is implemented. Currently it is * just an unordered stack of free regions. This could easily be improved by * using an RB-tree instead, at least if we expect heavy fragmentation. * * Aligned allocations can also see improvement. * * Authors: * Thomas Hellström <thomas-at-tungstengraphics-dot-com> */ #include <linux/export.h> #include <linux/interval_tree_generic.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/stacktrace.h> #include <drm/drm_mm.h> /** * DOC: Overview * * drm_mm provides a simple range allocator. The drivers are free to use the * resource allocator from the linux core if it suits them, the upside of drm_mm * is that it's in the DRM core, which means it's easier to extend for * some of the crazier special-purpose needs of GPUs. * * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node. * Drivers are free to embed either of them into their own suitable * datastructures. drm_mm itself will not do any memory allocations of its own, * so if drivers choose not to embed nodes they still need to allocate them * themselves. * * The range allocator also supports reservation of preallocated blocks. This is * useful for taking over initial mode setting configurations from the firmware, * where an object needs to be created which exactly matches the firmware's * scanout target.
As long as the range is still free it can be inserted anytime * after the allocator is initialized, which helps with avoiding looped * dependencies in the driver load sequence. * * drm_mm maintains a stack of most recently freed holes, which of all * simplistic datastructures seems to be a fairly decent approach to clustering * allocations and avoiding too much fragmentation. This means free space * searches are O(num_holes). Given all the fancy features drm_mm supports, * something better would be fairly complex, and since gfx thrashing is a fairly * steep cliff it is not a real concern. Removing a node again is O(1). * * drm_mm supports a few features: Alignment and range restrictions can be * supplied. Furthermore every &drm_mm_node has a color value (which is just an * opaque unsigned long) which in conjunction with a driver callback can be used * to implement sophisticated placement restrictions. The i915 DRM driver uses * this to implement guard pages between incompatible caching domains in the * graphics TT. * * Two behaviors are supported for searching and allocating: bottom-up and * top-down. The default is bottom-up. Top-down allocation can be used if the * memory area has different restrictions, or just to reduce fragmentation. * * Finally iteration helpers to walk all nodes and all holes are provided as are * some basic allocator dumpers for debugging. * * Note that this range allocator is not thread-safe; drivers need to protect * modifications with their own locking. The idea behind this is that for a full * memory manager additional data needs to be protected anyway, hence internal * locking would be fully redundant. */ #ifdef CONFIG_DRM_DEBUG_MM #include <linux/stackdepot.h> #define STACKDEPTH 32 #define BUFSZ 4096 static noinline void save_stack(struct drm_mm_node *node) { unsigned long entries[STACKDEPTH]; unsigned int n; n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); /* May be called under spinlock, so avoid sleeping */ node->stack = stack_depot_save(entries, n, GFP_NOWAIT); } static void show_leaks(struct drm_mm *mm) { struct drm_mm_node *node; char *buf; buf = kmalloc(BUFSZ, GFP_KERNEL); if (!buf) return; list_for_each_entry(node, drm_mm_nodes(mm), node_list) { if (!node->stack) { DRM_ERROR("node [%08llx + %08llx]: unknown owner\n", node->start, node->size); continue; } stack_depot_snprint(node->stack, buf, BUFSZ, 0); DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s", node->start, node->size, buf); } kfree(buf); } #undef STACKDEPTH #undef BUFSZ #else static void save_stack(struct drm_mm_node *node) { } static void show_leaks(struct drm_mm *mm) { } #endif #define START(node) ((node)->start) #define LAST(node) ((node)->start + (node)->size - 1) INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, u64, __subtree_last, START, LAST, static inline __maybe_unused, drm_mm_interval_tree) struct drm_mm_node * __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) { return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree, start, last) ?: (struct drm_mm_node *)&mm->head_node; } EXPORT_SYMBOL(__drm_mm_interval_first); static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, struct drm_mm_node *node) { struct drm_mm *mm = hole_node->mm; struct rb_node **link, *rb; struct drm_mm_node *parent; bool leftmost; node->__subtree_last = LAST(node); if (drm_mm_node_allocated(hole_node)) { rb = &hole_node->rb; while (rb) { parent = rb_entry(rb, struct drm_mm_node, rb); if (parent->__subtree_last >= node->__subtree_last) break;
parent->__subtree_last = node->__subtree_last; rb = rb_parent(rb); } rb = &hole_node->rb; link = &hole_node->rb.rb_right; leftmost = false; } else { rb = NULL; link = &mm->interval_tree.rb_root.rb_node; leftmost = true; } while (*link) { rb = *link; parent = rb_entry(rb, struct drm_mm_node, rb); if (parent->__subtree_last < node->__subtree_last) parent->__subtree_last = node->__subtree_last; if (node->start < parent->start) { link = &parent->rb.rb_left; } else { link = &parent->rb.rb_right; leftmost = false; } } rb_link_node(&node->rb, rb, link); rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost, &drm_mm_interval_tree_augment); } #define HOLE_SIZE(NODE) ((NODE)->hole_size) #define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE)) static u64 rb_to_hole_size(struct rb_node *rb) { return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size; } static void insert_hole_size(struct rb_root_cached *root, struct drm_mm_node *node) { struct rb_node **link = &root->rb_root.rb_node, *rb = NULL; u64 x = node->hole_size; bool first = true; while (*link) { rb = *link; if (x > rb_to_hole_size(rb)) { link = &rb->rb_left; } else { link = &rb->rb_right; first = false; } } rb_link_node(&node->rb_hole_size, rb, link); rb_insert_color_cached(&node->rb_hole_size, root, first); } RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks, struct drm_mm_node, rb_hole_addr, u64, subtree_max_hole, HOLE_SIZE) static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node) { struct rb_node **link = &root->rb_node, *rb_parent = NULL; u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole; struct drm_mm_node *parent; while (*link) { rb_parent = *link; parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr); if (parent->subtree_max_hole < subtree_max_hole) parent->subtree_max_hole = subtree_max_hole; if (start < HOLE_ADDR(parent)) link = &parent->rb_hole_addr.rb_left; else link = &parent->rb_hole_addr.rb_right; } rb_link_node(&node->rb_hole_addr, rb_parent, link); rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks); } static void add_hole(struct drm_mm_node *node) { struct drm_mm *mm = node->mm; node->hole_size = __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node); node->subtree_max_hole = node->hole_size; DRM_MM_BUG_ON(!drm_mm_hole_follows(node)); insert_hole_size(&mm->holes_size, node); insert_hole_addr(&mm->holes_addr, node); list_add(&node->hole_stack, &mm->hole_stack); } static void rm_hole(struct drm_mm_node *node) { DRM_MM_BUG_ON(!drm_mm_hole_follows(node)); list_del(&node->hole_stack); rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size); rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr, &augment_callbacks); node->hole_size = 0; node->subtree_max_hole = 0; DRM_MM_BUG_ON(drm_mm_hole_follows(node)); } static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb) { return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size); } static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb) { return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr); } static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size) { struct rb_node *rb = mm->holes_size.rb_root.rb_node; struct drm_mm_node *best = NULL; do { struct drm_mm_node *node = rb_entry(rb, struct drm_mm_node, rb_hole_size); if (size <= node->hole_size) { best = node; rb = rb->rb_right; } else { rb = rb->rb_left; } } while (rb); return best; } static bool usable_hole_addr(struct rb_node *rb, u64 size) { return rb && 
rb_hole_addr_to_node(rb)->subtree_max_hole >= size; } static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size) { struct rb_node *rb = mm->holes_addr.rb_node; struct drm_mm_node *node = NULL; while (rb) { u64 hole_start; if (!usable_hole_addr(rb, size)) break; node = rb_hole_addr_to_node(rb); hole_start = __drm_mm_hole_node_start(node); if (addr < hole_start) rb = node->rb_hole_addr.rb_left; else if (addr > hole_start + node->hole_size) rb = node->rb_hole_addr.rb_right; else break; } return node; } static struct drm_mm_node * first_hole(struct drm_mm *mm, u64 start, u64 end, u64 size, enum drm_mm_insert_mode mode) { switch (mode) { default: case DRM_MM_INSERT_BEST: return best_hole(mm, size); case DRM_MM_INSERT_LOW: return find_hole_addr(mm, start, size); case DRM_MM_INSERT_HIGH: return find_hole_addr(mm, end, size); case DRM_MM_INSERT_EVICT: return list_first_entry_or_null(&mm->hole_stack, struct drm_mm_node, hole_stack); } } /** * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions * @name: name of function to declare * @first: first rb member to traverse (either rb_left or rb_right). * @last: last rb member to traverse (either rb_right or rb_left). * * This macro declares a function to return the next hole of the addr rb tree. * While traversing the tree we take the searched size into account and only * visit branches with potentially big enough holes. */ #define DECLARE_NEXT_HOLE_ADDR(name, first, last) \ static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size) \ { \ struct rb_node *parent, *node = &entry->rb_hole_addr; \ \ if (!entry || RB_EMPTY_NODE(node)) \ return NULL; \ \ if (usable_hole_addr(node->first, size)) { \ node = node->first; \ while (usable_hole_addr(node->last, size)) \ node = node->last; \ return rb_hole_addr_to_node(node); \ } \ \ while ((parent = rb_parent(node)) && node == parent->first) \ node = parent; \ \ return rb_hole_addr_to_node(parent); \ } DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right) DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left) static struct drm_mm_node * next_hole(struct drm_mm *mm, struct drm_mm_node *node, u64 size, enum drm_mm_insert_mode mode) { switch (mode) { default: case DRM_MM_INSERT_BEST: return rb_hole_size_to_node(rb_prev(&node->rb_hole_size)); case DRM_MM_INSERT_LOW: return next_hole_low_addr(node, size); case DRM_MM_INSERT_HIGH: return next_hole_high_addr(node, size); case DRM_MM_INSERT_EVICT: node = list_next_entry(node, hole_stack); return &node->hole_stack == &mm->hole_stack ? NULL : node; } } /** * drm_mm_reserve_node - insert a pre-initialized node * @mm: drm_mm allocator to insert @node into * @node: drm_mm_node to insert * * This function inserts an already set-up &drm_mm_node into the allocator, * meaning that start, size and color must be set by the caller. All other * fields must be cleared to 0. This is useful to initialize the allocator with * preallocated objects which must be set-up before the range allocator can be * set-up, e.g. when taking over a firmware framebuffer. * * Returns: * 0 on success, -ENOSPC if there's no hole where @node is.
*/ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) { struct drm_mm_node *hole; u64 hole_start, hole_end; u64 adj_start, adj_end; u64 end; end = node->start + node->size; if (unlikely(end <= node->start)) return -ENOSPC; /* Find the relevant hole to add our node to */ hole = find_hole_addr(mm, node->start, 0); if (!hole) return -ENOSPC; adj_start = hole_start = __drm_mm_hole_node_start(hole); adj_end = hole_end = hole_start + hole->hole_size; if (mm->color_adjust) mm->color_adjust(hole, node->color, &adj_start, &adj_end); if (adj_start > node->start || adj_end < end) return -ENOSPC; node->mm = mm; __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); list_add(&node->node_list, &hole->node_list); drm_mm_interval_tree_add_node(hole, node); node->hole_size = 0; rm_hole(hole); if (node->start > hole_start) add_hole(hole); if (end < hole_end) add_hole(node); save_stack(node); return 0; } EXPORT_SYMBOL(drm_mm_reserve_node); static u64 rb_to_hole_size_or_zero(struct rb_node *rb) { return rb ? rb_to_hole_size(rb) : 0; } /** * drm_mm_insert_node_in_range - ranged search for space and insert @node * @mm: drm_mm to allocate from * @node: preallocated node to insert * @size: size of the allocation * @alignment: alignment of the allocation * @color: opaque tag value to use for this node * @range_start: start of the allowed range for this node * @range_end: end of the allowed range for this node * @mode: fine-tune the allocation search and placement * * The preallocated @node must be cleared to 0. * * Returns: * 0 on success, -ENOSPC if there's no suitable hole. */ int drm_mm_insert_node_in_range(struct drm_mm * const mm, struct drm_mm_node * const node, u64 size, u64 alignment, unsigned long color, u64 range_start, u64 range_end, enum drm_mm_insert_mode mode) { struct drm_mm_node *hole; u64 remainder_mask; bool once; DRM_MM_BUG_ON(range_start > range_end); if (unlikely(size == 0 || range_end - range_start < size)) return -ENOSPC; if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size) return -ENOSPC; if (alignment <= 1) alignment = 0; once = mode & DRM_MM_INSERT_ONCE; mode &= ~DRM_MM_INSERT_ONCE; remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0; for (hole = first_hole(mm, range_start, range_end, size, mode); hole; hole = once ?
NULL : next_hole(mm, hole, size, mode)) { u64 hole_start = __drm_mm_hole_node_start(hole); u64 hole_end = hole_start + hole->hole_size; u64 adj_start, adj_end; u64 col_start, col_end; if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end) break; if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start) break; col_start = hole_start; col_end = hole_end; if (mm->color_adjust) mm->color_adjust(hole, color, &col_start, &col_end); adj_start = max(col_start, range_start); adj_end = min(col_end, range_end); if (adj_end <= adj_start || adj_end - adj_start < size) continue; if (mode == DRM_MM_INSERT_HIGH) adj_start = adj_end - size; if (alignment) { u64 rem; if (likely(remainder_mask)) rem = adj_start & remainder_mask; else div64_u64_rem(adj_start, alignment, &rem); if (rem) { adj_start -= rem; if (mode != DRM_MM_INSERT_HIGH) adj_start += alignment; if (adj_start < max(col_start, range_start) || min(col_end, range_end) - adj_start < size) continue; if (adj_end <= adj_start || adj_end - adj_start < size) continue; } } node->mm = mm; node->size = size; node->start = adj_start; node->color = color; node->hole_size = 0; __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); list_add(&node->node_list, &hole->node_list); drm_mm_interval_tree_add_node(hole, node); rm_hole(hole); if (adj_start > hole_start) add_hole(hole); if (adj_start + size < hole_end) add_hole(node); save_stack(node); return 0; } return -ENOSPC; } EXPORT_SYMBOL(drm_mm_insert_node_in_range); static inline __maybe_unused bool drm_mm_node_scanned_block(const struct drm_mm_node *node) { return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); } /** * drm_mm_remove_node - Remove a memory node from the allocator. * @node: drm_mm_node to remove * * This just removes a node from its drm_mm allocator. The node does not need to * be cleared again before it can be re-inserted into this or any other drm_mm * allocator. It is a bug to call this function on an unallocated node. */ void drm_mm_remove_node(struct drm_mm_node *node) { struct drm_mm *mm = node->mm; struct drm_mm_node *prev_node; DRM_MM_BUG_ON(!drm_mm_node_allocated(node)); DRM_MM_BUG_ON(drm_mm_node_scanned_block(node)); prev_node = list_prev_entry(node, node_list); if (drm_mm_hole_follows(node)) rm_hole(node); drm_mm_interval_tree_remove(node, &mm->interval_tree); list_del(&node->node_list); if (drm_mm_hole_follows(prev_node)) rm_hole(prev_node); add_hole(prev_node); clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); } EXPORT_SYMBOL(drm_mm_remove_node); /** * DOC: lru scan roster * * Very often GPUs need to have continuous allocations for a given object. When * evicting objects to make space for a new one it is therefore not very * efficient when we simply start to select all objects from the tail of an LRU * until there's a suitable hole: Especially for big objects or nodes that * otherwise have special allocation constraints there's a good chance we evict * lots of (smaller) objects unnecessarily. * * The DRM range allocator supports this use-case through the scanning * interfaces. First a scan operation needs to be initialized with * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds * objects to the roster, probably by walking an LRU list, but this can be * freely implemented. Eviction candidates are added using * drm_mm_scan_add_block() until a suitable hole is found or there are no * further evictable objects. Eviction roster metadata is tracked in &struct * drm_mm_scan.
* * The driver must walk through all objects again in exactly the reverse * order to restore the allocator state. Note that while the allocator is used * in the scan mode no other operation is allowed. * * Finally the driver evicts all objects selected (drm_mm_scan_remove_block() * reported true) in the scan, and any overlapping nodes after color adjustment * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and * since freeing a node is also O(1) the overall complexity is * O(scanned_objects). So like the free stack which needs to be walked before a * scan operation even begins this is linear in the number of objects. It * doesn't seem to hurt too badly. */ /** * drm_mm_scan_init_with_range - initialize range-restricted lru scanning * @scan: scan state * @mm: drm_mm to scan * @size: size of the allocation * @alignment: alignment of the allocation * @color: opaque tag value to use for the allocation * @start: start of the allowed range for the allocation * @end: end of the allowed range for the allocation * @mode: fine-tune the allocation search and placement * * This simply sets up the scanning routines with the parameters for the desired * hole. * * Warning: * As long as the scan list is non-empty, no other operations than * adding/removing nodes to/from the scan list are allowed. */ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, struct drm_mm *mm, u64 size, u64 alignment, unsigned long color, u64 start, u64 end, enum drm_mm_insert_mode mode) { DRM_MM_BUG_ON(start >= end); DRM_MM_BUG_ON(!size || size > end - start); DRM_MM_BUG_ON(mm->scan_active); scan->mm = mm; if (alignment <= 1) alignment = 0; scan->color = color; scan->alignment = alignment; scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0; scan->size = size; scan->mode = mode; DRM_MM_BUG_ON(end <= start); scan->range_start = start; scan->range_end = end; scan->hit_start = U64_MAX; scan->hit_end = 0; } EXPORT_SYMBOL(drm_mm_scan_init_with_range); /** * drm_mm_scan_add_block - add a node to the scan list * @scan: the active drm_mm scanner * @node: drm_mm_node to add * * Add a node to the scan list that might be freed to make space for the desired * hole. * * Returns: * True if a hole has been found, false otherwise. */ bool drm_mm_scan_add_block(struct drm_mm_scan *scan, struct drm_mm_node *node) { struct drm_mm *mm = scan->mm; struct drm_mm_node *hole; u64 hole_start, hole_end; u64 col_start, col_end; u64 adj_start, adj_end; DRM_MM_BUG_ON(node->mm != mm); DRM_MM_BUG_ON(!drm_mm_node_allocated(node)); DRM_MM_BUG_ON(drm_mm_node_scanned_block(node)); __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); mm->scan_active++; /* Remove this block from the node_list so that we enlarge the hole * (distance between the end of our previous node and the start of * our next), without poisoning the link so that we can restore it * later in drm_mm_scan_remove_block().
*/ hole = list_prev_entry(node, node_list); DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node); __list_del_entry(&node->node_list); hole_start = __drm_mm_hole_node_start(hole); hole_end = __drm_mm_hole_node_end(hole); col_start = hole_start; col_end = hole_end; if (mm->color_adjust) mm->color_adjust(hole, scan->color, &col_start, &col_end); adj_start = max(col_start, scan->range_start); adj_end = min(col_end, scan->range_end); if (adj_end <= adj_start || adj_end - adj_start < scan->size) return false; if (scan->mode == DRM_MM_INSERT_HIGH) adj_start = adj_end - scan->size; if (scan->alignment) { u64 rem; if (likely(scan->remainder_mask)) rem = adj_start & scan->remainder_mask; else div64_u64_rem(adj_start, scan->alignment, &rem); if (rem) { adj_start -= rem; if (scan->mode != DRM_MM_INSERT_HIGH) adj_start += scan->alignment; if (adj_start < max(col_start, scan->range_start) || min(col_end, scan->range_end) - adj_start < scan->size) return false; if (adj_end <= adj_start || adj_end - adj_start < scan->size) return false; } } scan->hit_start = adj_start; scan->hit_end = adj_start + scan->size; DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end); DRM_MM_BUG_ON(scan->hit_start < hole_start); DRM_MM_BUG_ON(scan->hit_end > hole_end); return true; } EXPORT_SYMBOL(drm_mm_scan_add_block); /** * drm_mm_scan_remove_block - remove a node from the scan list * @scan: the active drm_mm scanner * @node: drm_mm_node to remove * * Nodes **must** be removed in exactly the reverse order from the scan list as * they have been added (e.g. using list_add() as they are added and then * list_for_each() over that eviction list to remove), otherwise the internal * state of the memory manager will be corrupted. * * When the scan list is empty, the selected memory nodes can be freed. An * immediately following drm_mm_insert_node_in_range_generic() or one of the * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return * the just freed block (because it's at the top of the free_stack list). * * Returns: * True if this block should be evicted, false otherwise. Will always * return false when no hole has been found. */ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, struct drm_mm_node *node) { struct drm_mm_node *prev_node; DRM_MM_BUG_ON(node->mm != scan->mm); DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node)); __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); DRM_MM_BUG_ON(!node->mm->scan_active); node->mm->scan_active--; /* During drm_mm_scan_add_block() we decoupled this node leaving * its pointers intact. Now that the caller is walking back along * the eviction list we can restore this block into its rightful * place on the full node_list. To confirm that the caller is walking * backwards correctly we check that prev_node->next == node->next, * i.e. both believe the same node should be on the other side of the * hole. */ prev_node = list_prev_entry(node, node_list); DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) != list_next_entry(node, node_list)); list_add(&node->node_list, &prev_node->node_list); return (node->start + node->size > scan->hit_start && node->start < scan->hit_end); } EXPORT_SYMBOL(drm_mm_scan_remove_block); /** * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole * @scan: drm_mm scan with target hole * * After completing an eviction scan and removing the selected nodes, we may * need to remove a few more nodes from either side of the target hole if * mm.color_adjust is being used. 
* * Returns: * A node to evict, or NULL if there are no overlapping nodes. */ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan) { struct drm_mm *mm = scan->mm; struct drm_mm_node *hole; u64 hole_start, hole_end; DRM_MM_BUG_ON(list_empty(&mm->hole_stack)); if (!mm->color_adjust) return NULL; /* * The hole found during scanning should ideally be the first element * in the hole_stack list, but due to side-effects in the driver it * may not be. */ list_for_each_entry(hole, &mm->hole_stack, hole_stack) { hole_start = __drm_mm_hole_node_start(hole); hole_end = hole_start + hole->hole_size; if (hole_start <= scan->hit_start && hole_end >= scan->hit_end) break; } /* We should only be called after we found the hole previously */ DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack); if (unlikely(&hole->hole_stack == &mm->hole_stack)) return NULL; DRM_MM_BUG_ON(hole_start > scan->hit_start); DRM_MM_BUG_ON(hole_end < scan->hit_end); mm->color_adjust(hole, scan->color, &hole_start, &hole_end); if (hole_start > scan->hit_start) return hole; if (hole_end < scan->hit_end) return list_next_entry(hole, node_list); return NULL; } EXPORT_SYMBOL(drm_mm_scan_color_evict); /** * drm_mm_init - initialize a drm-mm allocator * @mm: the drm_mm structure to initialize * @start: start of the range managed by @mm * @size: size of the range managed by @mm * * Note that @mm must be cleared to 0 before calling this function. */ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size) { DRM_MM_BUG_ON(start + size <= start); mm->color_adjust = NULL; INIT_LIST_HEAD(&mm->hole_stack); mm->interval_tree = RB_ROOT_CACHED; mm->holes_size = RB_ROOT_CACHED; mm->holes_addr = RB_ROOT; /* Clever trick to avoid a special case in the free hole tracking. */ INIT_LIST_HEAD(&mm->head_node.node_list); mm->head_node.flags = 0; mm->head_node.mm = mm; mm->head_node.start = start + size; mm->head_node.size = -size; add_hole(&mm->head_node); mm->scan_active = 0; #ifdef CONFIG_DRM_DEBUG_MM stack_depot_init(); #endif } EXPORT_SYMBOL(drm_mm_init); /** * drm_mm_takedown - clean up a drm_mm allocator * @mm: drm_mm allocator to clean up * * Note that it is a bug to call this function on an allocator which is not * clean. */ void drm_mm_takedown(struct drm_mm *mm) { if (WARN(!drm_mm_clean(mm), "Memory manager not clean during takedown.\n")) show_leaks(mm); } EXPORT_SYMBOL(drm_mm_takedown); static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry) { u64 start, size; size = entry->hole_size; if (size) { start = drm_mm_hole_node_start(entry); drm_printf(p, "%#018llx-%#018llx: %llu: free\n", start, start + size, size); } return size; } /** * drm_mm_print - print allocator state * @mm: drm_mm allocator to print * @p: DRM printer to use */ void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p) { const struct drm_mm_node *entry; u64 total_used = 0, total_free = 0, total = 0; total_free += drm_mm_dump_hole(p, &mm->head_node); drm_mm_for_each_node(entry, mm) { drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start, entry->start + entry->size, entry->size); total_used += entry->size; total_free += drm_mm_dump_hole(p, entry); } total = total_free + total_used; drm_printf(p, "total: %llu, used %llu free %llu\n", total, total_used, total_free); } EXPORT_SYMBOL(drm_mm_print);
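As a quick orientation to the allocator API above, here is a minimal usage sketch (hypothetical driver-side code; the range and sizes are arbitrary examples): initialize a 1 MiB range, carve out one 4 KiB block aligned to 4 KiB, then tear the allocator down.

#include <drm/drm_mm.h>

static int example_drm_mm_usage(void)
{
	struct drm_mm mm = {};        /* drm_mm_init() requires @mm cleared to 0 */
	struct drm_mm_node node = {}; /* preallocated node, cleared to 0 */
	int ret;

	drm_mm_init(&mm, 0, 1 << 20);

	ret = drm_mm_insert_node_in_range(&mm, &node,
					  4096,		/* size */
					  4096,		/* alignment */
					  0,		/* color */
					  0, 1 << 20,	/* allowed range */
					  DRM_MM_INSERT_BEST);
	if (!ret)
		drm_mm_remove_node(&node);

	/* all nodes must be removed before takedown */
	drm_mm_takedown(&mm);
	return ret;
}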
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2010 Willow Garage <http://www.willowgarage.com> Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com> Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00lib Abstract: rt2x00 queue specific routines. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include "rt2x00.h" #include "rt2x00lib.h" struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp) { struct data_queue *queue = entry->queue; struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; struct sk_buff *skb; struct skb_frame_desc *skbdesc; unsigned int frame_size; unsigned int head_size = 0; unsigned int tail_size = 0; /* * The frame size includes the descriptor size, because the * hardware receives the frame directly into the skb buffer. */ frame_size = queue->data_size + queue->desc_size + queue->winfo_size; /* * The payload should be aligned to a 4-byte boundary, * this means we need at least 3 bytes for moving the frame * into the correct offset. */ head_size = 4; /* * For IV/EIV/ICV assembly we must make sure there is * at least 8 bytes available in the headroom for IV/EIV * and 8 bytes for ICV data as tailroom. */ if (rt2x00_has_cap_hw_crypto(rt2x00dev)) { head_size += 8; tail_size += 8; } /* * Allocate skbuffer. */ skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp); if (!skb) return NULL; /* * Reserve the requested head room and set the frame size, * leaving the requested tail room available. */ skb_reserve(skb, head_size); skb_put(skb, frame_size); /* * Populate skbdesc.
*/ skbdesc = get_skb_frame_desc(skb); memset(skbdesc, 0, sizeof(*skbdesc)); if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) { dma_addr_t skb_dma; skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) { dev_kfree_skb_any(skb); return NULL; } skbdesc->skb_dma = skb_dma; skbdesc->flags |= SKBDESC_DMA_MAPPED_RX; } return skb; } int rt2x00queue_map_txskb(struct queue_entry *entry) { struct device *dev = entry->queue->rt2x00dev->dev; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); skbdesc->skb_dma = dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma))) return -ENOMEM; skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; rt2x00lib_dmadone(entry); return 0; } EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); void rt2x00queue_unmap_skb(struct queue_entry *entry) { struct device *dev = entry->queue->rt2x00dev->dev; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) { dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len, DMA_FROM_DEVICE); skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX; } else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) { dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len, DMA_TO_DEVICE); skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX; } } EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb); void rt2x00queue_free_skb(struct queue_entry *entry) { if (!entry->skb) return; rt2x00queue_unmap_skb(entry); dev_kfree_skb_any(entry->skb); entry->skb = NULL; } void rt2x00queue_align_frame(struct sk_buff *skb) { unsigned int frame_length = skb->len; unsigned int align = ALIGN_SIZE(skb, 0); if (!align) return; skb_push(skb, align); memmove(skb->data, skb->data + align, frame_length); skb_trim(skb, frame_length); } /* * H/W needs L2 padding between the header and the payload if the header * size is not 4-byte aligned. */ void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len) { unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; if (!l2pad) return; skb_push(skb, l2pad); memmove(skb->data, skb->data + l2pad, hdr_len); } void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len) { unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; if (!l2pad) return; memmove(skb->data + l2pad, skb->data, hdr_len); skb_pull(skb, l2pad); } static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct txentry_desc *txdesc) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); u16 seqno; if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) return; __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) { /* * rt2800 has a H/W (or F/W) bug: the device incorrectly * increases the seqno on retransmitted data (non-QoS) and * management frames. To work around the problem, generate * the seqno in software, except for beacons, which are * transmitted periodically by H/W and hence must have their * seqno assigned by hardware. */ if (ieee80211_is_beacon(hdr->frame_control)) { __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); /* H/W will generate sequence number */ return; } __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); } /* * The hardware is not able to insert a sequence number. Assign a * software generated one here.
static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
		/*
		 * rt2800 has a H/W (or F/W) bug: the device incorrectly
		 * increases the seqno on retransmitted data (non-QoS) and
		 * management frames. To work around the problem, generate
		 * the seqno in software, except for beacons, which are
		 * transmitted periodically by the H/W and hence the
		 * hardware has to assign the seqno for them.
		 */
		if (ieee80211_is_beacon(hdr->frame_control)) {
			__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
			/* H/W will generate sequence number */
			return;
		}

		__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
	}

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension.
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}
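/*
 * Worked example for the CCK branch above, assuming hwrate->bitrate is in
 * units of 100 kbit/s (110 == 11 Mbit/s) and GET_DURATION()/
 * GET_DURATION_RES() yield the quotient and remainder of the bit-time
 * division: a 1517-byte PSDU at 11 Mbit/s is 12136 bits, and 12136 / 11
 * leaves a remainder of 3 bits, so the 1103 us result is rounded up to
 * 1104 us; because fewer than four bit-times spill into that last
 * microsecond, the Length Extension bit (service |= 0x80) is set so the
 * receiver can disambiguate the rounding.
 */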
static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						struct ieee80211_sta *sta,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;
	u8 density = 0;

	if (sta) {
		sta_priv = sta_to_rt2x00_sta(sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
		density = sta->deflink.ht_cap.ampdu_density;
	}

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
	 * MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one TX stream (>MCS7).
		 */
		if (sta && txdesc->u.ht.mcs > 7 &&
		    sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;

		/* All other fields are left zero. */
		return;
	}

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * This frame is eligible for an AMPDU; however, don't aggregate
	 * frames that are intended to probe a specific TX rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
		txdesc->u.ht.mpdu_density = density;
		txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */
	}

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);

	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc,
					     struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame.
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending.
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
	    !(tx_info->flags & IEEE80211_TX_CTL_INJECTED))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components.
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    sta, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		rt2x00_err(rt2x00dev,
			   "Corrupt queue %d, accessing entry which is not ours\n"
			   "Please file bug report to %s\n",
			   entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
	    rt2x00queue_map_txskb(entry))
		return -ENOMEM;

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue. There are, however, a few rules:
	 * 1) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which is in some way related to it.
	 *    This is true for fragments, RTS or CTS-to-self frames.
	 * 2) Rule 1 can be broken when the available entries
	 *    in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}
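/*
 * Example of the rule above: for a three-fragment burst, the first two
 * fragments carry ENTRY_TXD_BURST (more fragments follow), so no kick is
 * issued for them; the last fragment does not, and a single kick hands the
 * whole burst to the hardware, unless the queue crossed its fill threshold
 * earlier, in which case it is kicked immediately.
 */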
static void rt2x00queue_bar_check(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *) (entry->skb->data +
				    rt2x00dev->extra_tx_headroom);
	struct rt2x00_bar_list_entry *bar_entry;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return;

	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

	/*
	 * If the alloc fails we still send the BAR out, we just don't track
	 * it in our bar list, and as a result we will report it back to
	 * mac80211 as failed.
	 */
	if (!bar_entry)
		return;

	bar_entry->entry = entry;
	bar_entry->block_acked = 0;

	/*
	 * Copy the relevant parts of the 802.11 BAR into our check list
	 * such that we can use RCU for less overhead in the RX path, since
	 * sending BARs and processing the according BlockAck should be
	 * the exception.
	 */
	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
	bar_entry->control = bar->control;
	bar_entry->start_seq_num = bar->start_seq_num;

	/*
	 * Insert BAR into our BAR check list.
	 */
	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       struct ieee80211_sta *sta, bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However, some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
		rt2x00queue_align_frame(skb);

	/*
	 * This function must be called with bh disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
			   queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		rt2x00_err(queue->rt2x00dev,
			   "Arrived at non-free entry in the non-full queue %d\n"
			   "Please file bug report to %s\n",
			   queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	entry->skb = skb;

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}
	/*
	 * Put BlockAckReqs into our check list for driver BA processing.
	 */
	rt2x00queue_bar_check(entry);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	/*
	 * Pausing the queue has to be serialized with rt2x00lib_txdone(),
	 * so we do this under queue->tx_lock. Bottom halves were already
	 * disabled before the ieee80211_xmit() call.
	 */
	if (rt2x00queue_threshold(queue))
		rt2x00queue_pause_queue(queue);

	spin_unlock(&queue->tx_lock);
	return ret;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif, 0);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb,
					 &txdesc, NULL);

	/*
	 * Fill in skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}
bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Entry requested from invalid index range (%d - %d)\n",
			   start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over;
	 * if during our loop an extra entry is set to pending,
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Entry requested from invalid index type (%d)\n",
			   index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);
	entry = &queue->entries[queue->index[index]];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
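/*
 * Illustrative sketch (not part of the driver): a caller-supplied callback
 * for the ring iterator above, counting entries between the done pointer
 * and the producer index that still carry ENTRY_DATA_PENDING.
 */
#if 0
static bool example_count_pending(struct queue_entry *entry, void *data)
{
	unsigned int *count = data;

	if (test_bit(ENTRY_DATA_PENDING, &entry->flags))
		(*count)++;

	return false;	/* returning true would abort the walk */
}

static unsigned int example_pending_entries(struct data_queue *queue)
{
	unsigned int count = 0;

	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
				   &count, example_count_pending);
	return count;
}
#endif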
static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	rt2x00queue_pause_queue_nocheck(queue);
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue_nocheck(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	if (rt2x00queue_empty(queue))
		return;

	/*
	 * If we are not supposed to drop any pending
	 * frames, this means we must force a start (=kick)
	 * to the queue to make sure the hardware will
	 * start transmitting.
	 */
	if (!drop && tx_queue)
		queue->rt2x00dev->ops->lib->kick_queue(queue);

	/*
	 * Check if the driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
			    queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
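/*
 * Illustrative sketch (not part of the driver): draining all TX queues with
 * the helpers above before a hypothetical channel switch. drop=false waits
 * for the hardware to finish; drop=true discards pending frames.
 */
#if 0
static void example_drain_tx(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue) {
		rt2x00queue_pause_queue(queue);
		rt2x00queue_flush_queue(queue, false);
	}
}
#endif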
void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + queue->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries),
					    queue->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
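/*
 * Layout note for the allocation above: all queue_entry structures come
 * first, followed by one priv_size blob per entry, inside the single
 * kcalloc'd block:
 *
 *   base                               base + limit * sizeof(entry)
 *   | entry[0] | ... | entry[N-1]      | priv[0] | ... | priv[N-1] |
 *
 * so entry[i].priv_data points at
 * base + limit * sizeof(struct queue_entry) + i * priv_size.
 */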
static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
	if (status)
		goto exit;

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;

	rt2x00dev->ops->queue_init(queue);

	queue->threshold = DIV_ROUND_UP(queue->limit, 10);
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	/*
	 * Initialize pointers.
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}
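/*
 * Layout note for rt2x00queue_allocate() above: for a hypothetical device
 * with four TX queues and an ATIM queue, data_queues is 7 and the array is
 * carved up as
 *
 *   queue[0]    = RX
 *   queue[1..4] = QID_AC_VO .. QID_AC_BK
 *   queue[5]    = beacon
 *   queue[6]    = ATIM
 */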
// SPDX-License-Identifier: GPL-2.0
/*
 * device.h - generic, centralized driver model
 *
 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2008-2009 Novell Inc.
 *
 * See Documentation/driver-api/driver-model/ for more information.
 */

#ifndef _DEVICE_H_
#define _DEVICE_H_

#include <linux/dev_printk.h>
#include <linux/energy_model.h>
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/klist.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/atomic.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/device/bus.h>
#include <linux/device/class.h>
#include <linux/device/devres.h>
#include <linux/device/driver.h>
#include <linux/cleanup.h>
#include <asm/device.h>

struct device;
struct device_private;
struct device_driver;
struct driver_private;
struct module;
struct class;
struct subsys_private;
struct device_node;
struct fwnode_handle;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;
struct msi_device_data;

/**
 * struct subsys_interface - interfaces to device functions
 * @name:       name of the device function
 * @subsys:     subsystem of the devices to attach to
 * @node:       the list of functions registered at the subsystem
 * @add_dev:    device hookup to device function handler
 * @remove_dev: device hookup to device function handler
 *
 * Simple interfaces attached to a subsystem. Multiple interfaces can
 * attach to a subsystem and its devices. Unlike drivers, they do not
 * exclusively claim or control devices. Interfaces usually represent
 * a specific functionality of a subsystem/class of devices.
 */
struct subsys_interface {
	const char *name;
	const struct bus_type *subsys;
	struct list_head node;
	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
	void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
};

int subsys_interface_register(struct subsys_interface *sif);
void subsys_interface_unregister(struct subsys_interface *sif);

int subsys_system_register(const struct bus_type *subsys,
			   const struct attribute_group **groups);
int subsys_virtual_register(const struct bus_type *subsys,
			    const struct attribute_group **groups);

/*
 * The type of device that "struct device" is embedded in. A class
 * or bus can contain devices of different types
 * like "partitions" and "disks", "mouse" and "event".
 * This identifies the device type and carries type-specific
 * information, equivalent to the kobj_type of a kobject.
 * If "name" is specified, the uevent will contain it in
 * the DEVTYPE variable.
 */
struct device_type {
	const char *name;
	const struct attribute_group **groups;
	int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
	char *(*devnode)(const struct device *dev, umode_t *mode,
			 kuid_t *uid, kgid_t *gid);
	void (*release)(struct device *dev);

	const struct dev_pm_ops *pm;
};

/**
 * struct device_attribute - Interface for exporting device attributes.
 * @attr: sysfs attribute definition.
 * @show: Show handler.
 * @store: Store handler.
 */
struct device_attribute {
	struct attribute	attr;
	ssize_t (*show)(struct device *dev, struct device_attribute *attr,
			char *buf);
	ssize_t (*store)(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count);
};

/**
 * struct dev_ext_attribute - Exported device attribute with extra context.
 * @attr: Exported device attribute.
 * @var: Pointer to context.
 */
struct dev_ext_attribute {
	struct device_attribute attr;
	void *var;
};

ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
			  char *buf);
ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count);
ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
			char *buf);
ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count);
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf);
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count);
ssize_t device_show_string(struct device *dev, struct device_attribute *attr,
			   char *buf);

/**
 * DEVICE_ATTR - Define a device attribute.
 * @_name: Attribute name.
 * @_mode: File mode.
 * @_show: Show handler. Optional, but mandatory if attribute is readable.
 * @_store: Store handler. Optional, but mandatory if attribute is writable.
 *
 * Convenience macro for defining a struct device_attribute.
 *
 * For example, ``DEVICE_ATTR(foo, 0644, foo_show, foo_store);`` expands to:
 *
 * .. code-block:: c
 *
 *	struct device_attribute dev_attr_foo = {
 *		.attr	= { .name = "foo", .mode = 0644 },
 *		.show	= foo_show,
 *		.store	= foo_store,
 *	};
 */
#define DEVICE_ATTR(_name, _mode, _show, _store) \
	struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
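/*
 * Illustrative sketch (not part of this header): a driver-side attribute
 * built with DEVICE_ATTR(). The foo_show()/foo_store() handlers and the
 * foo_value variable are hypothetical; the attribute would typically be
 * registered with device_create_file(dev, &dev_attr_foo).
 */
#if 0
static unsigned long foo_value;

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%lu\n", foo_value);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	if (kstrtoul(buf, 0, &foo_value))
		return -EINVAL;
	return count;
}
static DEVICE_ATTR(foo, 0644, foo_show, foo_store);
#endif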
/**
 * DEVICE_ATTR_PREALLOC - Define a preallocated device attribute.
 * @_name: Attribute name.
 * @_mode: File mode.
 * @_show: Show handler. Optional, but mandatory if attribute is readable.
 * @_store: Store handler. Optional, but mandatory if attribute is writable.
 *
 * Like DEVICE_ATTR(), but ``SYSFS_PREALLOC`` is set on @_mode.
 */
#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
	struct device_attribute dev_attr_##_name = \
		__ATTR_PREALLOC(_name, _mode, _show, _store)

/**
 * DEVICE_ATTR_RW - Define a read-write device attribute.
 * @_name: Attribute name.
 *
 * Like DEVICE_ATTR(), but @_mode is 0644, @_show is <_name>_show,
 * and @_store is <_name>_store.
 */
#define DEVICE_ATTR_RW(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RW(_name)

/**
 * DEVICE_ATTR_ADMIN_RW - Define an admin-only read-write device attribute.
 * @_name: Attribute name.
 *
 * Like DEVICE_ATTR_RW(), but @_mode is 0600.
 */
#define DEVICE_ATTR_ADMIN_RW(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)

/**
 * DEVICE_ATTR_RO - Define a readable device attribute.
 * @_name: Attribute name.
 *
 * Like DEVICE_ATTR(), but @_mode is 0444 and @_show is <_name>_show.
 */
#define DEVICE_ATTR_RO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)

/**
 * DEVICE_ATTR_ADMIN_RO - Define an admin-only readable device attribute.
 * @_name: Attribute name.
 *
 * Like DEVICE_ATTR_RO(), but @_mode is 0400.
 */
#define DEVICE_ATTR_ADMIN_RO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)

/**
 * DEVICE_ATTR_WO - Define an admin-only writable device attribute.
 * @_name: Attribute name.
 *
 * Like DEVICE_ATTR(), but @_mode is 0200 and @_store is <_name>_store.
 */
#define DEVICE_ATTR_WO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_WO(_name)

/**
 * DEVICE_ULONG_ATTR - Define a device attribute backed by an unsigned long.
 * @_name: Attribute name.
 * @_mode: File mode.
 * @_var: Identifier of unsigned long.
 *
 * Like DEVICE_ATTR(), but @_show and @_store are automatically provided
 * such that reads and writes to the attribute from userspace affect @_var.
 */
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }

/**
 * DEVICE_INT_ATTR - Define a device attribute backed by an int.
 * @_name: Attribute name.
 * @_mode: File mode.
 * @_var: Identifier of int.
 *
 * Like DEVICE_ULONG_ATTR(), but @_var is an int.
 */
#define DEVICE_INT_ATTR(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }

/**
 * DEVICE_BOOL_ATTR - Define a device attribute backed by a bool.
 * @_name: Attribute name.
 * @_mode: File mode.
 * @_var: Identifier of bool.
 *
 * Like DEVICE_ULONG_ATTR(), but @_var is a bool.
 */
#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }

/**
 * DEVICE_STRING_ATTR_RO - Define a device attribute backed by a r/o string.
 * @_name: Attribute name.
 * @_mode: File mode.
 * @_var: Identifier of string.
 *
 * Like DEVICE_ULONG_ATTR(), but @_var is a string. Because the length of the
 * string allocation is unknown, the attribute must be read-only.
 */
#define DEVICE_STRING_ATTR_RO(_name, _mode, _var) \
	struct dev_ext_attribute dev_attr_##_name = \
		{ __ATTR(_name, (_mode) & ~0222, device_show_string, NULL), (_var) }

#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
	struct device_attribute dev_attr_##_name = \
		__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)

int device_create_file(struct device *device,
		       const struct device_attribute *entry);
void device_remove_file(struct device *dev,
			const struct device_attribute *attr);
bool device_remove_file_self(struct device *dev,
			     const struct device_attribute *attr);
int __must_check device_create_bin_file(struct device *dev,
					const struct bin_attribute *attr);
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr);

/* allows adding/removing a custom action to the devres stack */
int devm_remove_action_nowarn(struct device *dev, void (*action)(void *), void *data);

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
static inline void devm_remove_action(struct device *dev,
				      void (*action)(void *), void *data)
{
	WARN_ON(devm_remove_action_nowarn(dev, action, data));
}

void devm_release_action(struct device *dev, void (*action)(void *), void *data);

int __devm_add_action(struct device *dev, void (*action)(void *), void *data,
		      const char *name);
#define devm_add_action(dev, action, data) \
	__devm_add_action(dev, action, data, #action)

static inline int __devm_add_action_or_reset(struct device *dev,
					     void (*action)(void *),
					     void *data, const char *name)
{
	int ret;

	ret = __devm_add_action(dev, action, data, name);
	if (ret)
		action(data);

	return ret;
}
#define devm_add_action_or_reset(dev, action, data) \
	__devm_add_action_or_reset(dev, action, data, #action)

/**
 * devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @type: Type to allocate per-cpu memory for
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
#define devm_alloc_percpu(dev, type) \
	((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
						      __alignof__(type)))

void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, size_t align);
void devm_free_percpu(struct device *dev, void __percpu *pdata);
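/*
 * Illustrative sketch (not part of this header): registering a teardown
 * action with devm_add_action_or_reset() above. example_disable() and
 * example_hw_disable() are hypothetical stand-ins for a driver's cleanup.
 */
#if 0
extern void example_hw_disable(void *hw);	/* hypothetical */

static void example_disable(void *data)
{
	example_hw_disable(data);
}

static int example_probe_step(struct device *dev, void *hw)
{
	/*
	 * On failure, example_disable(hw) has already been invoked by the
	 * _or_reset variant; on success it will run on driver detach.
	 */
	return devm_add_action_or_reset(dev, example_disable, hw);
}
#endif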
struct device_dma_parameters {
	/*
	 * a low level driver may set these to teach IOMMU code about
	 * sg limitations.
	 */
	unsigned int max_segment_size;
	unsigned int min_align_mask;
	unsigned long segment_boundary_mask;
};

/**
 * enum device_link_state - Device link states.
 * @DL_STATE_NONE: The presence of the drivers is not being tracked.
 * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
 * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
 * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
 * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
 * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
 */
enum device_link_state {
	DL_STATE_NONE = -1,
	DL_STATE_DORMANT = 0,
	DL_STATE_AVAILABLE,
	DL_STATE_CONSUMER_PROBE,
	DL_STATE_ACTIVE,
	DL_STATE_SUPPLIER_UNBIND,
};

/*
 * Device link flags.
 *
 * STATELESS: The core will not remove this link automatically.
 * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
 * PM_RUNTIME: If set, the runtime PM framework will use this link.
 * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
 * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
 * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
 * MANAGED: The core tracks presence of supplier/consumer drivers (internal).
 * SYNC_STATE_ONLY: Link only affects sync_state() behavior.
 * INFERRED: Inferred from data (eg: firmware) and not from driver actions.
 */
#define DL_FLAG_STATELESS		BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER	BIT(1)
#define DL_FLAG_PM_RUNTIME		BIT(2)
#define DL_FLAG_RPM_ACTIVE		BIT(3)
#define DL_FLAG_AUTOREMOVE_SUPPLIER	BIT(4)
#define DL_FLAG_AUTOPROBE_CONSUMER	BIT(5)
#define DL_FLAG_MANAGED			BIT(6)
#define DL_FLAG_SYNC_STATE_ONLY		BIT(7)
#define DL_FLAG_INFERRED		BIT(8)
#define DL_FLAG_CYCLE			BIT(9)

/**
 * enum dl_dev_state - Device driver presence tracking information.
 * @DL_DEV_NO_DRIVER: There is no driver attached to the device.
 * @DL_DEV_PROBING: A driver is probing.
 * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
 * @DL_DEV_UNBINDING: The driver is unbinding from the device.
 */
enum dl_dev_state {
	DL_DEV_NO_DRIVER = 0,
	DL_DEV_PROBING,
	DL_DEV_DRIVER_BOUND,
	DL_DEV_UNBINDING,
};

/**
 * enum device_removable - Whether the device is removable. The criteria for a
 * device to be classified as removable is determined by its subsystem or bus.
 * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this
 *				    device (default).
 * @DEVICE_REMOVABLE_UNKNOWN: Device location is unknown.
 * @DEVICE_FIXED: Device is not removable by the user.
 * @DEVICE_REMOVABLE: Device is removable by the user.
 */
enum device_removable {
	DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */
	DEVICE_REMOVABLE_UNKNOWN,
	DEVICE_FIXED,
	DEVICE_REMOVABLE,
};

/**
 * struct dev_links_info - Device data related to device links.
 * @suppliers: List of links to supplier devices.
 * @consumers: List of links to consumer devices.
 * @defer_sync: Hook to global list of devices that have deferred sync_state.
 * @status: Driver status information.
 */
struct dev_links_info {
	struct list_head suppliers;
	struct list_head consumers;
	struct list_head defer_sync;
	enum dl_dev_state status;
};

/**
 * struct dev_msi_info - Device data related to MSI
 * @domain:	The MSI interrupt domain associated to the device
 * @data:	Pointer to MSI device data
 */
struct dev_msi_info {
#ifdef CONFIG_GENERIC_MSI_IRQ
	struct irq_domain	*domain;
	struct msi_device_data	*data;
#endif
};

/**
 * enum device_physical_location_panel - Describes which panel surface of the
 * system's housing the device connection point resides on.
 * @DEVICE_PANEL_TOP: Device connection point is on the top panel.
 * @DEVICE_PANEL_BOTTOM: Device connection point is on the bottom panel.
 * @DEVICE_PANEL_LEFT: Device connection point is on the left panel.
 * @DEVICE_PANEL_RIGHT: Device connection point is on the right panel.
 * @DEVICE_PANEL_FRONT: Device connection point is on the front panel.
 * @DEVICE_PANEL_BACK: Device connection point is on the back panel.
 * @DEVICE_PANEL_UNKNOWN: The panel with device connection point is unknown.
 */
enum device_physical_location_panel {
	DEVICE_PANEL_TOP,
	DEVICE_PANEL_BOTTOM,
	DEVICE_PANEL_LEFT,
	DEVICE_PANEL_RIGHT,
	DEVICE_PANEL_FRONT,
	DEVICE_PANEL_BACK,
	DEVICE_PANEL_UNKNOWN,
};

/**
 * enum device_physical_location_vertical_position - Describes vertical
 * position of the device connection point on the panel surface.
 * @DEVICE_VERT_POS_UPPER: Device connection point is at upper part of panel.
 * @DEVICE_VERT_POS_CENTER: Device connection point is at center part of panel.
 * @DEVICE_VERT_POS_LOWER: Device connection point is at lower part of panel.
 */
enum device_physical_location_vertical_position {
	DEVICE_VERT_POS_UPPER,
	DEVICE_VERT_POS_CENTER,
	DEVICE_VERT_POS_LOWER,
};

/**
 * enum device_physical_location_horizontal_position - Describes horizontal
 * position of the device connection point on the panel surface.
 * @DEVICE_HORI_POS_LEFT: Device connection point is at left part of panel.
 * @DEVICE_HORI_POS_CENTER: Device connection point is at center part of panel.
 * @DEVICE_HORI_POS_RIGHT: Device connection point is at right part of panel.
 */
enum device_physical_location_horizontal_position {
	DEVICE_HORI_POS_LEFT,
	DEVICE_HORI_POS_CENTER,
	DEVICE_HORI_POS_RIGHT,
};

/**
 * struct device_physical_location - Device data related to physical location
 * of the device connection point.
 * @panel: Panel surface of the system's housing that the device connection
 *         point resides on.
 * @vertical_position: Vertical position of the device connection point within
 *                     the panel.
 * @horizontal_position: Horizontal position of the device connection point
 *                       within the panel.
 * @dock: Set if the device connection point resides in a docking station or
 *        port replicator.
 * @lid: Set if this device connection point resides on the lid of laptop
 *       system.
 */
struct device_physical_location {
	enum device_physical_location_panel panel;
	enum device_physical_location_vertical_position vertical_position;
	enum device_physical_location_horizontal_position horizontal_position;
	bool dock;
	bool lid;
};

/**
 * struct device - The basic device structure
 * @parent:	The device's "parent" device, the device to which it is attached.
 *		In most cases, a parent device is some sort of bus or host
 *		controller. If parent is NULL, the device is a top-level device,
 *		which is not usually what you want.
 * @p:		Holds the private data of the driver core portions of the device.
 *		See the comment of the struct device_private for detail.
 * @kobj:	A top-level, abstract class from which other classes are derived.
 * @init_name:	Initial name of the device.
 * @type:	The type of device.
 *		This identifies the device type and carries type-specific
 *		information.
 * @mutex:	Mutex to synchronize calls to its driver.
 * @bus:	Type of bus device is on.
 * @driver:	Which driver has allocated this
 * @platform_data: Platform data specific to the device.
 *		Example: For devices on custom boards, as typical of embedded
 *		and SOC based hardware, Linux often uses platform_data to point
 *		to board-specific structures describing devices and how they
 *		are wired. That can include what ports are available, chip
 *		variants, which GPIO pins act in what additional roles, and so
 *		on. This shrinks the "Board Support Packages" (BSPs) and
 *		minimizes board-specific #ifdefs in drivers.
 * @driver_data: Private pointer for driver specific info.
 * @links:	Links to suppliers and consumers of this device.
 * @power:	For device power management.
 *		See Documentation/driver-api/pm/devices.rst for details.
 * @pm_domain:	Provide callbacks that are executed during system suspend,
 *		hibernation, system resume and during runtime PM transitions
 *		along with subsystem-level and driver-level callbacks.
 * @em_pd:	device's energy model performance domain
 * @pins:	For device pin management.
 *		See Documentation/driver-api/pin-control.rst for details.
 * @msi:	MSI related data
 * @numa_node:	NUMA node this device is close to.
 * @dma_ops:	DMA mapping operations for this device.
 * @dma_mask:	Dma mask (if dma'ble device).
 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mappings as not
 *		all hardware supports 64-bit addresses for consistent
 *		allocations such as descriptors.
 * @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
 *		DMA limit than the device itself supports.
 * @dma_range_map: map for DMA memory ranges relative to that of RAM
 * @dma_parms:	A low level driver may set these to teach IOMMU code about
 *		segment limitations.
 * @dma_pools:	Dma pools (if dma'ble device).
 * @dma_mem:	Internal for coherent mem override.
 * @cma_area:	Contiguous memory area for dma allocations
 * @dma_io_tlb_mem: Software IO TLB allocator. Not for driver use.
 * @dma_io_tlb_pools: List of transient swiotlb memory pools.
 * @dma_io_tlb_lock: Protects changes to the list of active pools.
 * @dma_uses_io_tlb: %true if device has used the software IO TLB.
 * @archdata:	For arch-specific additions.
 * @of_node:	Associated device tree node.
 * @fwnode:	Associated device node supplied by platform firmware.
 * @devt:	For creating the sysfs "dev".
 * @id:		device instance
 * @devres_lock: Spinlock to protect the resource of the device.
 * @devres_head: The resources list of the device.
 * @class:	The class of the device.
 * @groups:	Optional attribute groups.
 * @release:	Callback to free the device after all references have
 *		gone away. This should be set by the allocator of the
 *		device (i.e. the bus driver that discovered the device).
 * @iommu_group: IOMMU group the device belongs to.
 * @iommu:	Per device generic IOMMU runtime data
 * @physical_location: Describes physical location of the device connection
 *		point in the system housing.
 * @removable:	Whether the device can be removed from the system. This
 *		should be set by the subsystem / bus driver that discovered
 *		the device.
 *
 * @offline_disabled: If set, the device is permanently online.
 * @offline:	Set after successful invocation of bus type's .offline().
 * @of_node_reused: Set if the device-tree node is shared with an ancestor
 *		device.
 * @state_synced: The hardware state of this device has been synced to match
 *		the software state of this device by calling the driver/bus
 *		sync_state() callback.
 * @can_match:	The device has matched with a driver at least once or it is in
 *		a bus (like AMBA) which can't check for matching drivers until
 *		other devices probe successfully.
 * @dma_coherent: this particular device is dma coherent, even if the
 *		architecture supports non-coherent devices.
 * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
 *		streaming DMA operations (->map_* / ->unmap_* / ->sync_*),
 *		and optionally (if the coherent mask is large enough) also
 *		for dma allocations. This flag is managed by the dma ops
 *		instance from ->dma_supported.
 * @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
 * @dma_iommu:	Device is using default IOMMU implementation for DMA and
 *		doesn't rely on dma_ops structure.
 *
 * At the lowest level, every device in a Linux system is represented by an
 * instance of struct device.
 * The device structure contains the information
 * that the device model core needs to model the system. Most subsystems,
 * however, track additional information about the devices they host. As a
 * result, it is rare for devices to be represented by bare device structures;
 * instead, that structure, like kobject structures, is usually embedded within
 * a higher-level representation of the device.
 */
struct device {
	struct kobject kobj;
	struct device		*parent;

	struct device_private	*p;

	const char		*init_name; /* initial name of the device */
	const struct device_type *type;

	const struct bus_type	*bus;	/* type of bus device is on */
	struct device_driver *driver;	/* which driver has allocated this
					   device */
	void		*platform_data;	/* Platform specific data, device
					   core doesn't touch it */
	void		*driver_data;	/* Driver data, set and get with
					   dev_set_drvdata/dev_get_drvdata */
	struct mutex		mutex;	/* mutex to synchronize calls to
					 * its driver.
					 */

	struct dev_links_info	links;
	struct dev_pm_info	power;
	struct dev_pm_domain	*pm_domain;

#ifdef CONFIG_ENERGY_MODEL
	struct em_perf_domain	*em_pd;
#endif

#ifdef CONFIG_PINCTRL
	struct dev_pin_info	*pins;
#endif
	struct dev_msi_info	msi;
#ifdef CONFIG_ARCH_HAS_DMA_OPS
	const struct dma_map_ops *dma_ops;
#endif
	u64		*dma_mask;	/* dma mask (if dma'able device) */
	u64		coherent_dma_mask;/* Like dma_mask, but for
					     alloc_coherent mappings as
					     not all hardware supports
					     64 bit addresses for consistent
					     allocations such as descriptors. */
	u64		bus_dma_limit;	/* upstream dma constraint */
	const struct bus_dma_region *dma_range_map;

	struct device_dma_parameters *dma_parms;

	struct list_head	dma_pools;	/* dma pools (if dma'ble) */

#ifdef CONFIG_DMA_DECLARE_COHERENT
	struct dma_coherent_mem	*dma_mem; /* internal for coherent mem
					     override */
#endif
#ifdef CONFIG_DMA_CMA
	struct cma *cma_area;		/* contiguous memory area for dma
					   allocations */
#endif
#ifdef CONFIG_SWIOTLB
	struct io_tlb_mem *dma_io_tlb_mem;
#endif
#ifdef CONFIG_SWIOTLB_DYNAMIC
	struct list_head dma_io_tlb_pools;
	spinlock_t dma_io_tlb_lock;
	bool dma_uses_io_tlb;
#endif
	/* arch specific additions */
	struct dev_archdata	archdata;

	struct device_node	*of_node; /* associated device tree node */
	struct fwnode_handle	*fwnode; /* firmware device node */

#ifdef CONFIG_NUMA
	int		numa_node;	/* NUMA node this device is close to */
#endif
	dev_t			devt;	/* dev_t, creates the sysfs "dev" */
	u32			id;	/* device instance */

	spinlock_t		devres_lock;
	struct list_head	devres_head;

	const struct class	*class;
	const struct attribute_group **groups;	/* optional groups */

	void	(*release)(struct device *dev);
	struct iommu_group	*iommu_group;
	struct dev_iommu	*iommu;

	struct device_physical_location *physical_location;

	enum device_removable	removable;

	bool			offline_disabled:1;
	bool			offline:1;
	bool			of_node_reused:1;
	bool			state_synced:1;
	bool			can_match:1;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	bool			dma_coherent:1;
#endif
#ifdef CONFIG_DMA_OPS_BYPASS
	bool			dma_ops_bypass : 1;
#endif
#ifdef CONFIG_DMA_NEED_SYNC
	bool			dma_skip_sync:1;
#endif
#ifdef CONFIG_IOMMU_DMA
	bool			dma_iommu:1;
#endif
};

/**
 * struct device_link - Device link representation.
 * @supplier: The device on the supplier end of the link.
 * @s_node: Hook to the supplier device's list of links to consumers.
 * @consumer: The device on the consumer end of the link.
 * @c_node: Hook to the consumer device's list of links to suppliers.
* @link_dev: device used to expose link details in sysfs * @status: The state of the link (with respect to the presence of drivers). * @flags: Link flags. * @rpm_active: Whether or not the consumer device is runtime-PM-active. * @kref: Count repeated addition of the same link. * @rm_work: Work structure used for removing the link. * @supplier_preactivated: Supplier has been made active before consumer probe. */ struct device_link { struct device *supplier; struct list_head s_node; struct device *consumer; struct list_head c_node; struct device link_dev; enum device_link_state status; u32 flags; refcount_t rpm_active; struct kref kref; struct work_struct rm_work; bool supplier_preactivated; /* Owned by consumer probe. */ }; #define kobj_to_dev(__kobj) container_of_const(__kobj, struct device, kobj) /** * device_iommu_mapped - Returns true when the device DMA is translated * by an IOMMU * @dev: Device to perform the check on */ static inline bool device_iommu_mapped(struct device *dev) { return (dev->iommu_group != NULL); } /* Get the wakeup routines, which depend on struct device */ #include <linux/pm_wakeup.h> /** * dev_name - Return a device's name. * @dev: Device with name to get. * Return: The kobject name of the device, or its initial name if unavailable. */ static inline const char *dev_name(const struct device *dev) { /* Use the init name until the kobject becomes available */ if (dev->init_name) return dev->init_name; return kobject_name(&dev->kobj); } /** * dev_bus_name - Return a device's bus/class name, if at all possible * @dev: struct device to get the bus/class name of * * Will return the name of the bus/class the device is attached to. If it is * not attached to a bus/class, an empty string will be returned. */ static inline const char *dev_bus_name(const struct device *dev) { return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : ""); } __printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...); #ifdef CONFIG_NUMA static inline int dev_to_node(struct device *dev) { return dev->numa_node; } static inline void set_dev_node(struct device *dev, int node) { dev->numa_node = node; } #else static inline int dev_to_node(struct device *dev) { return NUMA_NO_NODE; } static inline void set_dev_node(struct device *dev, int node) { } #endif static inline struct irq_domain *dev_get_msi_domain(const struct device *dev) { #ifdef CONFIG_GENERIC_MSI_IRQ return dev->msi.domain; #else return NULL; #endif } static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d) { #ifdef CONFIG_GENERIC_MSI_IRQ dev->msi.domain = d; #endif } static inline void *dev_get_drvdata(const struct device *dev) { return dev->driver_data; } static inline void dev_set_drvdata(struct device *dev, void *data) { dev->driver_data = data; } static inline struct pm_subsys_data *dev_to_psd(struct device *dev) { return dev ? 
dev->power.subsys_data : NULL; } static inline unsigned int dev_get_uevent_suppress(const struct device *dev) { return dev->kobj.uevent_suppress; } static inline void dev_set_uevent_suppress(struct device *dev, int val) { dev->kobj.uevent_suppress = val; } static inline int device_is_registered(struct device *dev) { return dev->kobj.state_in_sysfs; } static inline void device_enable_async_suspend(struct device *dev) { if (!dev->power.is_prepared) dev->power.async_suspend = true; } static inline void device_disable_async_suspend(struct device *dev) { if (!dev->power.is_prepared) dev->power.async_suspend = false; } static inline bool device_async_suspend_enabled(struct device *dev) { return !!dev->power.async_suspend; } static inline bool device_pm_not_required(struct device *dev) { return dev->power.no_pm; } static inline void device_set_pm_not_required(struct device *dev) { dev->power.no_pm = true; } static inline void dev_pm_syscore_device(struct device *dev, bool val) { #ifdef CONFIG_PM_SLEEP dev->power.syscore = val; #endif } static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags) { dev->power.driver_flags = flags; } static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags) { return !!(dev->power.driver_flags & flags); } static inline bool dev_pm_smart_suspend(struct device *dev) { #ifdef CONFIG_PM_SLEEP return dev->power.smart_suspend; #else return false; #endif } static inline void device_lock(struct device *dev) { mutex_lock(&dev->mutex); } static inline int device_lock_interruptible(struct device *dev) { return mutex_lock_interruptible(&dev->mutex); } static inline int device_trylock(struct device *dev) { return mutex_trylock(&dev->mutex); } static inline void device_unlock(struct device *dev) { mutex_unlock(&dev->mutex); } DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T)) static inline void device_lock_assert(struct device *dev) { lockdep_assert_held(&dev->mutex); } static inline bool dev_has_sync_state(struct device *dev) { if (!dev) return false; if (dev->driver && dev->driver->sync_state) return true; if (dev->bus && dev->bus->sync_state) return true; return false; } static inline void dev_set_removable(struct device *dev, enum device_removable removable) { dev->removable = removable; } static inline bool dev_is_removable(struct device *dev) { return dev->removable == DEVICE_REMOVABLE; } static inline bool dev_removable_is_valid(struct device *dev) { return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED; } /* * High level routines for use by the bus drivers */ int __must_check device_register(struct device *dev); void device_unregister(struct device *dev); void device_initialize(struct device *dev); int __must_check device_add(struct device *dev); void device_del(struct device *dev); DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T)) int device_for_each_child(struct device *parent, void *data, device_iter_t fn); int device_for_each_child_reverse(struct device *parent, void *data, device_iter_t fn); int device_for_each_child_reverse_from(struct device *parent, struct device *from, void *data, device_iter_t fn); struct device *device_find_child(struct device *parent, const void *data, device_match_t match); /** * device_find_child_by_name - device iterator for locating a child device. * @parent: parent struct device * @name: name of the child device * * This is similar to the device_find_child() function above, but it * returns a reference to a device that has the name @name. 
* * NOTE: you will need to drop the reference with put_device() after use. */ static inline struct device *device_find_child_by_name(struct device *parent, const char *name) { return device_find_child(parent, name, device_match_name); } /** * device_find_any_child - device iterator for locating a child device, if any. * @parent: parent struct device * * This is similar to the device_find_child() function above, but it * returns a reference to a child device, if any. * * NOTE: you will need to drop the reference with put_device() after use. */ static inline struct device *device_find_any_child(struct device *parent) { return device_find_child(parent, NULL, device_match_any); } int device_rename(struct device *dev, const char *new_name); int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order); int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); static inline bool device_supports_offline(struct device *dev) { return dev->bus && dev->bus->offline && dev->bus->online; } #define __device_lock_set_class(dev, name, key) \ do { \ struct device *__d2 __maybe_unused = dev; \ lock_set_class(&__d2->mutex.dep_map, name, key, 0, _THIS_IP_); \ } while (0) /** * device_lock_set_class - Specify a temporary lock class while a device * is attached to a driver * @dev: device to modify * @key: lock class key data * * This must be called with the device_lock() already held, for example * from driver ->probe(). Take care to only override the default * lockdep_no_validate class. */ #ifdef CONFIG_LOCKDEP #define device_lock_set_class(dev, key) \ do { \ struct device *__d = dev; \ dev_WARN_ONCE(__d, !lockdep_match_class(&__d->mutex, \ &__lockdep_no_validate__), \ "overriding existing custom lock class\n"); \ __device_lock_set_class(__d, #key, key); \ } while (0) #else #define device_lock_set_class(dev, key) __device_lock_set_class(dev, #key, key) #endif /** * device_lock_reset_class - Return a device to the default lockdep novalidate state * @dev: device to modify * * This must be called with the device_lock() already held, for example * from driver ->remove(). 
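 *
 * Example (editor's sketch; foo_probe/foo_remove and foo_lock_key are
 * hypothetical driver callbacks and key):
 *
 *	static struct lock_class_key foo_lock_key;
 *
 *	foo_probe(dev):  device_lock_set_class(dev, &foo_lock_key);
 *	foo_remove(dev): device_lock_reset_class(dev);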
*/ #define device_lock_reset_class(dev) \ do { \ struct device *__d __maybe_unused = dev; \ lock_set_novalidate_class(&__d->mutex.dep_map, "&dev->mutex", \ _THIS_IP_); \ } while (0) void lock_device_hotplug(void); void unlock_device_hotplug(void); int lock_device_hotplug_sysfs(void); int device_offline(struct device *dev); int device_online(struct device *dev); void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); void device_set_node(struct device *dev, struct fwnode_handle *fwnode); int device_add_of_node(struct device *dev, struct device_node *of_node); void device_remove_of_node(struct device *dev); void device_set_of_node_from_dev(struct device *dev, const struct device *dev2); static inline struct device_node *dev_of_node(struct device *dev) { if (!IS_ENABLED(CONFIG_OF) || !dev) return NULL; return dev->of_node; } static inline int dev_num_vf(struct device *dev) { if (dev->bus && dev->bus->num_vf) return dev->bus->num_vf(dev); return 0; } /* * Root device objects for grouping under /sys/devices */ struct device *__root_device_register(const char *name, struct module *owner); /* This is a macro to avoid include problems with THIS_MODULE */ #define root_device_register(name) \ __root_device_register(name, THIS_MODULE) void root_device_unregister(struct device *root); static inline void *dev_get_platdata(const struct device *dev) { return dev->platform_data; } /* * Manual binding of a device to driver. See drivers/base/bus.c * for information on use. */ int __must_check device_driver_attach(const struct device_driver *drv, struct device *dev); int __must_check device_bind_driver(struct device *dev); void device_release_driver(struct device *dev); int __must_check device_attach(struct device *dev); int __must_check driver_attach(const struct device_driver *drv); void device_initial_probe(struct device *dev); int __must_check device_reprobe(struct device *dev); bool device_is_bound(struct device *dev); /* * Easy functions for dynamically creating devices on the fly */ __printf(5, 6) struct device * device_create(const struct class *cls, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...); __printf(6, 7) struct device * device_create_with_groups(const struct class *cls, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, ...); void device_destroy(const struct class *cls, dev_t devt); int __must_check device_add_groups(struct device *dev, const struct attribute_group **groups); void device_remove_groups(struct device *dev, const struct attribute_group **groups); static inline int __must_check device_add_group(struct device *dev, const struct attribute_group *grp) { const struct attribute_group *groups[] = { grp, NULL }; return device_add_groups(dev, groups); } static inline void device_remove_group(struct device *dev, const struct attribute_group *grp) { const struct attribute_group *groups[] = { grp, NULL }; device_remove_groups(dev, groups); } int __must_check devm_device_add_group(struct device *dev, const struct attribute_group *grp); /* * get_device - atomically increment the reference count for the device. 
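 *
 * Return: @dev with its reference count incremented, or NULL if @dev
 * was NULL.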
*/ struct device *get_device(struct device *dev); void put_device(struct device *dev); DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T)) bool kill_device(struct device *dev); #ifdef CONFIG_DEVTMPFS int devtmpfs_mount(void); #else static inline int devtmpfs_mount(void) { return 0; } #endif /* drivers/base/power/shutdown.c */ void device_shutdown(void); /* debugging and troubleshooting/diagnostic helpers. */ const char *dev_driver_string(const struct device *dev); /* Device links interface. */ struct device_link *device_link_add(struct device *consumer, struct device *supplier, u32 flags); void device_link_del(struct device_link *link); void device_link_remove(void *consumer, struct device *supplier); void device_links_supplier_sync_state_pause(void); void device_links_supplier_sync_state_resume(void); void device_link_wait_removal(void); /* Create aliases, so the module can be autoloaded. */ #define MODULE_ALIAS_CHARDEV(major,minor) \ MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) #define MODULE_ALIAS_CHARDEV_MAJOR(major) \ MODULE_ALIAS("char-major-" __stringify(major) "-*") #endif /* _DEVICE_H_ */
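/*
 * Editor's illustration (not part of the original header): a minimal
 * sketch of the embedding pattern described in the struct device
 * kernel-doc above. A hypothetical foo_device wraps struct device, and
 * the release() callback frees the outer object once the last reference
 * is dropped via put_device(). Assumes <linux/slab.h> for
 * kzalloc()/kfree().
 */
struct foo_device {
	struct device dev;
	int id;
};

#define to_foo_device(d) container_of(d, struct foo_device, dev)

static void foo_device_release(struct device *dev)
{
	kfree(to_foo_device(dev));
}

static struct foo_device *foo_device_create(struct device *parent)
{
	struct foo_device *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	device_initialize(&foo->dev);
	foo->dev.parent = parent;
	foo->dev.release = foo_device_release;

	if (dev_set_name(&foo->dev, "foo%d", 0) || device_add(&foo->dev)) {
		put_device(&foo->dev);	/* release() frees foo */
		return NULL;
	}
	return foo;
}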
// SPDX-License-Identifier: GPL-2.0+ /* * Clean ups from Moschip version and a few ioctl implementations by: * Paul B Schroeder <pschroeder "at" uplogix "dot" com> * * Originally based on drivers/usb/serial/io_edgeport.c which is: * Copyright (C) 2000 Inside Out Networks, All rights reserved. * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com> * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/serial.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/uaccess.h> #define DRIVER_DESC "Moschip 7840/7820 USB Serial Driver" /* * 16C50 UART register defines */ #define LCR_BITS_5 0x00 /* 5 bits/char */ #define LCR_BITS_6 0x01 /* 6 bits/char */ #define LCR_BITS_7 0x02 /* 7 bits/char */ #define LCR_BITS_8 0x03 /* 8 bits/char */ #define LCR_BITS_MASK 0x03 /* Mask for bits/char field */ #define LCR_STOP_1 0x00 /* 1 stop bit */ #define LCR_STOP_1_5 0x04 /* 1.5 stop bits (if 5 bits/char) */ #define LCR_STOP_2 0x04 /* 2 stop bits (if 6-8 bits/char) */ #define LCR_STOP_MASK 0x04 /* Mask for stop bits field */ #define LCR_PAR_NONE 0x00 /* No parity */ #define LCR_PAR_ODD 0x08 /* Odd parity */ #define LCR_PAR_EVEN 0x18 /* Even parity */ #define LCR_PAR_MARK 0x28 /* Force parity bit to 1 */ #define LCR_PAR_SPACE 0x38 /* Force parity bit to 0 */ #define LCR_PAR_MASK 0x38 /* Mask for parity field */ #define LCR_SET_BREAK 0x40 /* Set Break condition */ #define LCR_DL_ENABLE 0x80 /* Enable access to divisor latch */ #define MCR_DTR 0x01 /* Assert DTR */ #define MCR_RTS 0x02 /* Assert RTS */ #define MCR_OUT1 0x04 /* Loopback only: Sets state of RI */ #define MCR_MASTER_IE 0x08 /* Enable interrupt outputs */ #define MCR_LOOPBACK 0x10 /* Set internal (digital) loopback mode */ #define MCR_XON_ANY 0x20 /* Enable any char to exit XOFF mode */ #define MOS7840_MSR_CTS 0x10 /* Current state of CTS */ #define MOS7840_MSR_DSR 0x20 /* Current state of DSR */ #define MOS7840_MSR_RI 0x40 /* Current state of RI */ #define MOS7840_MSR_CD 0x80 /* Current state of CD */ /* * Defines used for sending commands to port */ #define MOS_WDR_TIMEOUT 5000 /* default urb timeout */ /* Requests */ #define MCS_RD_RTYPE 0xC0 #define MCS_WR_RTYPE 0x40 #define MCS_RDREQ 0x0D #define MCS_WRREQ 0x0E #define VENDOR_READ_LENGTH (0x01) #define ZLP_REG1 0x3A /* Zero_Flag_Reg1 58 */ #define ZLP_REG5 0x3E /* Zero_Flag_Reg5 62 */ /* * Vendor id and device
id defines * * NOTE: Do not add new defines, add entries directly to the id_table instead. */ #define USB_VENDOR_ID_BANDB 0x0856 #define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 #define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00 #define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 #define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01 #define BANDB_DEVICE_ID_US9ML2_2 0xAC29 #define BANDB_DEVICE_ID_US9ML2_4 0xAC30 #define BANDB_DEVICE_ID_USPTL4_2 0xAC31 #define BANDB_DEVICE_ID_USPTL4_4 0xAC32 #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 #define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02 #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 #define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03 /* Interrupt Routine Defines */ #define SERIAL_IIR_RLS 0x06 #define SERIAL_IIR_MS 0x00 /* * Emulation of the bit mask on the LINE STATUS REGISTER. */ #define SERIAL_LSR_DR 0x0001 #define SERIAL_LSR_OE 0x0002 #define SERIAL_LSR_PE 0x0004 #define SERIAL_LSR_FE 0x0008 #define SERIAL_LSR_BI 0x0010 #define MOS_MSR_DELTA_CTS 0x10 #define MOS_MSR_DELTA_DSR 0x20 #define MOS_MSR_DELTA_RI 0x40 #define MOS_MSR_DELTA_CD 0x80 /* Serial Port register Address */ #define INTERRUPT_ENABLE_REGISTER ((__u16)(0x01)) #define FIFO_CONTROL_REGISTER ((__u16)(0x02)) #define LINE_CONTROL_REGISTER ((__u16)(0x03)) #define MODEM_CONTROL_REGISTER ((__u16)(0x04)) #define LINE_STATUS_REGISTER ((__u16)(0x05)) #define MODEM_STATUS_REGISTER ((__u16)(0x06)) #define SCRATCH_PAD_REGISTER ((__u16)(0x07)) #define DIVISOR_LATCH_LSB ((__u16)(0x00)) #define DIVISOR_LATCH_MSB ((__u16)(0x01)) #define CLK_MULTI_REGISTER ((__u16)(0x02)) #define CLK_START_VALUE_REGISTER ((__u16)(0x03)) #define GPIO_REGISTER ((__u16)(0x07)) #define SERIAL_LCR_DLAB ((__u16)(0x0080)) /* * URB POOL related defines */ #define NUM_URBS 16 /* URB Count */ #define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */ /* LED on/off milliseconds*/ #define LED_ON_MS 500 #define LED_OFF_MS 500 enum mos7840_flag { MOS7840_FLAG_LED_BUSY, }; #define MCS_PORT_MASK GENMASK(2, 0) #define MCS_PORTS(nr) ((nr) & MCS_PORT_MASK) #define MCS_LED BIT(3) #define MCS_DEVICE(vid, pid, flags) \ USB_DEVICE((vid), (pid)), .driver_info = (flags) static const struct usb_device_id id_table[] = { { MCS_DEVICE(0x0557, 0x2011, MCS_PORTS(4)) }, /* ATEN UC2324 */ { MCS_DEVICE(0x0557, 0x7820, MCS_PORTS(2)) }, /* ATEN UC2322 */ { MCS_DEVICE(0x110a, 0x2210, MCS_PORTS(2)) }, /* Moxa UPort 2210 */ { MCS_DEVICE(0x9710, 0x7810, MCS_PORTS(1) | MCS_LED) }, /* ASIX MCS7810 */ { MCS_DEVICE(0x9710, 0x7820, MCS_PORTS(2)) }, /* MosChip MCS7820 */ { MCS_DEVICE(0x9710, 0x7840, MCS_PORTS(4)) }, /* MosChip MCS7840 */ { MCS_DEVICE(0x9710, 0x7843, MCS_PORTS(3)) }, /* ASIX MCS7840 3 port */ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2) }, { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P) }, { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4) }, { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P) }, { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2) }, { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4) }, { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2) }, { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4) }, { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2) }, { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P) }, { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4) }, { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P) }, {} /* terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); /* This structure holds all of the local port information */ struct moschip_port { int port_num; /*Actual 
port number in the device(1,2,etc) */ struct urb *read_urb; /* read URB for this port */ __u8 shadowLCR; /* last LCR value received */ __u8 shadowMCR; /* last MCR value received */ struct usb_serial_port *port; /* loop back to the owner of this object */ /* Offsets */ __u8 SpRegOffset; __u8 ControlRegOffset; __u8 DcrRegOffset; spinlock_t pool_lock; struct urb *write_urb_pool[NUM_URBS]; char busy[NUM_URBS]; bool read_urb_busy; /* For device(s) with LED indicator */ bool has_led; struct timer_list led_timer1; /* Timer for LED on */ struct timer_list led_timer2; /* Timer for LED off */ struct urb *led_urb; struct usb_ctrlrequest *led_dr; unsigned long flags; }; /* * mos7840_set_reg_sync * Synchronously write a device (SP/control) register using * usb_control_msg() on the default control pipe. */ static int mos7840_set_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 val) { struct usb_device *dev = port->serial->dev; val = val & 0x00ff; dev_dbg(&port->dev, "mos7840_set_reg_sync offset is %x, value %x\n", reg, val); return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ, MCS_WR_RTYPE, val, reg, NULL, 0, MOS_WDR_TIMEOUT); } /* * mos7840_get_reg_sync * Synchronously read a device register using usb_control_msg() on the * default control pipe. */ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg, __u16 *val) { struct usb_device *dev = port->serial->dev; int ret = 0; u8 *buf; buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL); if (!buf) return -ENOMEM; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); if (ret < VENDOR_READ_LENGTH) { if (ret >= 0) ret = -EIO; goto out; } *val = buf[0]; dev_dbg(&port->dev, "%s offset is %x, return val %x\n", __func__, reg, *val); out: kfree(buf); return ret; } /* * mos7840_set_uart_reg * Synchronously write a per-port UART register using usb_control_msg(); * the port (application) number is encoded in the value written. */ static int mos7840_set_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 val) { struct usb_device *dev = port->serial->dev; val = val & 0x00ff; /* For the UART control registers, the application number needs to be OR'ed in */ if (port->serial->num_ports == 2 && port->port_number != 0) val |= ((__u16)port->port_number + 2) << 8; else val |= ((__u16)port->port_number + 1) << 8; dev_dbg(&port->dev, "%s application number is %x\n", __func__, val); return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ, MCS_WR_RTYPE, val, reg, NULL, 0, MOS_WDR_TIMEOUT); } /* * mos7840_get_uart_reg * Synchronously read a per-port UART register using usb_control_msg(); * the port (application) number is passed in wValue.
*/ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg, __u16 *val) { struct usb_device *dev = port->serial->dev; int ret = 0; __u16 Wval; u8 *buf; buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL); if (!buf) return -ENOMEM; /* Wval is same as application number */ if (port->serial->num_ports == 2 && port->port_number != 0) Wval = ((__u16)port->port_number + 2) << 8; else Wval = ((__u16)port->port_number + 1) << 8; dev_dbg(&port->dev, "%s application number is %x\n", __func__, Wval); ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); if (ret < VENDOR_READ_LENGTH) { if (ret >= 0) ret = -EIO; goto out; } *val = buf[0]; out: kfree(buf); return ret; } static void mos7840_dump_serial_port(struct usb_serial_port *port, struct moschip_port *mos7840_port) { dev_dbg(&port->dev, "SpRegOffset is %2x\n", mos7840_port->SpRegOffset); dev_dbg(&port->dev, "ControlRegOffset is %2x\n", mos7840_port->ControlRegOffset); dev_dbg(&port->dev, "DCRRegOffset is %2x\n", mos7840_port->DcrRegOffset); } /************************************************************************/ /************************************************************************/ /* U S B C A L L B A C K F U N C T I O N S */ /* U S B C A L L B A C K F U N C T I O N S */ /************************************************************************/ /************************************************************************/ static void mos7840_set_led_callback(struct urb *urb) { switch (urb->status) { case 0: /* Success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* This urb is terminated, clean up */ dev_dbg(&urb->dev->dev, "%s - urb shutting down: %d\n", __func__, urb->status); break; default: dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, urb->status); } } static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval, __u16 reg) { struct usb_device *dev = mcs->port->serial->dev; struct usb_ctrlrequest *dr = mcs->led_dr; dr->bRequestType = MCS_WR_RTYPE; dr->bRequest = MCS_WRREQ; dr->wValue = cpu_to_le16(wval); dr->wIndex = cpu_to_le16(reg); dr->wLength = cpu_to_le16(0); usb_fill_control_urb(mcs->led_urb, dev, usb_sndctrlpipe(dev, 0), (unsigned char *)dr, NULL, 0, mos7840_set_led_callback, NULL); usb_submit_urb(mcs->led_urb, GFP_ATOMIC); } static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg, __u16 val) { struct usb_device *dev = port->serial->dev; usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ, MCS_WR_RTYPE, val, reg, NULL, 0, MOS_WDR_TIMEOUT); } static void mos7840_led_off(struct timer_list *t) { struct moschip_port *mcs = from_timer(mcs, t, led_timer1); /* Turn off LED */ mos7840_set_led_async(mcs, 0x0300, MODEM_CONTROL_REGISTER); mod_timer(&mcs->led_timer2, jiffies + msecs_to_jiffies(LED_OFF_MS)); } static void mos7840_led_flag_off(struct timer_list *t) { struct moschip_port *mcs = from_timer(mcs, t, led_timer2); clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags); } static void mos7840_led_activity(struct usb_serial_port *port) { struct moschip_port *mos7840_port = usb_get_serial_port_data(port); if (test_and_set_bit_lock(MOS7840_FLAG_LED_BUSY, &mos7840_port->flags)) return; mos7840_set_led_async(mos7840_port, 0x0301, MODEM_CONTROL_REGISTER); mod_timer(&mos7840_port->led_timer1, jiffies + msecs_to_jiffies(LED_ON_MS)); } /***************************************************************************** * mos7840_bulk_in_callback * this is the callback function for when we have received 
data on the * bulk in endpoint. *****************************************************************************/ static void mos7840_bulk_in_callback(struct urb *urb) { struct moschip_port *mos7840_port = urb->context; struct usb_serial_port *port = mos7840_port->port; int retval; unsigned char *data; int status = urb->status; if (status) { dev_dbg(&urb->dev->dev, "nonzero read bulk status received: %d\n", status); mos7840_port->read_urb_busy = false; return; } data = urb->transfer_buffer; usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data); if (urb->actual_length) { struct tty_port *tport = &mos7840_port->port->port; tty_insert_flip_string(tport, data, urb->actual_length); tty_flip_buffer_push(tport); port->icount.rx += urb->actual_length; dev_dbg(&port->dev, "icount.rx is %d:\n", port->icount.rx); } if (mos7840_port->has_led) mos7840_led_activity(port); mos7840_port->read_urb_busy = true; retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC); if (retval) { dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, retval = %d\n", retval); mos7840_port->read_urb_busy = false; } } /***************************************************************************** * mos7840_bulk_out_data_callback * this is the callback function for when we have finished sending * serial data on the bulk out endpoint. *****************************************************************************/ static void mos7840_bulk_out_data_callback(struct urb *urb) { struct moschip_port *mos7840_port = urb->context; struct usb_serial_port *port = mos7840_port->port; int status = urb->status; unsigned long flags; int i; spin_lock_irqsave(&mos7840_port->pool_lock, flags); for (i = 0; i < NUM_URBS; i++) { if (urb == mos7840_port->write_urb_pool[i]) { mos7840_port->busy[i] = 0; break; } } spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); if (status) { dev_dbg(&port->dev, "nonzero write bulk status received:%d\n", status); return; } tty_port_tty_wakeup(&port->port); } /************************************************************************/ /* D R I V E R T T Y I N T E R F A C E F U N C T I O N S */ /************************************************************************/ /***************************************************************************** * mos7840_open * this function is called by the tty driver when a port is opened * If successful, we return 0 * Otherwise we return a negative error number. 
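 *
 * (Editor's summary of the body below: allocate the NUM_URBS-deep
 * write URB pool, set up the per-port SP/control registers, program
 * the 16C50 defaults (FCR, LCR, MCR, divisor), point read_urb at the
 * proper bulk-in endpoint -- the second port of a two-port device uses
 * its endpoint address + 2 -- and submit the first read URB.)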
*****************************************************************************/ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) { struct moschip_port *mos7840_port = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; int response; int j; struct urb *urb; __u16 Data; int status; usb_clear_halt(serial->dev, port->write_urb->pipe); usb_clear_halt(serial->dev, port->read_urb->pipe); /* Initialising the write urb pool */ for (j = 0; j < NUM_URBS; ++j) { urb = usb_alloc_urb(0, GFP_KERNEL); mos7840_port->write_urb_pool[j] = urb; if (!urb) continue; urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); if (!urb->transfer_buffer) { usb_free_urb(urb); mos7840_port->write_urb_pool[j] = NULL; continue; } } /***************************************************************************** * Initialize MCS7840 -- Write Init values to corresponding Registers * * Register Index * 1 : IER * 2 : FCR * 3 : LCR * 4 : MCR * * 0x08 : SP1/2 Control Reg *****************************************************************************/ /* NEED to check the following Block */ Data = 0x0; status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data); if (status < 0) { dev_dbg(&port->dev, "Reading Spreg failed\n"); goto err; } Data |= 0x80; status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); if (status < 0) { dev_dbg(&port->dev, "writing Spreg failed\n"); goto err; } Data &= ~0x80; status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); if (status < 0) { dev_dbg(&port->dev, "writing Spreg failed\n"); goto err; } /* End of block to be checked */ Data = 0x0; status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset, &Data); if (status < 0) { dev_dbg(&port->dev, "Reading Controlreg failed\n"); goto err; } Data |= 0x08; /* Driver done bit */ Data |= 0x20; /* rx_disable */ status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset, Data); if (status < 0) { dev_dbg(&port->dev, "writing Controlreg failed\n"); goto err; } /* do register settings here */ /* Set all regs to the device default values. */ /*********************************** * First Disable all interrupts. 
***********************************/ Data = 0x00; status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "disabling interrupts failed\n"); goto err; } /* Set FIFO_CONTROL_REGISTER to the default value */ Data = 0x00; status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n"); goto err; } Data = 0xcf; status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n"); goto err; } Data = 0x03; status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); mos7840_port->shadowLCR = Data; Data = 0x0b; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); mos7840_port->shadowMCR = Data; Data = 0x00; status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data); mos7840_port->shadowLCR = Data; Data |= SERIAL_LCR_DLAB; /* data latch enable in LCR 0x80 */ status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); Data = 0x0c; status = mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data); Data = 0x0; status = mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data); Data = 0x00; status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data); Data = Data & ~SERIAL_LCR_DLAB; status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); mos7840_port->shadowLCR = Data; /* clearing Bulkin and Bulkout Fifo */ Data = 0x0; status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data); Data = Data | 0x0c; status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); Data = Data & ~0x0c; status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); /* Finally enable all interrupts */ Data = 0x0c; status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); /* clearing rx_disable */ Data = 0x0; status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset, &Data); Data = Data & ~0x20; status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset, Data); /* rx_negate */ Data = 0x0; status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset, &Data); Data = Data | 0x10; status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset, Data); dev_dbg(&port->dev, "port number is %d\n", port->port_number); dev_dbg(&port->dev, "minor number is %d\n", port->minor); dev_dbg(&port->dev, "Bulkin endpoint is %d\n", port->bulk_in_endpointAddress); dev_dbg(&port->dev, "BulkOut endpoint is %d\n", port->bulk_out_endpointAddress); dev_dbg(&port->dev, "Interrupt endpoint is %d\n", port->interrupt_in_endpointAddress); dev_dbg(&port->dev, "port's number in the device is %d\n", mos7840_port->port_num); mos7840_port->read_urb = port->read_urb; /* set up our bulk in urb */ if ((serial->num_ports == 2) && (((__u16)port->port_number % 2) != 0)) { usb_fill_bulk_urb(mos7840_port->read_urb, serial->dev, usb_rcvbulkpipe(serial->dev, (port->bulk_in_endpointAddress) + 2), port->bulk_in_buffer, mos7840_port->read_urb->transfer_buffer_length, mos7840_bulk_in_callback, mos7840_port); } else { usb_fill_bulk_urb(mos7840_port->read_urb, serial->dev, usb_rcvbulkpipe(serial->dev, port->bulk_in_endpointAddress), port->bulk_in_buffer, mos7840_port->read_urb->transfer_buffer_length, mos7840_bulk_in_callback, mos7840_port); } dev_dbg(&port->dev, "%s: bulkin endpoint is %d\n", __func__, port->bulk_in_endpointAddress); mos7840_port->read_urb_busy = true; response = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL); if (response) { dev_err(&port->dev, "%s - 
Error %d submitting read urb\n", __func__, response); mos7840_port->read_urb_busy = false; } /* initialize our port settings */ /* Must set to enable ints! */ mos7840_port->shadowMCR = MCR_MASTER_IE; return 0; err: for (j = 0; j < NUM_URBS; ++j) { urb = mos7840_port->write_urb_pool[j]; if (!urb) continue; kfree(urb->transfer_buffer); usb_free_urb(urb); } return status; } /***************************************************************************** * mos7840_chars_in_buffer * this function is called by the tty driver when it wants to know how many * bytes of data we currently have outstanding in the port (data that has * been written, but hasn't made it out the port yet) *****************************************************************************/ static unsigned int mos7840_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct moschip_port *mos7840_port = usb_get_serial_port_data(port); int i; unsigned int chars = 0; unsigned long flags; spin_lock_irqsave(&mos7840_port->pool_lock, flags); for (i = 0; i < NUM_URBS; ++i) { if (mos7840_port->busy[i]) { struct urb *urb = mos7840_port->write_urb_pool[i]; chars += urb->transfer_buffer_length; } } spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); dev_dbg(&port->dev, "%s - returns %u\n", __func__, chars); return chars; } /***************************************************************************** * mos7840_close * this function is called by the tty driver when a port is closed *****************************************************************************/ static void mos7840_close(struct usb_serial_port *port) { struct moschip_port *mos7840_port = usb_get_serial_port_data(port); int j; __u16 Data; for (j = 0; j < NUM_URBS; ++j) usb_kill_urb(mos7840_port->write_urb_pool[j]); /* Freeing Write URBs */ for (j = 0; j < NUM_URBS; ++j) { if (mos7840_port->write_urb_pool[j]) { kfree(mos7840_port->write_urb_pool[j]->transfer_buffer); usb_free_urb(mos7840_port->write_urb_pool[j]); } } usb_kill_urb(mos7840_port->read_urb); mos7840_port->read_urb_busy = false; Data = 0x0; mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); Data = 0x00; mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); } /***************************************************************************** * mos7840_break * this function sets or clears a break condition on the port *****************************************************************************/ static int mos7840_break(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct moschip_port *mos7840_port = usb_get_serial_port_data(port); unsigned char data; if (break_state == -1) data = mos7840_port->shadowLCR | LCR_SET_BREAK; else data = mos7840_port->shadowLCR & ~LCR_SET_BREAK; /* FIXME: no locking on shadowLCR anywhere in driver */ mos7840_port->shadowLCR = data; dev_dbg(&port->dev, "%s mos7840_port->shadowLCR is %x\n", __func__, mos7840_port->shadowLCR); return mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, mos7840_port->shadowLCR); } /***************************************************************************** * mos7840_write_room * this function is called by the tty driver when it wants to know how many * bytes of data we can accept for a specific port.
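 *
 * (Editor's worked example: with NUM_URBS = 16 and
 * URB_TRANSFER_BUFFER_SIZE = 32, a fully idle pool counts 16 * 32 = 512
 * bytes, which the adjustment below reports as 512 - 32 + 1 = 481.)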
*****************************************************************************/ static unsigned int mos7840_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct moschip_port *mos7840_port = usb_get_serial_port_data(port); int i; unsigned int room = 0; unsigned long flags; spin_lock_irqsave(&mos7840_port->pool_lock, flags); for (i = 0; i < NUM_URBS; ++i) { if (!mos7840_port->busy[i]) room += URB_TRANSFER_BUFFER_SIZE; } spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); room = (room == 0) ? 0 : room - URB_TRANSFER_BUFFER_SIZE + 1; dev_dbg(&mos7840_port->port->dev, "%s - returns %u\n", __func__, room); return room; } /***************************************************************************** * mos7840_write * this function is called by the tty driver when data should be written to * the port. * If successful, we return the number of bytes written, otherwise we * return a negative error number. *****************************************************************************/ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *data, int count) { struct moschip_port *mos7840_port = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; int status; int i; int bytes_sent = 0; int transfer_size; unsigned long flags; struct urb *urb; /* __u16 Data; */ const unsigned char *current_position = data; /* try to find a free urb in the list */ urb = NULL; spin_lock_irqsave(&mos7840_port->pool_lock, flags); for (i = 0; i < NUM_URBS; ++i) { if (!mos7840_port->busy[i]) { mos7840_port->busy[i] = 1; urb = mos7840_port->write_urb_pool[i]; dev_dbg(&port->dev, "URB:%d\n", i); break; } } spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); if (urb == NULL) { dev_dbg(&port->dev, "%s - no more free urbs\n", __func__); goto exit; } if (urb->transfer_buffer == NULL) { urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC); if (!urb->transfer_buffer) { bytes_sent = -ENOMEM; goto exit; } } transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); memcpy(urb->transfer_buffer, current_position, transfer_size); /* fill urb with data and submit */ if ((serial->num_ports == 2) && (((__u16)port->port_number % 2) != 0)) { usb_fill_bulk_urb(urb, serial->dev, usb_sndbulkpipe(serial->dev, (port->bulk_out_endpointAddress) + 2), urb->transfer_buffer, transfer_size, mos7840_bulk_out_data_callback, mos7840_port); } else { usb_fill_bulk_urb(urb, serial->dev, usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress), urb->transfer_buffer, transfer_size, mos7840_bulk_out_data_callback, mos7840_port); } dev_dbg(&port->dev, "bulkout endpoint is %d\n", port->bulk_out_endpointAddress); if (mos7840_port->has_led) mos7840_led_activity(port); /* send it down the pipe */ status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { mos7840_port->busy[i] = 0; dev_err_console(port, "%s - usb_submit_urb(write bulk) failed " "with status = %d\n", __func__, status); bytes_sent = status; goto exit; } bytes_sent = transfer_size; port->icount.tx += transfer_size; dev_dbg(&port->dev, "icount.tx is %d:\n", port->icount.tx); exit: return bytes_sent; } /***************************************************************************** * mos7840_throttle * this function is called by the tty driver when it wants to stop the data * being read from the port. 
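 *
 * (Editor's note: STOP_CHAR(tty) below is the termios VSTOP character,
 * typically XOFF (0x13); mos7840_unthrottle() sends START_CHAR(tty),
 * typically XON (0x11).)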
*****************************************************************************/ static void mos7840_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct moschip_port *mos7840_port = usb_get_serial_port_data(port); int status; /* if we are implementing XON/XOFF, send the stop character */ if (I_IXOFF(tty)) { unsigned char stop_char = STOP_CHAR(tty); status = mos7840_write(tty, port, &stop_char, 1); if (status <= 0) return; } /* if we are implementing RTS/CTS, toggle that line */ if (C_CRTSCTS(tty)) { mos7840_port->shadowMCR &= ~MCR_RTS; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mos7840_port->shadowMCR); if (status < 0) return; } } /***************************************************************************** * mos7840_unthrottle * this function is called by the tty driver when it wants to resume * the data being read from the port (called after mos7840_throttle is * called) *****************************************************************************/ static void mos7840_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct moschip_port *mos7840_port = usb_get_serial_port_data(port); int status; /* if we are implementing XON/XOFF, send the start character */ if (I_IXOFF(tty)) { unsigned char start_char = START_CHAR(tty); status = mos7840_write(tty, port, &start_char, 1); if (status <= 0) return; } /* if we are implementing RTS/CTS, toggle that line */ if (C_CRTSCTS(tty)) { mos7840_port->shadowMCR |= MCR_RTS; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mos7840_port->shadowMCR); if (status < 0) return; } } static int mos7840_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; unsigned int result; __u16 msr; __u16 mcr; int status; status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr); if (status < 0) return -EIO; status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr); if (status < 0) return -EIO; result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0) | ((mcr & MCR_RTS) ? TIOCM_RTS : 0) | ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0) | ((msr & MOS7840_MSR_CTS) ? TIOCM_CTS : 0) | ((msr & MOS7840_MSR_CD) ? TIOCM_CAR : 0) | ((msr & MOS7840_MSR_RI) ? TIOCM_RI : 0) | ((msr & MOS7840_MSR_DSR) ? TIOCM_DSR : 0); dev_dbg(&port->dev, "%s - 0x%04X\n", __func__, result); return result; } static int mos7840_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct moschip_port *mos7840_port = usb_get_serial_port_data(port); unsigned int mcr; int status; /* FIXME: What locks the port registers ? */ mcr = mos7840_port->shadowMCR; if (clear & TIOCM_RTS) mcr &= ~MCR_RTS; if (clear & TIOCM_DTR) mcr &= ~MCR_DTR; if (clear & TIOCM_LOOP) mcr &= ~MCR_LOOPBACK; if (set & TIOCM_RTS) mcr |= MCR_RTS; if (set & TIOCM_DTR) mcr |= MCR_DTR; if (set & TIOCM_LOOP) mcr |= MCR_LOOPBACK; mos7840_port->shadowMCR = mcr; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr); if (status < 0) { dev_dbg(&port->dev, "setting MODEM_CONTROL_REGISTER Failed\n"); return status; } return 0; } /***************************************************************************** * mos7840_calc_baud_rate_divisor * this function calculates the proper baud rate divisor for the specified * baud rate. 
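 *
 * (Editor's worked example from the ranges below: each clock-select
 * value picks a base rate that is divided down. 9600 baud falls in the
 * lowest range, so clk_sel_val = 0x00 and divisor = 115200 / 9600 = 12;
 * 460800 baud selects the 460800 base (0x30) with divisor 1.)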
*****************************************************************************/ static int mos7840_calc_baud_rate_divisor(struct usb_serial_port *port, int baudRate, int *divisor, __u16 *clk_sel_val) { dev_dbg(&port->dev, "%s - %d\n", __func__, baudRate); if (baudRate <= 115200) { *divisor = 115200 / baudRate; *clk_sel_val = 0x0; } else if ((baudRate > 115200) && (baudRate <= 230400)) { *divisor = 230400 / baudRate; *clk_sel_val = 0x10; } else if ((baudRate > 230400) && (baudRate <= 403200)) { *divisor = 403200 / baudRate; *clk_sel_val = 0x20; } else if ((baudRate > 403200) && (baudRate <= 460800)) { *divisor = 460800 / baudRate; *clk_sel_val = 0x30; } else if ((baudRate > 460800) && (baudRate <= 806400)) { *divisor = 806400 / baudRate; *clk_sel_val = 0x40; } else if ((baudRate > 806400) && (baudRate <= 921600)) { *divisor = 921600 / baudRate; *clk_sel_val = 0x50; } else if ((baudRate > 921600) && (baudRate <= 1572864)) { *divisor = 1572864 / baudRate; *clk_sel_val = 0x60; } else if ((baudRate > 1572864) && (baudRate <= 3145728)) { *divisor = 3145728 / baudRate; *clk_sel_val = 0x70; } return 0; } /***************************************************************************** * mos7840_send_cmd_write_baud_rate * this function sends the proper command to change the baud rate of the * specified port. *****************************************************************************/ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port, int baudRate) { struct usb_serial_port *port = mos7840_port->port; int divisor = 0; int status; __u16 Data; __u16 clk_sel_val; dev_dbg(&port->dev, "%s - baud = %d\n", __func__, baudRate); /* reset clk_uart_sel in spregOffset */ if (baudRate > 115200) { #ifdef HW_flow_control /* NOTE: need to see the other register to modify */ /* setting h/w flow control bit to 1 */ Data = 0x2b; mos7840_port->shadowMCR = Data; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing spreg failed in set_serial_baud\n"); return -1; } #endif } else { #ifdef HW_flow_control /* setting h/w flow control bit to 0 */ Data = 0xb; mos7840_port->shadowMCR = Data; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing spreg failed in set_serial_baud\n"); return -1; } #endif } if (1) { /* baudRate <= 115200 */ clk_sel_val = 0x0; Data = 0x0; /* Calculate the divisor and clock select for this rate */ status = mos7840_calc_baud_rate_divisor(port, baudRate, &divisor, &clk_sel_val); if (status) { dev_err(&port->dev, "%s - bad baud rate\n", __func__); return status; } status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data); if (status < 0) { dev_dbg(&port->dev, "reading spreg failed in set_serial_baud\n"); return -1; } Data = (Data & 0x8f) | clk_sel_val; status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); if (status < 0) { dev_dbg(&port->dev, "Writing spreg failed in set_serial_baud\n"); return -1; } /* Enable access to divisor latch */ Data = mos7840_port->shadowLCR | SERIAL_LCR_DLAB; mos7840_port->shadowLCR = Data; mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); /* Write the divisor */ Data = (unsigned char)(divisor & 0xff); dev_dbg(&port->dev, "set_serial_baud Value to write DLL is %x\n", Data); mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data); Data = (unsigned char)((divisor & 0xff00) >> 8); dev_dbg(&port->dev, "set_serial_baud Value to write DLM is %x\n", Data); mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data); /* Disable access to divisor latch */
Data = mos7840_port->shadowLCR & ~SERIAL_LCR_DLAB; mos7840_port->shadowLCR = Data; mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); } return status; } /***************************************************************************** * mos7840_change_port_settings * This routine is called to set the UART on the device to match * the specified new settings. *****************************************************************************/ static void mos7840_change_port_settings(struct tty_struct *tty, struct moschip_port *mos7840_port, const struct ktermios *old_termios) { struct usb_serial_port *port = mos7840_port->port; int baud; unsigned cflag; __u8 lData; __u8 lParity; __u8 lStop; int status; __u16 Data; lData = LCR_BITS_8; lStop = LCR_STOP_1; lParity = LCR_PAR_NONE; cflag = tty->termios.c_cflag; /* Change the number of bits */ switch (cflag & CSIZE) { case CS5: lData = LCR_BITS_5; break; case CS6: lData = LCR_BITS_6; break; case CS7: lData = LCR_BITS_7; break; default: case CS8: lData = LCR_BITS_8; break; } /* Change the Parity bit */ if (cflag & PARENB) { if (cflag & PARODD) { lParity = LCR_PAR_ODD; dev_dbg(&port->dev, "%s - parity = odd\n", __func__); } else { lParity = LCR_PAR_EVEN; dev_dbg(&port->dev, "%s - parity = even\n", __func__); } } else { dev_dbg(&port->dev, "%s - parity = none\n", __func__); } if (cflag & CMSPAR) lParity = lParity | 0x20; /* Change the Stop bit */ if (cflag & CSTOPB) { lStop = LCR_STOP_2; dev_dbg(&port->dev, "%s - stop bits = 2\n", __func__); } else { lStop = LCR_STOP_1; dev_dbg(&port->dev, "%s - stop bits = 1\n", __func__); } /* Update the LCR with the correct value */ mos7840_port->shadowLCR &= ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK); mos7840_port->shadowLCR |= (lData | lParity | lStop); dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is %x\n", __func__, mos7840_port->shadowLCR); /* Disable Interrupts */ Data = 0x00; mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); Data = 0x00; mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); Data = 0xcf; mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); /* Send the updated LCR value to the mos7840 */ Data = mos7840_port->shadowLCR; mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); Data = 0x00b; mos7840_port->shadowMCR = Data; mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); Data = 0x00b; mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); /* set up the MCR register and send it to the mos7840 */ mos7840_port->shadowMCR = MCR_MASTER_IE; if (cflag & CBAUD) mos7840_port->shadowMCR |= (MCR_DTR | MCR_RTS); if (cflag & CRTSCTS) mos7840_port->shadowMCR |= (MCR_XON_ANY); else mos7840_port->shadowMCR &= ~(MCR_XON_ANY); Data = mos7840_port->shadowMCR; mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); /* Determine divisor based on baud rate */ baud = tty_get_baud_rate(tty); if (!baud) { /* pick a default, any default... 
*/ dev_dbg(&port->dev, "%s", "Picked default baud...\n"); baud = 9600; } dev_dbg(&port->dev, "%s - baud rate = %d\n", __func__, baud); status = mos7840_send_cmd_write_baud_rate(mos7840_port, baud); /* Enable Interrupts */ Data = 0x0c; mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); if (!mos7840_port->read_urb_busy) { mos7840_port->read_urb_busy = true; status = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL); if (status) { dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n", status); mos7840_port->read_urb_busy = false; } } dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__, mos7840_port->shadowLCR); } /***************************************************************************** * mos7840_set_termios * this function is called by the tty driver when it wants to change * the termios structure *****************************************************************************/ static void mos7840_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct moschip_port *mos7840_port = usb_get_serial_port_data(port); int status; /* change the port settings to the new ones specified */ mos7840_change_port_settings(tty, mos7840_port, old_termios); if (!mos7840_port->read_urb_busy) { mos7840_port->read_urb_busy = true; status = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL); if (status) { dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n", status); mos7840_port->read_urb_busy = false; } } } /***************************************************************************** * mos7840_get_lsr_info - get line status register info * * Purpose: Let user call ioctl() to get info when the UART is physically * emptied. On bus types like RS485, the transmitter must * release the bus after transmitting. This must be done when * the transmit shift register is empty, not when * the transmit holding register is empty. This functionality * allows an RS485 driver to be written in user space. *****************************************************************************/ static int mos7840_get_lsr_info(struct tty_struct *tty, unsigned int __user *value) { int count; unsigned int result = 0; count = mos7840_chars_in_buffer(tty); if (count == 0) result = TIOCSER_TEMT; if (copy_to_user(value, &result, sizeof(int))) return -EFAULT; return 0; } /***************************************************************************** * mos7840_ioctl * this function handles any ioctl calls to the driver *****************************************************************************/ static int mos7840_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; void __user *argp = (void __user *)arg; switch (cmd) { /* return transmitter-empty status */ case TIOCSERGETLSR: dev_dbg(&port->dev, "%s TIOCSERGETLSR\n", __func__); return mos7840_get_lsr_info(tty, argp); default: break; } return -ENOIOCTLCMD; } /* * Check if GPO (pin 42) is connected to GPI (pin 33) as recommended by ASIX * for MCS7810 by bit-banging a 16-bit word. * * Note that GPO is really RTS of the third port so this will toggle RTS of * port two or three on two- and four-port devices.
*/ static int mos7810_check(struct usb_serial *serial) { int i, pass_count = 0; u8 *buf; __u16 data = 0, mcr_data = 0; __u16 test_pattern = 0x55AA; int res; buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL); if (!buf) return 0; /* failed to identify 7810 */ /* Store MCR setting */ res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ, MCS_RD_RTYPE, 0x0300, MODEM_CONTROL_REGISTER, buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); if (res == VENDOR_READ_LENGTH) mcr_data = *buf; for (i = 0; i < 16; i++) { /* Send the 1-bit test pattern out to MCS7810 test pin */ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCS_WRREQ, MCS_WR_RTYPE, (0x0300 | (((test_pattern >> i) & 0x0001) << 1)), MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT); /* Read the test pattern back */ res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); if (res == VENDOR_READ_LENGTH) data = *buf; /* If this is a MCS7810 device, both test patterns must match */ if (((test_pattern >> i) ^ (~data >> 1)) & 0x0001) break; pass_count++; } /* Restore MCR setting */ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCS_WRREQ, MCS_WR_RTYPE, 0x0300 | mcr_data, MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT); kfree(buf); if (pass_count == 16) return 1; return 0; } static int mos7840_probe(struct usb_serial *serial, const struct usb_device_id *id) { unsigned long device_flags = id->driver_info; u8 *buf; /* Skip device-type detection if we already have device flags. */ if (device_flags) goto out; buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL); if (!buf) return -ENOMEM; usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); /* For a MCS7840 device GPIO0 must be set to 1 */ if (buf[0] & 0x01) device_flags = MCS_PORTS(4); else if (mos7810_check(serial)) device_flags = MCS_PORTS(1) | MCS_LED; else device_flags = MCS_PORTS(2); kfree(buf); out: usb_set_serial_data(serial, (void *)device_flags); return 0; } static int mos7840_calc_num_ports(struct usb_serial *serial, struct usb_serial_endpoints *epds) { unsigned long device_flags = (unsigned long)usb_get_serial_data(serial); int num_ports = MCS_PORTS(device_flags); if (num_ports == 0 || num_ports > 4) return -ENODEV; if (epds->num_bulk_in < num_ports || epds->num_bulk_out < num_ports) { dev_err(&serial->interface->dev, "missing endpoints\n"); return -ENODEV; } return num_ports; } static int mos7840_attach(struct usb_serial *serial) { struct device *dev = &serial->interface->dev; int status; u16 val; /* Zero Length flag enable */ val = 0x0f; status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, val); if (status < 0) dev_dbg(dev, "Writing ZLP_REG5 failed status-0x%x\n", status); else dev_dbg(dev, "ZLP_REG5 Writing success status%d\n", status); return status; } static int mos7840_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; unsigned long device_flags = (unsigned long)usb_get_serial_data(serial); struct moschip_port *mos7840_port; int status; int pnum; __u16 Data; /* we set up the pointers to the endpoints in the mos7840_open * * function, as the structures aren't created yet. 
*/ pnum = port->port_number; dev_dbg(&port->dev, "mos7840_startup: configuring port %d\n", pnum); mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL); if (!mos7840_port) return -ENOMEM; /* Initialize all port interrupt endpoints to the port 0 interrupt * endpoint. Our device has only one interrupt endpoint * common to all ports */ mos7840_port->port = port; spin_lock_init(&mos7840_port->pool_lock); /* minor is not initialised until later by * usb-serial.c:get_free_serial() and cannot therefore be used * to index device instances */ mos7840_port->port_num = pnum + 1; dev_dbg(&port->dev, "port->minor = %d\n", port->minor); dev_dbg(&port->dev, "mos7840_port->port_num = %d\n", mos7840_port->port_num); if (mos7840_port->port_num == 1) { mos7840_port->SpRegOffset = 0x0; mos7840_port->ControlRegOffset = 0x1; mos7840_port->DcrRegOffset = 0x4; } else { u8 phy_num = mos7840_port->port_num; /* Port 2 in the 2-port case uses registers of port 3 */ if (serial->num_ports == 2) phy_num = 3; mos7840_port->SpRegOffset = 0x8 + 2 * (phy_num - 2); mos7840_port->ControlRegOffset = 0x9 + 2 * (phy_num - 2); mos7840_port->DcrRegOffset = 0x16 + 3 * (phy_num - 2); } mos7840_dump_serial_port(port, mos7840_port); usb_set_serial_port_data(port, mos7840_port); /* enable rx_disable bit in control register */ status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset, &Data); if (status < 0) { dev_dbg(&port->dev, "Reading ControlReg failed status-0x%x\n", status); goto error; } else dev_dbg(&port->dev, "ControlReg Reading success val is %x, status%d\n", Data, status); Data |= 0x08; /* setting driver done bit */ Data |= 0x04; /* sp1_bit to have cts change reflect in modem status reg */ /* Data |= 0x20; //rx_disable bit */ status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset, Data); if (status < 0) { dev_dbg(&port->dev, "Writing ControlReg failed(rx_disable) status-0x%x\n", status); goto error; } else dev_dbg(&port->dev, "ControlReg Writing success(rx_disable) status%d\n", status); /* Write default values in DCR (i.e. 0x01 in DCR0, 0x05 in DCR1 and 0x24 in DCR2) */ Data = 0x01; status = mos7840_set_reg_sync(port, (__u16) (mos7840_port->DcrRegOffset + 0), Data); if (status < 0) { dev_dbg(&port->dev, "Writing DCR0 failed status-0x%x\n", status); goto error; } else dev_dbg(&port->dev, "DCR0 Writing success status%d\n", status); Data = 0x05; status = mos7840_set_reg_sync(port, (__u16) (mos7840_port->DcrRegOffset + 1), Data); if (status < 0) { dev_dbg(&port->dev, "Writing DCR1 failed status-0x%x\n", status); goto error; } else dev_dbg(&port->dev, "DCR1 Writing success status%d\n", status); Data = 0x24; status = mos7840_set_reg_sync(port, (__u16) (mos7840_port->DcrRegOffset + 2), Data); if (status < 0) { dev_dbg(&port->dev, "Writing DCR2 failed status-0x%x\n", status); goto error; } else dev_dbg(&port->dev, "DCR2 Writing success status%d\n", status); /* write value 0x0 to clkstart and 0x20 to clkmulti */ Data = 0x0; status = mos7840_set_reg_sync(port, CLK_START_VALUE_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status); goto error; } else dev_dbg(&port->dev, "CLK_START_VALUE_REGISTER Writing success status%d\n", status); Data = 0x20; status = mos7840_set_reg_sync(port, CLK_MULTI_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing CLK_MULTI_REGISTER failed status-0x%x\n", status); goto error; } else dev_dbg(&port->dev, "CLK_MULTI_REGISTER Writing success status%d\n", status); /* write value 0x0 to scratchpad register */ Data = 0x00;
status = mos7840_set_uart_reg(port, SCRATCH_PAD_REGISTER, Data); if (status < 0) { dev_dbg(&port->dev, "Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", status); goto error; } else dev_dbg(&port->dev, "SCRATCH_PAD_REGISTER Writing success status%d\n", status); /* Zero Length flag register */ if ((mos7840_port->port_num != 1) && (serial->num_ports == 2)) { Data = 0xff; status = mos7840_set_reg_sync(port, (__u16) (ZLP_REG1 + ((__u16)mos7840_port->port_num)), Data); dev_dbg(&port->dev, "ZLIP offset %x\n", (__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num))); if (status < 0) { dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 2, status); goto error; } else dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 2, status); } else { Data = 0xff; status = mos7840_set_reg_sync(port, (__u16) (ZLP_REG1 + ((__u16)mos7840_port->port_num) - 0x1), Data); dev_dbg(&port->dev, "ZLIP offset %x\n", (__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num) - 0x1)); if (status < 0) { dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 1, status); goto error; } else dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 1, status); } mos7840_port->has_led = device_flags & MCS_LED; /* Initialize LED timers */ if (mos7840_port->has_led) { mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL); mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr), GFP_KERNEL); if (!mos7840_port->led_urb || !mos7840_port->led_dr) { status = -ENOMEM; goto error; } timer_setup(&mos7840_port->led_timer1, mos7840_led_off, 0); mos7840_port->led_timer1.expires = jiffies + msecs_to_jiffies(LED_ON_MS); timer_setup(&mos7840_port->led_timer2, mos7840_led_flag_off, 0); mos7840_port->led_timer2.expires = jiffies + msecs_to_jiffies(LED_OFF_MS); /* Turn off LED */ mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300); } return 0; error: kfree(mos7840_port->led_dr); usb_free_urb(mos7840_port->led_urb); kfree(mos7840_port); return status; } static void mos7840_port_remove(struct usb_serial_port *port) { struct moschip_port *mos7840_port = usb_get_serial_port_data(port); if (mos7840_port->has_led) { /* Turn off LED */ mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300); timer_shutdown_sync(&mos7840_port->led_timer1); timer_shutdown_sync(&mos7840_port->led_timer2); usb_kill_urb(mos7840_port->led_urb); usb_free_urb(mos7840_port->led_urb); kfree(mos7840_port->led_dr); } kfree(mos7840_port); } static int mos7840_suspend(struct usb_serial *serial, pm_message_t message) { struct moschip_port *mos7840_port; struct usb_serial_port *port; int i; for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; if (!tty_port_initialized(&port->port)) continue; mos7840_port = usb_get_serial_port_data(port); usb_kill_urb(mos7840_port->read_urb); mos7840_port->read_urb_busy = false; } return 0; } static int mos7840_resume(struct usb_serial *serial) { struct moschip_port *mos7840_port; struct usb_serial_port *port; int res; int i; for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; if (!tty_port_initialized(&port->port)) continue; mos7840_port = usb_get_serial_port_data(port); mos7840_port->read_urb_busy = true; res = usb_submit_urb(mos7840_port->read_urb, GFP_NOIO); if (res) mos7840_port->read_urb_busy = false; } return 0; } static struct usb_serial_driver moschip7840_4port_device = { .driver = { .name = "mos7840", }, .description = DRIVER_DESC, .id_table = id_table, .num_interrupt_in = 1, .open = mos7840_open, .close = mos7840_close, .write = mos7840_write, .write_room = 
mos7840_write_room, .chars_in_buffer = mos7840_chars_in_buffer, .throttle = mos7840_throttle, .unthrottle = mos7840_unthrottle, .calc_num_ports = mos7840_calc_num_ports, .probe = mos7840_probe, .attach = mos7840_attach, .ioctl = mos7840_ioctl, .set_termios = mos7840_set_termios, .break_ctl = mos7840_break, .tiocmget = mos7840_tiocmget, .tiocmset = mos7840_tiocmset, .get_icount = usb_serial_generic_get_icount, .port_probe = mos7840_port_probe, .port_remove = mos7840_port_remove, .read_bulk_callback = mos7840_bulk_in_callback, .suspend = mos7840_suspend, .resume = mos7840_resume, }; static struct usb_serial_driver * const serial_drivers[] = { &moschip7840_4port_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
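The TIOCSERGETLSR path above is what enables the user-space RS485 scheme described in the mos7840_get_lsr_info() comment. The sketch below is illustrative only and not part of the driver: it assumes a Linux/glibc user-space build where TIOCSERGETLSR and TIOCSER_TEMT come from <sys/ioctl.h>, and the helper name rs485_wait_tx_empty() is hypothetical.

/* Minimal user-space sketch: poll the driver's line-status ioctl until
 * the transmitter is reported physically empty, at which point an RS485
 * application could release the bus. Assumptions noted above. */
#include <sys/ioctl.h>   /* TIOCSERGETLSR, TIOCSER_TEMT on Linux */
#include <unistd.h>      /* usleep() */

int rs485_wait_tx_empty(int fd)
{
	unsigned int lsr = 0;

	do {
		if (ioctl(fd, TIOCSERGETLSR, &lsr) < 0)
			return -1;      /* driver may not support this ioctl */
		if (!(lsr & TIOCSER_TEMT))
			usleep(1000);   /* transmitter not drained yet */
	} while (!(lsr & TIOCSER_TEMT));

	return 0;                       /* safe to release the bus now */
}

The loop mirrors what the driver actually reports: mos7840_get_lsr_info() sets TIOCSER_TEMT only once mos7840_chars_in_buffer() sees no characters still buffered for transmission.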
// SPDX-License-Identifier: GPL-2.0-or-later /* * NET3 IP device support routines. * * Derived from the IP parts of dev.c 1.0.19 * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * * Additional Authors: * Alan Cox, <gw4pts@gw4pts.ampr.org> * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Changes: * Alexey Kuznetsov: pa_* fields are replaced with ifaddr * lists.
* Cyrus Durgin: updated for kmod * Matthias Andree: in devinet_ioctl, compare label and * address (4.4BSD alias style support), * fall back to comparing just the label * if no match found. */ #include <linux/uaccess.h> #include <linux/bitops.h> #include <linux/capability.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/in.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/if_addr.h> #include <linux/if_ether.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/notifier.h> #include <linux/inetdevice.h> #include <linux/igmp.h> #include "igmp_internal.h" #include <linux/slab.h> #include <linux/hash.h> #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #endif #include <linux/kmod.h> #include <linux/netconf.h> #include <net/arp.h> #include <net/ip.h> #include <net/route.h> #include <net/ip_fib.h> #include <net/rtnetlink.h> #include <net/net_namespace.h> #include <net/addrconf.h> #define IPV6ONLY_FLAGS \ (IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \ IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \ IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY) static struct ipv4_devconf ipv4_devconf = { .data = { [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1, [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1, [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1, [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1, [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/, [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/, [IPV4_DEVCONF_ARP_EVICT_NOCARRIER - 1] = 1, }, }; static struct ipv4_devconf ipv4_devconf_dflt = { .data = { [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1, [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1, [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1, [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1, [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1, [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/, [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/, [IPV4_DEVCONF_ARP_EVICT_NOCARRIER - 1] = 1, }, }; #define IPV4_DEVCONF_DFLT(net, attr) \ IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr) static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = { [IFA_LOCAL] = { .type = NLA_U32 }, [IFA_ADDRESS] = { .type = NLA_U32 }, [IFA_BROADCAST] = { .type = NLA_U32 }, [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) }, [IFA_FLAGS] = { .type = NLA_U32 }, [IFA_RT_PRIORITY] = { .type = NLA_U32 }, [IFA_TARGET_NETNSID] = { .type = NLA_S32 }, [IFA_PROTO] = { .type = NLA_U8 }, }; #define IN4_ADDR_HSIZE_SHIFT 8 #define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT) static u32 inet_addr_hash(const struct net *net, __be32 addr) { u32 val = __ipv4_addr_hash(addr, net_hash_mix(net)); return hash_32(val, IN4_ADDR_HSIZE_SHIFT); } static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa) { u32 hash = inet_addr_hash(net, ifa->ifa_local); ASSERT_RTNL(); hlist_add_head_rcu(&ifa->addr_lst, &net->ipv4.inet_addr_lst[hash]); } static void inet_hash_remove(struct in_ifaddr *ifa) { ASSERT_RTNL(); hlist_del_init_rcu(&ifa->addr_lst); } /** * __ip_dev_find - find the first device with a given source address. 
* @net: the net namespace * @addr: the source address * @devref: if true, take a reference on the found device * * If a caller uses devref=false, it should be protected by RCU, or RTNL */ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref) { struct net_device *result = NULL; struct in_ifaddr *ifa; rcu_read_lock(); ifa = inet_lookup_ifaddr_rcu(net, addr); if (!ifa) { struct flowi4 fl4 = { .daddr = addr }; struct fib_result res = { 0 }; struct fib_table *local; /* Fallback to FIB local table so that communication * over loopback subnets work. */ local = fib_get_table(net, RT_TABLE_LOCAL); if (local && !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) && res.type == RTN_LOCAL) result = FIB_RES_DEV(res); } else { result = ifa->ifa_dev->dev; } if (result && devref) dev_hold(result); rcu_read_unlock(); return result; } EXPORT_SYMBOL(__ip_dev_find); /* called under RCU lock */ struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr) { u32 hash = inet_addr_hash(net, addr); struct in_ifaddr *ifa; hlist_for_each_entry_rcu(ifa, &net->ipv4.inet_addr_lst[hash], addr_lst) if (ifa->ifa_local == addr) return ifa; return NULL; } static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32); static BLOCKING_NOTIFIER_HEAD(inetaddr_chain); static BLOCKING_NOTIFIER_HEAD(inetaddr_validator_chain); static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr __rcu **ifap, int destroy); #ifdef CONFIG_SYSCTL static int devinet_sysctl_register(struct in_device *idev); static void devinet_sysctl_unregister(struct in_device *idev); #else static int devinet_sysctl_register(struct in_device *idev) { return 0; } static void devinet_sysctl_unregister(struct in_device *idev) { } #endif /* Locks all the inet devices. */ static struct in_ifaddr *inet_alloc_ifa(struct in_device *in_dev) { struct in_ifaddr *ifa; ifa = kzalloc(sizeof(*ifa), GFP_KERNEL_ACCOUNT); if (!ifa) return NULL; in_dev_hold(in_dev); ifa->ifa_dev = in_dev; INIT_HLIST_NODE(&ifa->addr_lst); return ifa; } static void inet_rcu_free_ifa(struct rcu_head *head) { struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head); in_dev_put(ifa->ifa_dev); kfree(ifa); } static void inet_free_ifa(struct in_ifaddr *ifa) { /* Our reference to ifa->ifa_dev must be freed ASAP * to release the reference to the netdev the same way. * in_dev_put() -> in_dev_finish_destroy() -> netdev_put() */ call_rcu_hurry(&ifa->rcu_head, inet_rcu_free_ifa); } static void in_dev_free_rcu(struct rcu_head *head) { struct in_device *idev = container_of(head, struct in_device, rcu_head); kfree(rcu_dereference_protected(idev->mc_hash, 1)); kfree(idev); } void in_dev_finish_destroy(struct in_device *idev) { struct net_device *dev = idev->dev; WARN_ON(idev->ifa_list); WARN_ON(idev->mc_list); #ifdef NET_REFCNT_DEBUG pr_debug("%s: %p=%s\n", __func__, idev, dev ? 
dev->name : "NIL"); #endif netdev_put(dev, &idev->dev_tracker); if (!idev->dead) pr_err("Freeing alive in_device %p\n", idev); else call_rcu(&idev->rcu_head, in_dev_free_rcu); } EXPORT_SYMBOL(in_dev_finish_destroy); static struct in_device *inetdev_init(struct net_device *dev) { struct in_device *in_dev; int err = -ENOMEM; ASSERT_RTNL(); in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL); if (!in_dev) goto out; memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt, sizeof(in_dev->cnf)); in_dev->cnf.sysctl = NULL; in_dev->dev = dev; in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl); if (!in_dev->arp_parms) goto out_kfree; if (IPV4_DEVCONF(in_dev->cnf, FORWARDING)) netif_disable_lro(dev); /* Reference in_dev->dev */ netdev_hold(dev, &in_dev->dev_tracker, GFP_KERNEL); /* Account for reference dev->ip_ptr (below) */ refcount_set(&in_dev->refcnt, 1); if (dev != blackhole_netdev) { err = devinet_sysctl_register(in_dev); if (err) { in_dev->dead = 1; neigh_parms_release(&arp_tbl, in_dev->arp_parms); in_dev_put(in_dev); in_dev = NULL; goto out; } ip_mc_init_dev(in_dev); if (dev->flags & IFF_UP) ip_mc_up(in_dev); } /* we can receive as soon as ip_ptr is set -- do this last */ rcu_assign_pointer(dev->ip_ptr, in_dev); out: return in_dev ?: ERR_PTR(err); out_kfree: kfree(in_dev); in_dev = NULL; goto out; } static void inetdev_destroy(struct in_device *in_dev) { struct net_device *dev; struct in_ifaddr *ifa; ASSERT_RTNL(); dev = in_dev->dev; in_dev->dead = 1; ip_mc_destroy_dev(in_dev); while ((ifa = rtnl_dereference(in_dev->ifa_list)) != NULL) { inet_del_ifa(in_dev, &in_dev->ifa_list, 0); inet_free_ifa(ifa); } RCU_INIT_POINTER(dev->ip_ptr, NULL); devinet_sysctl_unregister(in_dev); neigh_parms_release(&arp_tbl, in_dev->arp_parms); arp_ifdown(dev); in_dev_put(in_dev); } static int __init inet_blackhole_dev_init(void) { int err = 0; rtnl_lock(); if (!inetdev_init(blackhole_netdev)) err = -ENOMEM; rtnl_unlock(); return err; } late_initcall(inet_blackhole_dev_init); int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b) { const struct in_ifaddr *ifa; rcu_read_lock(); in_dev_for_each_ifa_rcu(ifa, in_dev) { if (inet_ifa_match(a, ifa)) { if (!b || inet_ifa_match(b, ifa)) { rcu_read_unlock(); return 1; } } } rcu_read_unlock(); return 0; } static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr __rcu **ifap, int destroy, struct nlmsghdr *nlh, u32 portid) { struct in_ifaddr *promote = NULL; struct in_ifaddr *ifa, *ifa1; struct in_ifaddr __rcu **last_prim; struct in_ifaddr *prev_prom = NULL; int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev); ASSERT_RTNL(); ifa1 = rtnl_dereference(*ifap); last_prim = ifap; if (in_dev->dead) goto no_promotions; /* 1. 
Deleting primary ifaddr forces deletion of all secondaries * unless alias promotion is set */ if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) { struct in_ifaddr __rcu **ifap1 = &ifa1->ifa_next; while ((ifa = rtnl_dereference(*ifap1)) != NULL) { if (!(ifa->ifa_flags & IFA_F_SECONDARY) && ifa1->ifa_scope <= ifa->ifa_scope) last_prim = &ifa->ifa_next; if (!(ifa->ifa_flags & IFA_F_SECONDARY) || ifa1->ifa_mask != ifa->ifa_mask || !inet_ifa_match(ifa1->ifa_address, ifa)) { ifap1 = &ifa->ifa_next; prev_prom = ifa; continue; } if (!do_promote) { inet_hash_remove(ifa); *ifap1 = ifa->ifa_next; rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa); inet_free_ifa(ifa); } else { promote = ifa; break; } } } /* On promotion all secondaries from the subnet change * the primary IP; we must remove all their routes silently * and later add them back with the new prefsrc. Do this * while all addresses are on the device list. */ for (ifa = promote; ifa; ifa = rtnl_dereference(ifa->ifa_next)) { if (ifa1->ifa_mask == ifa->ifa_mask && inet_ifa_match(ifa1->ifa_address, ifa)) fib_del_ifaddr(ifa, ifa1); } no_promotions: /* 2. Unlink it */ *ifap = ifa1->ifa_next; inet_hash_remove(ifa1); /* 3. Announce address deletion */ /* Send message first, then call notifier. At first sight, the FIB update triggered by the notifier will refer to an already deleted ifaddr, which could confuse netlink listeners. This is not true: gated sees that the route is deleted, and if it still thinks the ifaddr is valid it will try to restore the deleted routes... Grr. So this order is correct. */ rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1); if (promote) { struct in_ifaddr *next_sec; next_sec = rtnl_dereference(promote->ifa_next); if (prev_prom) { struct in_ifaddr *last_sec; rcu_assign_pointer(prev_prom->ifa_next, next_sec); last_sec = rtnl_dereference(*last_prim); rcu_assign_pointer(promote->ifa_next, last_sec); rcu_assign_pointer(*last_prim, promote); } promote->ifa_flags &= ~IFA_F_SECONDARY; rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, promote); for (ifa = next_sec; ifa; ifa = rtnl_dereference(ifa->ifa_next)) { if (ifa1->ifa_mask != ifa->ifa_mask || !inet_ifa_match(ifa1->ifa_address, ifa)) continue; fib_add_ifaddr(ifa); } } if (destroy) inet_free_ifa(ifa1); } static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr __rcu **ifap, int destroy) { __inet_del_ifa(in_dev, ifap, destroy, NULL, 0); } static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh, u32 portid, struct netlink_ext_ack *extack) { struct in_ifaddr __rcu **last_primary, **ifap; struct in_device *in_dev = ifa->ifa_dev; struct net *net = dev_net(in_dev->dev); struct in_validator_info ivi; struct in_ifaddr *ifa1; int ret; ASSERT_RTNL(); ifa->ifa_flags &= ~IFA_F_SECONDARY; last_primary = &in_dev->ifa_list; /* Don't set IPv6-only flags on IPv4 addresses */ ifa->ifa_flags &= ~IPV6ONLY_FLAGS; ifap = &in_dev->ifa_list; ifa1 = rtnl_dereference(*ifap); while (ifa1) { if (!(ifa1->ifa_flags & IFA_F_SECONDARY) && ifa->ifa_scope <= ifa1->ifa_scope) last_primary = &ifa1->ifa_next; if (ifa1->ifa_mask == ifa->ifa_mask && inet_ifa_match(ifa1->ifa_address, ifa)) { if (ifa1->ifa_local == ifa->ifa_local) { inet_free_ifa(ifa); return -EEXIST; } if (ifa1->ifa_scope != ifa->ifa_scope) { NL_SET_ERR_MSG(extack, "ipv4: Invalid scope value"); inet_free_ifa(ifa); return -EINVAL; } ifa->ifa_flags |= IFA_F_SECONDARY; } ifap =
&ifa1->ifa_next; ifa1 = rtnl_dereference(*ifap); } /* Allow any devices that wish to register ifaddr validators to weigh * in now, before changes are committed. The rtnl lock is serializing * access here, so the state should not change between a validator call * and a final notify on commit. This isn't invoked on promotion under * the assumption that validators are checking the address itself, and * not the flags. */ ivi.ivi_addr = ifa->ifa_address; ivi.ivi_dev = ifa->ifa_dev; ivi.extack = extack; ret = blocking_notifier_call_chain(&inetaddr_validator_chain, NETDEV_UP, &ivi); ret = notifier_to_errno(ret); if (ret) { inet_free_ifa(ifa); return ret; } if (!(ifa->ifa_flags & IFA_F_SECONDARY)) ifap = last_primary; rcu_assign_pointer(ifa->ifa_next, *ifap); rcu_assign_pointer(*ifap, ifa); inet_hash_insert(dev_net(in_dev->dev), ifa); cancel_delayed_work(&net->ipv4.addr_chk_work); queue_delayed_work(system_power_efficient_wq, &net->ipv4.addr_chk_work, 0); /* Send message first, then call notifier. Notifier will trigger FIB update, so that netlink listeners will know about the new ifaddr */ rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid); blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa); return 0; } static int inet_insert_ifa(struct in_ifaddr *ifa) { if (!ifa->ifa_local) { inet_free_ifa(ifa); return 0; } return __inet_insert_ifa(ifa, NULL, 0, NULL); } static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa) { struct in_device *in_dev = __in_dev_get_rtnl_net(dev); ipv4_devconf_setall(in_dev); neigh_parms_data_state_setall(in_dev->arp_parms); if (ipv4_is_loopback(ifa->ifa_local)) ifa->ifa_scope = RT_SCOPE_HOST; return inet_insert_ifa(ifa); } /* Caller must hold RCU or RTNL : * We don't take a reference on the found in_device */ struct in_device *inetdev_by_index(struct net *net, int ifindex) { struct net_device *dev; struct in_device *in_dev = NULL; rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifindex); if (dev) in_dev = rcu_dereference_rtnl(dev->ip_ptr); rcu_read_unlock(); return in_dev; } EXPORT_SYMBOL(inetdev_by_index); /* Called only from RTNL semaphored context. No locks.
*/ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask) { struct in_ifaddr *ifa; ASSERT_RTNL(); in_dev_for_each_ifa_rtnl(ifa, in_dev) { if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa)) return ifa; } return NULL; } static int ip_mc_autojoin_config(struct net *net, bool join, const struct in_ifaddr *ifa) { #if defined(CONFIG_IP_MULTICAST) struct ip_mreqn mreq = { .imr_multiaddr.s_addr = ifa->ifa_address, .imr_ifindex = ifa->ifa_dev->dev->ifindex, }; struct sock *sk = net->ipv4.mc_autojoin_sk; int ret; ASSERT_RTNL_NET(net); lock_sock(sk); if (join) ret = ip_mc_join_group(sk, &mreq); else ret = ip_mc_leave_group(sk, &mreq); release_sock(sk); return ret; #else return -EOPNOTSUPP; #endif } static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct in_ifaddr __rcu **ifap; struct nlattr *tb[IFA_MAX+1]; struct in_device *in_dev; struct ifaddrmsg *ifm; struct in_ifaddr *ifa; int err; err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy, extack); if (err < 0) goto out; ifm = nlmsg_data(nlh); rtnl_net_lock(net); in_dev = inetdev_by_index(net, ifm->ifa_index); if (!in_dev) { NL_SET_ERR_MSG(extack, "ipv4: Device not found"); err = -ENODEV; goto unlock; } for (ifap = &in_dev->ifa_list; (ifa = rtnl_net_dereference(net, *ifap)) != NULL; ifap = &ifa->ifa_next) { if (tb[IFA_LOCAL] && ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL])) continue; if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label)) continue; if (tb[IFA_ADDRESS] && (ifm->ifa_prefixlen != ifa->ifa_prefixlen || !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa))) continue; if (ipv4_is_multicast(ifa->ifa_address)) ip_mc_autojoin_config(net, false, ifa); __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid); goto unlock; } NL_SET_ERR_MSG(extack, "ipv4: Address not found"); err = -EADDRNOTAVAIL; unlock: rtnl_net_unlock(net); out: return err; } static void check_lifetime(struct work_struct *work) { unsigned long now, next, next_sec, next_sched; struct in_ifaddr *ifa; struct hlist_node *n; struct net *net; int i; net = container_of(to_delayed_work(work), struct net, ipv4.addr_chk_work); now = jiffies; next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); for (i = 0; i < IN4_ADDR_HSIZE; i++) { struct hlist_head *head = &net->ipv4.inet_addr_lst[i]; bool change_needed = false; rcu_read_lock(); hlist_for_each_entry_rcu(ifa, head, addr_lst) { unsigned long age, tstamp; u32 preferred_lft; u32 valid_lft; u32 flags; flags = READ_ONCE(ifa->ifa_flags); if (flags & IFA_F_PERMANENT) continue; preferred_lft = READ_ONCE(ifa->ifa_preferred_lft); valid_lft = READ_ONCE(ifa->ifa_valid_lft); tstamp = READ_ONCE(ifa->ifa_tstamp); /* We try to batch several events at once. */ age = (now - tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; if (valid_lft != INFINITY_LIFE_TIME && age >= valid_lft) { change_needed = true; } else if (preferred_lft == INFINITY_LIFE_TIME) { continue; } else if (age >= preferred_lft) { if (time_before(tstamp + valid_lft * HZ, next)) next = tstamp + valid_lft * HZ; if (!(flags & IFA_F_DEPRECATED)) change_needed = true; } else if (time_before(tstamp + preferred_lft * HZ, next)) { next = tstamp + preferred_lft * HZ; } } rcu_read_unlock(); if (!change_needed) continue; rtnl_net_lock(net); hlist_for_each_entry_safe(ifa, n, head, addr_lst) { unsigned long age; if (ifa->ifa_flags & IFA_F_PERMANENT) continue; /* We try to batch several events at once. 
*/ age = (now - ifa->ifa_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && age >= ifa->ifa_valid_lft) { struct in_ifaddr __rcu **ifap; struct in_ifaddr *tmp; ifap = &ifa->ifa_dev->ifa_list; tmp = rtnl_net_dereference(net, *ifap); while (tmp) { if (tmp == ifa) { inet_del_ifa(ifa->ifa_dev, ifap, 1); break; } ifap = &tmp->ifa_next; tmp = rtnl_net_dereference(net, *ifap); } } else if (ifa->ifa_preferred_lft != INFINITY_LIFE_TIME && age >= ifa->ifa_preferred_lft && !(ifa->ifa_flags & IFA_F_DEPRECATED)) { ifa->ifa_flags |= IFA_F_DEPRECATED; rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); } } rtnl_net_unlock(net); } next_sec = round_jiffies_up(next); next_sched = next; /* If rounded timeout is accurate enough, accept it. */ if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ)) next_sched = next_sec; now = jiffies; /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */ if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX)) next_sched = now + ADDRCONF_TIMER_FUZZ_MAX; queue_delayed_work(system_power_efficient_wq, &net->ipv4.addr_chk_work, next_sched - now); } static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft, __u32 prefered_lft) { unsigned long timeout; u32 flags; flags = ifa->ifa_flags & ~(IFA_F_PERMANENT | IFA_F_DEPRECATED); timeout = addrconf_timeout_fixup(valid_lft, HZ); if (addrconf_finite_timeout(timeout)) WRITE_ONCE(ifa->ifa_valid_lft, timeout); else flags |= IFA_F_PERMANENT; timeout = addrconf_timeout_fixup(prefered_lft, HZ); if (addrconf_finite_timeout(timeout)) { if (timeout == 0) flags |= IFA_F_DEPRECATED; WRITE_ONCE(ifa->ifa_preferred_lft, timeout); } WRITE_ONCE(ifa->ifa_flags, flags); WRITE_ONCE(ifa->ifa_tstamp, jiffies); if (!ifa->ifa_cstamp) WRITE_ONCE(ifa->ifa_cstamp, ifa->ifa_tstamp); } static int inet_validate_rtm(struct nlmsghdr *nlh, struct nlattr **tb, struct netlink_ext_ack *extack, __u32 *valid_lft, __u32 *prefered_lft) { struct ifaddrmsg *ifm = nlmsg_data(nlh); int err; err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy, extack); if (err < 0) return err; if (ifm->ifa_prefixlen > 32) { NL_SET_ERR_MSG(extack, "ipv4: Invalid prefix length"); return -EINVAL; } if (!tb[IFA_LOCAL]) { NL_SET_ERR_MSG(extack, "ipv4: Local address is not supplied"); return -EINVAL; } if (tb[IFA_CACHEINFO]) { struct ifa_cacheinfo *ci; ci = nla_data(tb[IFA_CACHEINFO]); if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) { NL_SET_ERR_MSG(extack, "ipv4: address lifetime invalid"); return -EINVAL; } *valid_lft = ci->ifa_valid; *prefered_lft = ci->ifa_prefered; } return 0; } static struct in_ifaddr *inet_rtm_to_ifa(struct net *net, struct nlmsghdr *nlh, struct nlattr **tb, struct netlink_ext_ack *extack) { struct ifaddrmsg *ifm = nlmsg_data(nlh); struct in_device *in_dev; struct net_device *dev; struct in_ifaddr *ifa; int err; dev = __dev_get_by_index(net, ifm->ifa_index); err = -ENODEV; if (!dev) { NL_SET_ERR_MSG(extack, "ipv4: Device not found"); goto errout; } in_dev = __in_dev_get_rtnl_net(dev); err = -ENOBUFS; if (!in_dev) goto errout; ifa = inet_alloc_ifa(in_dev); if (!ifa) /* * A potential indev allocation can be left alive; it stays * assigned to its device and is destroyed with it.
*/ goto errout; ipv4_devconf_setall(in_dev); neigh_parms_data_state_setall(in_dev->arp_parms); if (!tb[IFA_ADDRESS]) tb[IFA_ADDRESS] = tb[IFA_LOCAL]; ifa->ifa_prefixlen = ifm->ifa_prefixlen; ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen); ifa->ifa_flags = nla_get_u32_default(tb[IFA_FLAGS], ifm->ifa_flags); ifa->ifa_scope = ifm->ifa_scope; ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]); ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]); if (tb[IFA_BROADCAST]) ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]); if (tb[IFA_LABEL]) nla_strscpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ); else memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); if (tb[IFA_RT_PRIORITY]) ifa->ifa_rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]); if (tb[IFA_PROTO]) ifa->ifa_proto = nla_get_u8(tb[IFA_PROTO]); return ifa; errout: return ERR_PTR(err); } static struct in_ifaddr *find_matching_ifa(struct net *net, struct in_ifaddr *ifa) { struct in_device *in_dev = ifa->ifa_dev; struct in_ifaddr *ifa1; in_dev_for_each_ifa_rtnl_net(net, ifa1, in_dev) { if (ifa1->ifa_mask == ifa->ifa_mask && inet_ifa_match(ifa1->ifa_address, ifa) && ifa1->ifa_local == ifa->ifa_local) return ifa1; } return NULL; } static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { __u32 prefered_lft = INFINITY_LIFE_TIME; __u32 valid_lft = INFINITY_LIFE_TIME; struct net *net = sock_net(skb->sk); struct in_ifaddr *ifa_existing; struct nlattr *tb[IFA_MAX + 1]; struct in_ifaddr *ifa; int ret; ret = inet_validate_rtm(nlh, tb, extack, &valid_lft, &prefered_lft); if (ret < 0) return ret; if (!nla_get_in_addr(tb[IFA_LOCAL])) return 0; rtnl_net_lock(net); ifa = inet_rtm_to_ifa(net, nlh, tb, extack); if (IS_ERR(ifa)) { ret = PTR_ERR(ifa); goto unlock; } ifa_existing = find_matching_ifa(net, ifa); if (!ifa_existing) { /* It would be best to check for !NLM_F_CREATE here but * userspace already relies on not having to provide this. */ set_ifa_lifetime(ifa, valid_lft, prefered_lft); if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) { ret = ip_mc_autojoin_config(net, true, ifa); if (ret < 0) { NL_SET_ERR_MSG(extack, "ipv4: Multicast auto join failed"); inet_free_ifa(ifa); goto unlock; } } ret = __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid, extack); } else { u32 new_metric = ifa->ifa_rt_priority; u8 new_proto = ifa->ifa_proto; inet_free_ifa(ifa); if (nlh->nlmsg_flags & NLM_F_EXCL || !(nlh->nlmsg_flags & NLM_F_REPLACE)) { NL_SET_ERR_MSG(extack, "ipv4: Address already assigned"); ret = -EEXIST; goto unlock; } ifa = ifa_existing; if (ifa->ifa_rt_priority != new_metric) { fib_modify_prefix_metric(ifa, new_metric); ifa->ifa_rt_priority = new_metric; } ifa->ifa_proto = new_proto; set_ifa_lifetime(ifa, valid_lft, prefered_lft); cancel_delayed_work(&net->ipv4.addr_chk_work); queue_delayed_work(system_power_efficient_wq, &net->ipv4.addr_chk_work, 0); rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); } unlock: rtnl_net_unlock(net); return ret; } /* * Determine a default network mask, based on the IP address. */ static int inet_abc_len(__be32 addr) { int rc = -1; /* Something else, probably a multicast. 
*/ if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr)) rc = 0; else { __u32 haddr = ntohl(addr); if (IN_CLASSA(haddr)) rc = 8; else if (IN_CLASSB(haddr)) rc = 16; else if (IN_CLASSC(haddr)) rc = 24; else if (IN_CLASSE(haddr)) rc = 32; } return rc; } int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr) { struct sockaddr_in sin_orig; struct sockaddr_in *sin = (struct sockaddr_in *)&ifr->ifr_addr; struct in_ifaddr __rcu **ifap = NULL; struct in_device *in_dev; struct in_ifaddr *ifa = NULL; struct net_device *dev; char *colon; int ret = -EFAULT; int tryaddrmatch = 0; ifr->ifr_name[IFNAMSIZ - 1] = 0; /* save original address for comparison */ memcpy(&sin_orig, sin, sizeof(*sin)); colon = strchr(ifr->ifr_name, ':'); if (colon) *colon = 0; dev_load(net, ifr->ifr_name); switch (cmd) { case SIOCGIFADDR: /* Get interface address */ case SIOCGIFBRDADDR: /* Get the broadcast address */ case SIOCGIFDSTADDR: /* Get the destination address */ case SIOCGIFNETMASK: /* Get the netmask for the interface */ /* Note that these ioctls will not sleep, so that we do not impose a lock. One day we will be forced to put shlock here (I mean SMP) */ tryaddrmatch = (sin_orig.sin_family == AF_INET); memset(sin, 0, sizeof(*sin)); sin->sin_family = AF_INET; break; case SIOCSIFFLAGS: ret = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) goto out; break; case SIOCSIFADDR: /* Set interface address (and family) */ case SIOCSIFBRDADDR: /* Set the broadcast address */ case SIOCSIFDSTADDR: /* Set the destination address */ case SIOCSIFNETMASK: /* Set the netmask for the interface */ ret = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) goto out; ret = -EINVAL; if (sin->sin_family != AF_INET) goto out; break; default: ret = -EINVAL; goto out; } rtnl_net_lock(net); ret = -ENODEV; dev = __dev_get_by_name(net, ifr->ifr_name); if (!dev) goto done; if (colon) *colon = ':'; in_dev = __in_dev_get_rtnl_net(dev); if (in_dev) { if (tryaddrmatch) { /* Matthias Andree */ /* compare label and address (4.4BSD style) */ /* note: we only do this for a limited set of ioctls and only if the original address family was AF_INET. This is checked above. 
*/ for (ifap = &in_dev->ifa_list; (ifa = rtnl_net_dereference(net, *ifap)) != NULL; ifap = &ifa->ifa_next) { if (!strcmp(ifr->ifr_name, ifa->ifa_label) && sin_orig.sin_addr.s_addr == ifa->ifa_local) { break; /* found */ } } } /* we didn't get a match, maybe the application is 4.3BSD-style and passed in junk so we fall back to comparing just the label */ if (!ifa) { for (ifap = &in_dev->ifa_list; (ifa = rtnl_net_dereference(net, *ifap)) != NULL; ifap = &ifa->ifa_next) if (!strcmp(ifr->ifr_name, ifa->ifa_label)) break; } } ret = -EADDRNOTAVAIL; if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS) goto done; switch (cmd) { case SIOCGIFADDR: /* Get interface address */ ret = 0; sin->sin_addr.s_addr = ifa->ifa_local; break; case SIOCGIFBRDADDR: /* Get the broadcast address */ ret = 0; sin->sin_addr.s_addr = ifa->ifa_broadcast; break; case SIOCGIFDSTADDR: /* Get the destination address */ ret = 0; sin->sin_addr.s_addr = ifa->ifa_address; break; case SIOCGIFNETMASK: /* Get the netmask for the interface */ ret = 0; sin->sin_addr.s_addr = ifa->ifa_mask; break; case SIOCSIFFLAGS: if (colon) { ret = -EADDRNOTAVAIL; if (!ifa) break; ret = 0; if (!(ifr->ifr_flags & IFF_UP)) inet_del_ifa(in_dev, ifap, 1); break; } /* NETDEV_UP/DOWN/CHANGE could touch a peer dev */ ASSERT_RTNL(); ret = dev_change_flags(dev, ifr->ifr_flags, NULL); break; case SIOCSIFADDR: /* Set interface address (and family) */ ret = -EINVAL; if (inet_abc_len(sin->sin_addr.s_addr) < 0) break; if (!ifa) { ret = -ENOBUFS; if (!in_dev) break; ifa = inet_alloc_ifa(in_dev); if (!ifa) break; if (colon) memcpy(ifa->ifa_label, ifr->ifr_name, IFNAMSIZ); else memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); } else { ret = 0; if (ifa->ifa_local == sin->sin_addr.s_addr) break; inet_del_ifa(in_dev, ifap, 0); ifa->ifa_broadcast = 0; ifa->ifa_scope = 0; } ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr; if (!(dev->flags & IFF_POINTOPOINT)) { ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address); ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen); if ((dev->flags & IFF_BROADCAST) && ifa->ifa_prefixlen < 31) ifa->ifa_broadcast = ifa->ifa_address | ~ifa->ifa_mask; } else { ifa->ifa_prefixlen = 32; ifa->ifa_mask = inet_make_mask(32); } set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); ret = inet_set_ifa(dev, ifa); break; case SIOCSIFBRDADDR: /* Set the broadcast address */ ret = 0; if (ifa->ifa_broadcast != sin->sin_addr.s_addr) { inet_del_ifa(in_dev, ifap, 0); ifa->ifa_broadcast = sin->sin_addr.s_addr; inet_insert_ifa(ifa); } break; case SIOCSIFDSTADDR: /* Set the destination address */ ret = 0; if (ifa->ifa_address == sin->sin_addr.s_addr) break; ret = -EINVAL; if (inet_abc_len(sin->sin_addr.s_addr) < 0) break; ret = 0; inet_del_ifa(in_dev, ifap, 0); ifa->ifa_address = sin->sin_addr.s_addr; inet_insert_ifa(ifa); break; case SIOCSIFNETMASK: /* Set the netmask for the interface */ /* * The mask we set must be legal. */ ret = -EINVAL; if (bad_mask(sin->sin_addr.s_addr, 0)) break; ret = 0; if (ifa->ifa_mask != sin->sin_addr.s_addr) { __be32 old_mask = ifa->ifa_mask; inet_del_ifa(in_dev, ifap, 0); ifa->ifa_mask = sin->sin_addr.s_addr; ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask); /* See if current broadcast address matches * with current netmask, then recalculate * the broadcast address. Otherwise it's a * funny address, so don't touch it since * the user seems to know what (s)he's doing... 
*/ if ((dev->flags & IFF_BROADCAST) && (ifa->ifa_prefixlen < 31) && (ifa->ifa_broadcast == (ifa->ifa_local|~old_mask))) { ifa->ifa_broadcast = (ifa->ifa_local | ~sin->sin_addr.s_addr); } inet_insert_ifa(ifa); } break; } done: rtnl_net_unlock(net); out: return ret; } int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size) { struct in_device *in_dev = __in_dev_get_rtnl_net(dev); const struct in_ifaddr *ifa; struct ifreq ifr; int done = 0; if (WARN_ON(size > sizeof(struct ifreq))) goto out; if (!in_dev) goto out; in_dev_for_each_ifa_rtnl_net(dev_net(dev), ifa, in_dev) { if (!buf) { done += size; continue; } if (len < size) break; memset(&ifr, 0, sizeof(struct ifreq)); strcpy(ifr.ifr_name, ifa->ifa_label); (*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET; (*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr = ifa->ifa_local; if (copy_to_user(buf + done, &ifr, size)) { done = -EFAULT; break; } len -= size; done += size; } out: return done; } static __be32 in_dev_select_addr(const struct in_device *in_dev, int scope) { const struct in_ifaddr *ifa; in_dev_for_each_ifa_rcu(ifa, in_dev) { if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY) continue; if (ifa->ifa_scope != RT_SCOPE_LINK && ifa->ifa_scope <= scope) return ifa->ifa_local; } return 0; } __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope) { const struct in_ifaddr *ifa; __be32 addr = 0; unsigned char localnet_scope = RT_SCOPE_HOST; struct in_device *in_dev; struct net *net; int master_idx; rcu_read_lock(); net = dev_net_rcu(dev); in_dev = __in_dev_get_rcu(dev); if (!in_dev) goto no_in_dev; if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev))) localnet_scope = RT_SCOPE_LINK; in_dev_for_each_ifa_rcu(ifa, in_dev) { if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY) continue; if (min(ifa->ifa_scope, localnet_scope) > scope) continue; if (!dst || inet_ifa_match(dst, ifa)) { addr = ifa->ifa_local; break; } if (!addr) addr = ifa->ifa_local; } if (addr) goto out_unlock; no_in_dev: master_idx = l3mdev_master_ifindex_rcu(dev); /* For VRFs, the VRF device takes the place of the loopback device, * with addresses on it being preferred. Note in such cases the * loopback device will be among the devices that fail the master_idx * equality check in the loop below. */ if (master_idx && (dev = dev_get_by_index_rcu(net, master_idx)) && (in_dev = __in_dev_get_rcu(dev))) { addr = in_dev_select_addr(in_dev, scope); if (addr) goto out_unlock; } /* Non-loopback addresses should be preferred in this case. It is important that lo is the first interface in the dev_base list. 
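
   Illustrative call (names invented for the example): a caller that
   needs any usable source address for a destination might use

       __be32 src = inet_select_addr(dev, dst_ip, RT_SCOPE_UNIVERSE);

   and only reaches the loop below when dev itself (and its L3 master,
   if any) offered no address of acceptable scope.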
*/ for_each_netdev_rcu(net, dev) { if (l3mdev_master_ifindex_rcu(dev) != master_idx) continue; in_dev = __in_dev_get_rcu(dev); if (!in_dev) continue; addr = in_dev_select_addr(in_dev, scope); if (addr) goto out_unlock; } out_unlock: rcu_read_unlock(); return addr; } EXPORT_SYMBOL(inet_select_addr); static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst, __be32 local, int scope) { unsigned char localnet_scope = RT_SCOPE_HOST; const struct in_ifaddr *ifa; __be32 addr = 0; int same = 0; if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev))) localnet_scope = RT_SCOPE_LINK; in_dev_for_each_ifa_rcu(ifa, in_dev) { unsigned char min_scope = min(ifa->ifa_scope, localnet_scope); if (!addr && (local == ifa->ifa_local || !local) && min_scope <= scope) { addr = ifa->ifa_local; if (same) break; } if (!same) { same = (!local || inet_ifa_match(local, ifa)) && (!dst || inet_ifa_match(dst, ifa)); if (same && addr) { if (local || !dst) break; /* Is the selected addr into dst subnet? */ if (inet_ifa_match(addr, ifa)) break; /* No, then can we use new local src? */ if (min_scope <= scope) { addr = ifa->ifa_local; break; } /* search for large dst subnet for addr */ same = 0; } } } return same ? addr : 0; } /* * Confirm that local IP address exists using wildcards: * - net: netns to check, cannot be NULL * - in_dev: only on this interface, NULL=any interface * - dst: only in the same subnet as dst, 0=any dst * - local: address, 0=autoselect the local address * - scope: maximum allowed scope value for the local address */ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, __be32 local, int scope) { __be32 addr = 0; struct net_device *dev; if (in_dev) return confirm_addr_indev(in_dev, dst, local, scope); rcu_read_lock(); for_each_netdev_rcu(net, dev) { in_dev = __in_dev_get_rcu(dev); if (in_dev) { addr = confirm_addr_indev(in_dev, dst, local, scope); if (addr) break; } } rcu_read_unlock(); return addr; } EXPORT_SYMBOL(inet_confirm_addr); /* * Device notifier */ int register_inetaddr_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&inetaddr_chain, nb); } EXPORT_SYMBOL(register_inetaddr_notifier); int unregister_inetaddr_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&inetaddr_chain, nb); } EXPORT_SYMBOL(unregister_inetaddr_notifier); int register_inetaddr_validator_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&inetaddr_validator_chain, nb); } EXPORT_SYMBOL(register_inetaddr_validator_notifier); int unregister_inetaddr_validator_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&inetaddr_validator_chain, nb); } EXPORT_SYMBOL(unregister_inetaddr_validator_notifier); /* Rename ifa_labels for a device name change. Make some effort to preserve * existing alias numbering and to create unique labels if possible. 
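 *
 * Example: renaming eth0 to eth1 relabels "eth0" -> "eth1" and
 * "eth0:1" -> "eth1:1"; a secondary address whose old label carried
 * no ':' suffix gets a synthesized ":<n>" suffix to keep labels unique.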
*/ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev) { struct in_ifaddr *ifa; int named = 0; in_dev_for_each_ifa_rtnl(ifa, in_dev) { char old[IFNAMSIZ], *dot; memcpy(old, ifa->ifa_label, IFNAMSIZ); memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); if (named++ == 0) goto skip; dot = strchr(old, ':'); if (!dot) { sprintf(old, ":%d", named); dot = old; } if (strlen(dot) + strlen(dev->name) < IFNAMSIZ) strcat(ifa->ifa_label, dot); else strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot); skip: rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); } } static void inetdev_send_gratuitous_arp(struct net_device *dev, struct in_device *in_dev) { const struct in_ifaddr *ifa; in_dev_for_each_ifa_rtnl(ifa, in_dev) { arp_send(ARPOP_REQUEST, ETH_P_ARP, ifa->ifa_local, dev, ifa->ifa_local, NULL, dev->dev_addr, NULL); } } /* Called only under RTNL semaphore */ static int inetdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct in_device *in_dev = __in_dev_get_rtnl(dev); ASSERT_RTNL(); if (!in_dev) { if (event == NETDEV_REGISTER) { in_dev = inetdev_init(dev); if (IS_ERR(in_dev)) return notifier_from_errno(PTR_ERR(in_dev)); if (dev->flags & IFF_LOOPBACK) { IN_DEV_CONF_SET(in_dev, NOXFRM, 1); IN_DEV_CONF_SET(in_dev, NOPOLICY, 1); } } else if (event == NETDEV_CHANGEMTU) { /* Re-enabling IP */ if (inetdev_valid_mtu(dev->mtu)) in_dev = inetdev_init(dev); } goto out; } switch (event) { case NETDEV_REGISTER: pr_debug("%s: bug\n", __func__); RCU_INIT_POINTER(dev->ip_ptr, NULL); break; case NETDEV_UP: if (!inetdev_valid_mtu(dev->mtu)) break; if (dev->flags & IFF_LOOPBACK) { struct in_ifaddr *ifa = inet_alloc_ifa(in_dev); if (ifa) { ifa->ifa_local = ifa->ifa_address = htonl(INADDR_LOOPBACK); ifa->ifa_prefixlen = 8; ifa->ifa_mask = inet_make_mask(8); ifa->ifa_scope = RT_SCOPE_HOST; memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); ipv4_devconf_setall(in_dev); neigh_parms_data_state_setall(in_dev->arp_parms); inet_insert_ifa(ifa); } } ip_mc_up(in_dev); fallthrough; case NETDEV_CHANGEADDR: if (!IN_DEV_ARP_NOTIFY(in_dev)) break; fallthrough; case NETDEV_NOTIFY_PEERS: /* Send gratuitous ARP to notify of link change */ inetdev_send_gratuitous_arp(dev, in_dev); break; case NETDEV_DOWN: ip_mc_down(in_dev); break; case NETDEV_PRE_TYPE_CHANGE: ip_mc_unmap(in_dev); break; case NETDEV_POST_TYPE_CHANGE: ip_mc_remap(in_dev); break; case NETDEV_CHANGEMTU: if (inetdev_valid_mtu(dev->mtu)) break; /* disable IP when MTU is not enough */ fallthrough; case NETDEV_UNREGISTER: inetdev_destroy(in_dev); break; case NETDEV_CHANGENAME: /* Do not notify about label change, this event is * not interesting to applications using netlink. 
*/ inetdev_changename(dev, in_dev); devinet_sysctl_unregister(in_dev); devinet_sysctl_register(in_dev); break; } out: return NOTIFY_DONE; } static struct notifier_block ip_netdev_notifier = { .notifier_call = inetdev_event, }; static size_t inet_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + nla_total_size(4) /* IFA_ADDRESS */ + nla_total_size(4) /* IFA_LOCAL */ + nla_total_size(4) /* IFA_BROADCAST */ + nla_total_size(IFNAMSIZ) /* IFA_LABEL */ + nla_total_size(4) /* IFA_FLAGS */ + nla_total_size(1) /* IFA_PROTO */ + nla_total_size(4) /* IFA_RT_PRIORITY */ + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */ } static inline u32 cstamp_delta(unsigned long cstamp) { return (cstamp - INITIAL_JIFFIES) * 100UL / HZ; } static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp, unsigned long tstamp, u32 preferred, u32 valid) { struct ifa_cacheinfo ci; ci.cstamp = cstamp_delta(cstamp); ci.tstamp = cstamp_delta(tstamp); ci.ifa_prefered = preferred; ci.ifa_valid = valid; return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci); } static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa, struct inet_fill_args *args) { struct ifaddrmsg *ifm; struct nlmsghdr *nlh; unsigned long tstamp; u32 preferred, valid; u32 flags; nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm), args->flags); if (!nlh) return -EMSGSIZE; ifm = nlmsg_data(nlh); ifm->ifa_family = AF_INET; ifm->ifa_prefixlen = ifa->ifa_prefixlen; flags = READ_ONCE(ifa->ifa_flags); /* Warning : ifm->ifa_flags is an __u8, it holds only 8 bits. * The 32bit value is given in IFA_FLAGS attribute. */ ifm->ifa_flags = (__u8)flags; ifm->ifa_scope = ifa->ifa_scope; ifm->ifa_index = ifa->ifa_dev->dev->ifindex; if (args->netnsid >= 0 && nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) goto nla_put_failure; tstamp = READ_ONCE(ifa->ifa_tstamp); if (!(flags & IFA_F_PERMANENT)) { preferred = READ_ONCE(ifa->ifa_preferred_lft); valid = READ_ONCE(ifa->ifa_valid_lft); if (preferred != INFINITY_LIFE_TIME) { long tval = (jiffies - tstamp) / HZ; if (preferred > tval) preferred -= tval; else preferred = 0; if (valid != INFINITY_LIFE_TIME) { if (valid > tval) valid -= tval; else valid = 0; } } } else { preferred = INFINITY_LIFE_TIME; valid = INFINITY_LIFE_TIME; } if ((ifa->ifa_address && nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) || (ifa->ifa_local && nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) || (ifa->ifa_broadcast && nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) || (ifa->ifa_label[0] && nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) || (ifa->ifa_proto && nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) || nla_put_u32(skb, IFA_FLAGS, flags) || (ifa->ifa_rt_priority && nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) || put_cacheinfo(skb, READ_ONCE(ifa->ifa_cstamp), tstamp, preferred, valid)) goto nla_put_failure; nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh, struct inet_fill_args *fillargs, struct net **tgt_net, struct sock *sk, struct netlink_callback *cb) { struct netlink_ext_ack *extack = cb->extack; struct nlattr *tb[IFA_MAX+1]; struct ifaddrmsg *ifm; int err, i; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { NL_SET_ERR_MSG(extack, "ipv4: Invalid header for address dump request"); return -EINVAL; } ifm = nlmsg_data(nlh); if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) { NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for 
address dump request"); return -EINVAL; } fillargs->ifindex = ifm->ifa_index; if (fillargs->ifindex) { cb->answer_flags |= NLM_F_DUMP_FILTERED; fillargs->flags |= NLM_F_DUMP_FILTERED; } err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy, extack); if (err < 0) return err; for (i = 0; i <= IFA_MAX; ++i) { if (!tb[i]) continue; if (i == IFA_TARGET_NETNSID) { struct net *net; fillargs->netnsid = nla_get_s32(tb[i]); net = rtnl_get_net_ns_capable(sk, fillargs->netnsid); if (IS_ERR(net)) { fillargs->netnsid = -1; NL_SET_ERR_MSG(extack, "ipv4: Invalid target network namespace id"); return PTR_ERR(net); } *tgt_net = net; } else { NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in dump request"); return -EINVAL; } } return 0; } static int in_dev_dump_ifmcaddr(struct in_device *in_dev, struct sk_buff *skb, struct netlink_callback *cb, int *s_ip_idx, struct inet_fill_args *fillargs) { struct ip_mc_list *im; int ip_idx = 0; int err; for (im = rcu_dereference(in_dev->mc_list); im; im = rcu_dereference(im->next_rcu)) { if (ip_idx < *s_ip_idx) { ip_idx++; continue; } err = inet_fill_ifmcaddr(skb, in_dev->dev, im, fillargs); if (err < 0) goto done; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); ip_idx++; } err = 0; ip_idx = 0; done: *s_ip_idx = ip_idx; return err; } static int in_dev_dump_ifaddr(struct in_device *in_dev, struct sk_buff *skb, struct netlink_callback *cb, int *s_ip_idx, struct inet_fill_args *fillargs) { struct in_ifaddr *ifa; int ip_idx = 0; int err; in_dev_for_each_ifa_rcu(ifa, in_dev) { if (ip_idx < *s_ip_idx) { ip_idx++; continue; } err = inet_fill_ifaddr(skb, ifa, fillargs); if (err < 0) goto done; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); ip_idx++; } err = 0; ip_idx = 0; done: *s_ip_idx = ip_idx; return err; } static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb, struct netlink_callback *cb, int *s_ip_idx, struct inet_fill_args *fillargs) { switch (fillargs->event) { case RTM_NEWADDR: return in_dev_dump_ifaddr(in_dev, skb, cb, s_ip_idx, fillargs); case RTM_GETMULTICAST: return in_dev_dump_ifmcaddr(in_dev, skb, cb, s_ip_idx, fillargs); default: return -EINVAL; } } /* Combine dev_addr_genid and dev_base_seq to detect changes. */ static u32 inet_base_seq(const struct net *net) { u32 res = atomic_read(&net->ipv4.dev_addr_genid) + READ_ONCE(net->dev_base_seq); /* Must not return 0 (see nl_dump_check_consistent()). * Chose a value far away from 0. 
*/ if (!res) res = 0x80000000; return res; } static int inet_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, int event) { const struct nlmsghdr *nlh = cb->nlh; struct inet_fill_args fillargs = { .portid = NETLINK_CB(cb->skb).portid, .seq = nlh->nlmsg_seq, .event = event, .flags = NLM_F_MULTI, .netnsid = -1, }; struct net *net = sock_net(skb->sk); struct net *tgt_net = net; struct { unsigned long ifindex; int ip_idx; } *ctx = (void *)cb->ctx; struct in_device *in_dev; struct net_device *dev; int err = 0; rcu_read_lock(); if (cb->strict_check) { err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net, skb->sk, cb); if (err < 0) goto done; if (fillargs.ifindex) { dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex); if (!dev) { err = -ENODEV; goto done; } in_dev = __in_dev_get_rcu(dev); if (!in_dev) goto done; err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx, &fillargs); goto done; } } cb->seq = inet_base_seq(tgt_net); for_each_netdev_dump(tgt_net, dev, ctx->ifindex) { in_dev = __in_dev_get_rcu(dev); if (!in_dev) continue; err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx, &fillargs); if (err < 0) goto done; } done: if (fillargs.netnsid >= 0) put_net(tgt_net); rcu_read_unlock(); return err; } static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) { return inet_dump_addr(skb, cb, RTM_NEWADDR); } static int inet_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb) { return inet_dump_addr(skb, cb, RTM_GETMULTICAST); } static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, u32 portid) { struct inet_fill_args fillargs = { .portid = portid, .seq = nlh ? nlh->nlmsg_seq : 0, .event = event, .flags = 0, .netnsid = -1, }; struct sk_buff *skb; int err = -ENOBUFS; struct net *net; net = dev_net(ifa->ifa_dev->dev); skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL); if (!skb) goto errout; err = inet_fill_ifaddr(skb, ifa, &fillargs); if (err < 0) { /* -EMSGSIZE implies BUG in inet_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL); return; errout: rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err); } static size_t inet_get_link_af_size(const struct net_device *dev, u32 ext_filter_mask) { struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr); if (!in_dev) return 0; return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */ } static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev, u32 ext_filter_mask) { struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr); struct nlattr *nla; int i; if (!in_dev) return -ENODATA; nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4); if (!nla) return -EMSGSIZE; for (i = 0; i < IPV4_DEVCONF_MAX; i++) ((u32 *) nla_data(nla))[i] = READ_ONCE(in_dev->cnf.data[i]); return 0; } static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = { [IFLA_INET_CONF] = { .type = NLA_NESTED }, }; static int inet_validate_link_af(const struct net_device *dev, const struct nlattr *nla, struct netlink_ext_ack *extack) { struct nlattr *a, *tb[IFLA_INET_MAX+1]; int err, rem; if (dev && !__in_dev_get_rtnl(dev)) return -EAFNOSUPPORT; err = nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, inet_af_policy, extack); if (err < 0) return err; if (tb[IFLA_INET_CONF]) { nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) { int cfgid = nla_type(a); if (nla_len(a) < 4) return -EINVAL; if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX) return -EINVAL; } } return 0; } static int inet_set_link_af(struct 
net_device *dev, const struct nlattr *nla, struct netlink_ext_ack *extack) { struct in_device *in_dev = __in_dev_get_rtnl(dev); struct nlattr *a, *tb[IFLA_INET_MAX+1]; int rem; if (!in_dev) return -EAFNOSUPPORT; if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0) return -EINVAL; if (tb[IFLA_INET_CONF]) { nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a)); } return 0; } static int inet_netconf_msgsize_devconf(int type) { int size = NLMSG_ALIGN(sizeof(struct netconfmsg)) + nla_total_size(4); /* NETCONFA_IFINDEX */ bool all = false; if (type == NETCONFA_ALL) all = true; if (all || type == NETCONFA_FORWARDING) size += nla_total_size(4); if (all || type == NETCONFA_RP_FILTER) size += nla_total_size(4); if (all || type == NETCONFA_MC_FORWARDING) size += nla_total_size(4); if (all || type == NETCONFA_BC_FORWARDING) size += nla_total_size(4); if (all || type == NETCONFA_PROXY_NEIGH) size += nla_total_size(4); if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) size += nla_total_size(4); return size; } static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex, const struct ipv4_devconf *devconf, u32 portid, u32 seq, int event, unsigned int flags, int type) { struct nlmsghdr *nlh; struct netconfmsg *ncm; bool all = false; nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg), flags); if (!nlh) return -EMSGSIZE; if (type == NETCONFA_ALL) all = true; ncm = nlmsg_data(nlh); ncm->ncm_family = AF_INET; if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0) goto nla_put_failure; if (!devconf) goto out; if ((all || type == NETCONFA_FORWARDING) && nla_put_s32(skb, NETCONFA_FORWARDING, IPV4_DEVCONF_RO(*devconf, FORWARDING)) < 0) goto nla_put_failure; if ((all || type == NETCONFA_RP_FILTER) && nla_put_s32(skb, NETCONFA_RP_FILTER, IPV4_DEVCONF_RO(*devconf, RP_FILTER)) < 0) goto nla_put_failure; if ((all || type == NETCONFA_MC_FORWARDING) && nla_put_s32(skb, NETCONFA_MC_FORWARDING, IPV4_DEVCONF_RO(*devconf, MC_FORWARDING)) < 0) goto nla_put_failure; if ((all || type == NETCONFA_BC_FORWARDING) && nla_put_s32(skb, NETCONFA_BC_FORWARDING, IPV4_DEVCONF_RO(*devconf, BC_FORWARDING)) < 0) goto nla_put_failure; if ((all || type == NETCONFA_PROXY_NEIGH) && nla_put_s32(skb, NETCONFA_PROXY_NEIGH, IPV4_DEVCONF_RO(*devconf, PROXY_ARP)) < 0) goto nla_put_failure; if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) && nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, IPV4_DEVCONF_RO(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0) goto nla_put_failure; out: nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } void inet_netconf_notify_devconf(struct net *net, int event, int type, int ifindex, struct ipv4_devconf *devconf) { struct sk_buff *skb; int err = -ENOBUFS; skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_KERNEL); if (!skb) goto errout; err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0, event, 0, type); if (err < 0) { /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_KERNEL); return; errout: rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err); } static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = { [NETCONFA_IFINDEX] = { .len = sizeof(int) }, [NETCONFA_FORWARDING] = { .len = sizeof(int) }, [NETCONFA_RP_FILTER] = { .len = sizeof(int) }, [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) }, [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] 
= { .len = sizeof(int) }, }; static int inet_netconf_valid_get_req(struct sk_buff *skb, const struct nlmsghdr *nlh, struct nlattr **tb, struct netlink_ext_ack *extack) { int i, err; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) { NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf get request"); return -EINVAL; } if (!netlink_strict_get_check(skb)) return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg), tb, NETCONFA_MAX, devconf_ipv4_policy, extack); err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg), tb, NETCONFA_MAX, devconf_ipv4_policy, extack); if (err) return err; for (i = 0; i <= NETCONFA_MAX; i++) { if (!tb[i]) continue; switch (i) { case NETCONFA_IFINDEX: break; default: NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in netconf get request"); return -EINVAL; } } return 0; } static int inet_netconf_get_devconf(struct sk_buff *in_skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); struct nlattr *tb[NETCONFA_MAX + 1]; const struct ipv4_devconf *devconf; struct in_device *in_dev = NULL; struct net_device *dev = NULL; struct sk_buff *skb; int ifindex; int err; err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack); if (err) return err; if (!tb[NETCONFA_IFINDEX]) return -EINVAL; ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]); switch (ifindex) { case NETCONFA_IFINDEX_ALL: devconf = net->ipv4.devconf_all; break; case NETCONFA_IFINDEX_DEFAULT: devconf = net->ipv4.devconf_dflt; break; default: err = -ENODEV; dev = dev_get_by_index(net, ifindex); if (dev) in_dev = in_dev_get(dev); if (!in_dev) goto errout; devconf = &in_dev->cnf; break; } err = -ENOBUFS; skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL); if (!skb) goto errout; err = inet_netconf_fill_devconf(skb, ifindex, devconf, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWNETCONF, 0, NETCONFA_ALL); if (err < 0) { /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); errout: if (in_dev) in_dev_put(in_dev); dev_put(dev); return err; } static int inet_netconf_dump_devconf(struct sk_buff *skb, struct netlink_callback *cb) { const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); struct { unsigned long ifindex; unsigned int all_default; } *ctx = (void *)cb->ctx; const struct in_device *in_dev; struct net_device *dev; int err = 0; if (cb->strict_check) { struct netlink_ext_ack *extack = cb->extack; struct netconfmsg *ncm; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) { NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf dump request"); return -EINVAL; } if (nlmsg_attrlen(nlh, sizeof(*ncm))) { NL_SET_ERR_MSG(extack, "ipv4: Invalid data after header in netconf dump request"); return -EINVAL; } } rcu_read_lock(); for_each_netdev_dump(net, dev, ctx->ifindex) { in_dev = __in_dev_get_rcu(dev); if (!in_dev) continue; err = inet_netconf_fill_devconf(skb, dev->ifindex, &in_dev->cnf, NETLINK_CB(cb->skb).portid, nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, NETCONFA_ALL); if (err < 0) goto done; } if (ctx->all_default == 0) { err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL, net->ipv4.devconf_all, NETLINK_CB(cb->skb).portid, nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, NETCONFA_ALL); if (err < 0) goto done; ctx->all_default++; } if (ctx->all_default == 1) { err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT, net->ipv4.devconf_dflt, NETLINK_CB(cb->skb).portid, 
nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, NETCONFA_ALL); if (err < 0) goto done; ctx->all_default++; } done: rcu_read_unlock(); return err; } #ifdef CONFIG_SYSCTL static void devinet_copy_dflt_conf(struct net *net, int i) { struct net_device *dev; rcu_read_lock(); for_each_netdev_rcu(net, dev) { struct in_device *in_dev; in_dev = __in_dev_get_rcu(dev); if (in_dev && !test_bit(i, in_dev->cnf.state)) in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i]; } rcu_read_unlock(); } /* called with RTNL locked */ static void inet_forward_change(struct net *net) { struct net_device *dev; int on = IPV4_DEVCONF_ALL(net, FORWARDING); IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on; IPV4_DEVCONF_DFLT(net, FORWARDING) = on; inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_FORWARDING, NETCONFA_IFINDEX_ALL, net->ipv4.devconf_all); inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_FORWARDING, NETCONFA_IFINDEX_DEFAULT, net->ipv4.devconf_dflt); for_each_netdev(net, dev) { struct in_device *in_dev; if (on) dev_disable_lro(dev); in_dev = __in_dev_get_rtnl_net(dev); if (in_dev) { IN_DEV_CONF_SET(in_dev, FORWARDING, on); inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_FORWARDING, dev->ifindex, &in_dev->cnf); } } } static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf) { if (cnf == net->ipv4.devconf_dflt) return NETCONFA_IFINDEX_DEFAULT; else if (cnf == net->ipv4.devconf_all) return NETCONFA_IFINDEX_ALL; else { struct in_device *idev = container_of(cnf, struct in_device, cnf); return idev->dev->ifindex; } } static int devinet_conf_proc(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { int old_value = *(int *)ctl->data; int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); int new_value = *(int *)ctl->data; if (write) { struct ipv4_devconf *cnf = ctl->extra1; struct net *net = ctl->extra2; int i = (int *)ctl->data - cnf->data; int ifindex; set_bit(i, cnf->state); if (cnf == net->ipv4.devconf_dflt) devinet_copy_dflt_conf(net, i); if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 || i == IPV4_DEVCONF_ROUTE_LOCALNET - 1) if ((new_value == 0) && (old_value != 0)) rt_cache_flush(net); if (i == IPV4_DEVCONF_BC_FORWARDING - 1 && new_value != old_value) rt_cache_flush(net); if (i == IPV4_DEVCONF_RP_FILTER - 1 && new_value != old_value) { ifindex = devinet_conf_ifindex(net, cnf); inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_RP_FILTER, ifindex, cnf); } if (i == IPV4_DEVCONF_PROXY_ARP - 1 && new_value != old_value) { ifindex = devinet_conf_ifindex(net, cnf); inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_PROXY_NEIGH, ifindex, cnf); } if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 && new_value != old_value) { ifindex = devinet_conf_ifindex(net, cnf); inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, ifindex, cnf); } } return ret; } static int devinet_sysctl_forward(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { int *valp = ctl->data; int val = *valp; loff_t pos = *ppos; struct net *net = ctl->extra2; int ret; if (write && !ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; ret = proc_dointvec(ctl, write, buffer, lenp, ppos); if (write && *valp != val) { if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) { if (!rtnl_net_trylock(net)) { /* Restore the original values before restarting */ *valp = val; *ppos = pos; return restart_syscall(); } if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) { inet_forward_change(net); } else { struct ipv4_devconf *cnf = 
ctl->extra1; struct in_device *idev = container_of(cnf, struct in_device, cnf); if (*valp) dev_disable_lro(idev->dev); inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_FORWARDING, idev->dev->ifindex, cnf); } rtnl_net_unlock(net); rt_cache_flush(net); } else inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_FORWARDING, NETCONFA_IFINDEX_DEFAULT, net->ipv4.devconf_dflt); } return ret; } static int ipv4_doint_and_flush(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { int *valp = ctl->data; int val = *valp; int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); struct net *net = ctl->extra2; if (write && *valp != val) rt_cache_flush(net); return ret; } #define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \ { \ .procname = name, \ .data = ipv4_devconf.data + \ IPV4_DEVCONF_ ## attr - 1, \ .maxlen = sizeof(int), \ .mode = mval, \ .proc_handler = proc, \ .extra1 = &ipv4_devconf, \ } #define DEVINET_SYSCTL_RW_ENTRY(attr, name) \ DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc) #define DEVINET_SYSCTL_RO_ENTRY(attr, name) \ DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc) #define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \ DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc) #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \ DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush) static struct devinet_sysctl_table { struct ctl_table_header *sysctl_header; struct ctl_table devinet_vars[IPV4_DEVCONF_MAX]; } devinet_sysctl = { .devinet_vars = { DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding", devinet_sysctl_forward), DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"), DEVINET_SYSCTL_RW_ENTRY(BC_FORWARDING, "bc_forwarding"), DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"), DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"), DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"), DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"), DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"), DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE, "accept_source_route"), DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"), DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"), DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"), DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"), DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"), DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"), DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"), DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"), DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"), DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"), DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"), DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"), DEVINET_SYSCTL_RW_ENTRY(ARP_EVICT_NOCARRIER, "arp_evict_nocarrier"), DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"), DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION, "force_igmp_version"), DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL, "igmpv2_unsolicited_report_interval"), DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL, "igmpv3_unsolicited_report_interval"), DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN, "ignore_routes_with_linkdown"), DEVINET_SYSCTL_RW_ENTRY(DROP_GRATUITOUS_ARP, "drop_gratuitous_arp"), DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"), DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"), DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES, "promote_secondaries"), DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET, "route_localnet"), 
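	/* The FLUSHING entries route writes through ipv4_doint_and_flush(),
	 * which additionally calls rt_cache_flush() on any value change. */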
DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST, "drop_unicast_in_l2_multicast"), }, }; static int __devinet_sysctl_register(struct net *net, char *dev_name, int ifindex, struct ipv4_devconf *p) { int i; struct devinet_sysctl_table *t; char path[sizeof("net/ipv4/conf/") + IFNAMSIZ]; t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL_ACCOUNT); if (!t) goto out; for (i = 0; i < ARRAY_SIZE(t->devinet_vars); i++) { t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf; t->devinet_vars[i].extra1 = p; t->devinet_vars[i].extra2 = net; } snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name); t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars); if (!t->sysctl_header) goto free; p->sysctl = t; inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL, ifindex, p); return 0; free: kfree(t); out: return -ENOMEM; } static void __devinet_sysctl_unregister(struct net *net, struct ipv4_devconf *cnf, int ifindex) { struct devinet_sysctl_table *t = cnf->sysctl; if (t) { cnf->sysctl = NULL; unregister_net_sysctl_table(t->sysctl_header); kfree(t); } inet_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL); } static int devinet_sysctl_register(struct in_device *idev) { int err; if (!sysctl_dev_name_is_allowed(idev->dev->name)) return -EINVAL; err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL); if (err) return err; err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name, idev->dev->ifindex, &idev->cnf); if (err) neigh_sysctl_unregister(idev->arp_parms); return err; } static void devinet_sysctl_unregister(struct in_device *idev) { struct net *net = dev_net(idev->dev); __devinet_sysctl_unregister(net, &idev->cnf, idev->dev->ifindex); neigh_sysctl_unregister(idev->arp_parms); } static struct ctl_table ctl_forward_entry[] = { { .procname = "ip_forward", .data = &ipv4_devconf.data[ IPV4_DEVCONF_FORWARDING - 1], .maxlen = sizeof(int), .mode = 0644, .proc_handler = devinet_sysctl_forward, .extra1 = &ipv4_devconf, .extra2 = &init_net, }, }; #endif static __net_init int devinet_init_net(struct net *net) { #ifdef CONFIG_SYSCTL struct ctl_table_header *forw_hdr; struct ctl_table *tbl; #endif struct ipv4_devconf *all, *dflt; int err; int i; err = -ENOMEM; net->ipv4.inet_addr_lst = kmalloc_array(IN4_ADDR_HSIZE, sizeof(struct hlist_head), GFP_KERNEL); if (!net->ipv4.inet_addr_lst) goto err_alloc_hash; all = kmemdup(&ipv4_devconf, sizeof(ipv4_devconf), GFP_KERNEL); if (!all) goto err_alloc_all; dflt = kmemdup(&ipv4_devconf_dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL); if (!dflt) goto err_alloc_dflt; #ifdef CONFIG_SYSCTL tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL); if (!tbl) goto err_alloc_ctl; tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1]; tbl[0].extra1 = all; tbl[0].extra2 = net; #endif if (!net_eq(net, &init_net)) { switch (net_inherit_devconf()) { case 3: /* copy from the current netns */ memcpy(all, current->nsproxy->net_ns->ipv4.devconf_all, sizeof(ipv4_devconf)); memcpy(dflt, current->nsproxy->net_ns->ipv4.devconf_dflt, sizeof(ipv4_devconf_dflt)); break; case 0: case 1: /* copy from init_net */ memcpy(all, init_net.ipv4.devconf_all, sizeof(ipv4_devconf)); memcpy(dflt, init_net.ipv4.devconf_dflt, sizeof(ipv4_devconf_dflt)); break; case 2: /* use compiled values */ break; } } #ifdef CONFIG_SYSCTL err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all); if (err < 0) goto err_reg_all; err = __devinet_sysctl_register(net, "default", NETCONFA_IFINDEX_DEFAULT, dflt); if (err < 0) goto err_reg_dflt; err 
= -ENOMEM; forw_hdr = register_net_sysctl_sz(net, "net/ipv4", tbl, ARRAY_SIZE(ctl_forward_entry)); if (!forw_hdr) goto err_reg_ctl; net->ipv4.forw_hdr = forw_hdr; #endif for (i = 0; i < IN4_ADDR_HSIZE; i++) INIT_HLIST_HEAD(&net->ipv4.inet_addr_lst[i]); INIT_DEFERRABLE_WORK(&net->ipv4.addr_chk_work, check_lifetime); net->ipv4.devconf_all = all; net->ipv4.devconf_dflt = dflt; return 0; #ifdef CONFIG_SYSCTL err_reg_ctl: __devinet_sysctl_unregister(net, dflt, NETCONFA_IFINDEX_DEFAULT); err_reg_dflt: __devinet_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL); err_reg_all: kfree(tbl); err_alloc_ctl: #endif kfree(dflt); err_alloc_dflt: kfree(all); err_alloc_all: kfree(net->ipv4.inet_addr_lst); err_alloc_hash: return err; } static __net_exit void devinet_exit_net(struct net *net) { #ifdef CONFIG_SYSCTL const struct ctl_table *tbl; #endif cancel_delayed_work_sync(&net->ipv4.addr_chk_work); #ifdef CONFIG_SYSCTL tbl = net->ipv4.forw_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv4.forw_hdr); __devinet_sysctl_unregister(net, net->ipv4.devconf_dflt, NETCONFA_IFINDEX_DEFAULT); __devinet_sysctl_unregister(net, net->ipv4.devconf_all, NETCONFA_IFINDEX_ALL); kfree(tbl); #endif kfree(net->ipv4.devconf_dflt); kfree(net->ipv4.devconf_all); kfree(net->ipv4.inet_addr_lst); } static __net_initdata struct pernet_operations devinet_ops = { .init = devinet_init_net, .exit = devinet_exit_net, }; static struct rtnl_af_ops inet_af_ops __read_mostly = { .family = AF_INET, .fill_link_af = inet_fill_link_af, .get_link_af_size = inet_get_link_af_size, .validate_link_af = inet_validate_link_af, .set_link_af = inet_set_link_af, }; static const struct rtnl_msg_handler devinet_rtnl_msg_handlers[] __initconst = { {.protocol = PF_INET, .msgtype = RTM_NEWADDR, .doit = inet_rtm_newaddr, .flags = RTNL_FLAG_DOIT_PERNET}, {.protocol = PF_INET, .msgtype = RTM_DELADDR, .doit = inet_rtm_deladdr, .flags = RTNL_FLAG_DOIT_PERNET}, {.protocol = PF_INET, .msgtype = RTM_GETADDR, .dumpit = inet_dump_ifaddr, .flags = RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE}, {.protocol = PF_INET, .msgtype = RTM_GETNETCONF, .doit = inet_netconf_get_devconf, .dumpit = inet_netconf_dump_devconf, .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED}, {.owner = THIS_MODULE, .protocol = PF_INET, .msgtype = RTM_GETMULTICAST, .dumpit = inet_dump_ifmcaddr, .flags = RTNL_FLAG_DUMP_UNLOCKED}, }; void __init devinet_init(void) { register_pernet_subsys(&devinet_ops); register_netdevice_notifier(&ip_netdev_notifier); if (rtnl_af_register(&inet_af_ops)) panic("Unable to register inet_af_ops\n"); rtnl_register_many(devinet_rtnl_msg_handlers); }
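/*
 * Illustrative companion (userspace, not part of the kernel file above):
 * SIOCGIFCONF requests are what ultimately reach inet_gifconf(). A
 * minimal caller, with a fixed-size buffer and abbreviated error
 * handling, might look like this:
 *
 *	#include <arpa/inet.h>
 *	#include <net/if.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq reqs[32];
 *		struct ifconf ifc = { .ifc_len = sizeof(reqs) };
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *		int i, n;
 *
 *		ifc.ifc_req = reqs;
 *		if (fd < 0 || ioctl(fd, SIOCGIFCONF, &ifc) < 0)
 *			return 1;
 *		n = ifc.ifc_len / sizeof(struct ifreq);
 *		for (i = 0; i < n; i++) {
 *			struct sockaddr_in *sin =
 *				(struct sockaddr_in *)&reqs[i].ifr_addr;
 *			printf("%-16s %s\n", reqs[i].ifr_name,
 *			       inet_ntoa(sin->sin_addr));
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */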
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 1999-2002 Vojtech Pavlik */ #ifndef _INPUT_H #define _INPUT_H #include <linux/time.h> #include <linux/list.h> #include <uapi/linux/input.h> /* Implementation details, userspace should not care about these */ #define ABS_MT_FIRST ABS_MT_TOUCH_MAJOR #define ABS_MT_LAST ABS_MT_TOOL_Y /* * In-kernel definitions. */ #include <linux/device.h> #include <linux/fs.h> #include <linux/timer.h> #include <linux/mod_devicetable.h> struct input_dev_poller; /** * struct input_value - input value representation * @type: type of value (EV_KEY, EV_ABS, etc) * @code: the value code * @value: the value */ struct input_value { __u16 type; __u16 code; __s32 value; }; enum input_clock_type { INPUT_CLK_REAL = 0, INPUT_CLK_MONO, INPUT_CLK_BOOT, INPUT_CLK_MAX }; /** * struct input_dev - represents an input device * @name: name of the device * @phys: physical path to the device in the system hierarchy * @uniq: unique identification code for the device (if device has it) * @id: id of the device (struct input_id) * @propbit: bitmap of device properties and quirks * @evbit: bitmap of types of events supported by the device (EV_KEY, * EV_REL, etc.) 
* @keybit: bitmap of keys/buttons this device has * @relbit: bitmap of relative axes for the device * @absbit: bitmap of absolute axes for the device * @mscbit: bitmap of miscellaneous events supported by the device * @ledbit: bitmap of leds present on the device * @sndbit: bitmap of sound effects supported by the device * @ffbit: bitmap of force feedback effects supported by the device * @swbit: bitmap of switches present on the device * @hint_events_per_packet: average number of events generated by the * device in a packet (between EV_SYN/SYN_REPORT events). Used by * event handlers to estimate size of the buffer needed to hold * events. * @keycodemax: size of keycode table * @keycodesize: size of elements in keycode table * @keycode: map of scancodes to keycodes for this device * @getkeycode: optional legacy method to retrieve current keymap. * @setkeycode: optional method to alter current keymap, used to implement * sparse keymaps. If not supplied default mechanism will be used. * The method is being called while holding event_lock and thus must * not sleep * @ff: force feedback structure associated with the device if device * supports force feedback effects * @poller: poller structure associated with the device if device is * set up to use polling mode * @repeat_key: stores key code of the last key pressed; used to implement * software autorepeat * @timer: timer for software autorepeat * @rep: current values for autorepeat parameters (delay, rate) * @mt: pointer to multitouch state * @absinfo: array of &struct input_absinfo elements holding information * about absolute axes (current value, min, max, flat, fuzz, * resolution) * @key: reflects current state of device's keys/buttons * @led: reflects current state of device's LEDs * @snd: reflects current state of sound effects * @sw: reflects current state of device's switches * @open: this method is called when the very first user calls * input_open_device(). The driver must prepare the device * to start generating events (start polling thread, * request an IRQ, submit URB, etc.). The meaning of open() is * to start providing events to the input core. * @close: this method is called when the very last user calls * input_close_device(). The meaning of close() is to stop * providing events to the input core. * @flush: purges the device. Most commonly used to get rid of force * feedback effects loaded into the device when disconnecting * from it * @event: event handler for events sent _to_ the device, like EV_LED * or EV_SND. The device is expected to carry out the requested * action (turn on a LED, play sound, etc.) The call is protected * by @event_lock and must not sleep * @grab: input handle that currently has the device grabbed (via * EVIOCGRAB ioctl). When a handle grabs a device it becomes sole * recipient for all input events coming from the device * @event_lock: this spinlock is taken when input core receives * and processes a new event for the device (in input_event()). * Code that accesses and/or modifies parameters of a device * (such as keymap or absmin, absmax, absfuzz, etc.) after device * has been registered with input core must take this lock. * @mutex: serializes calls to open(), close() and flush() methods * @users: stores number of users (input handlers) that opened this * device. 
It is used by input_open_device() * to make sure that dev->open() is only called when the first * user opens device and dev->close() is called when the very * last user closes the device * @going_away: marks devices that are in the middle of unregistering and * causes input_open_device*() to fail with -ENODEV. * @dev: driver model's view of this device * @h_list: list of input handles associated with the device. When * accessing the list dev->mutex must be held * @node: used to place the device onto input_dev_list * @num_vals: number of values queued in the current frame * @max_vals: maximum number of values queued in a frame * @vals: array of values queued in the current frame * @devres_managed: indicates that the device is managed with the devres framework * and need not be explicitly unregistered or freed. * @timestamp: storage for a timestamp set by input_set_timestamp called * by a driver * @inhibited: indicates that the input device is inhibited. If that is * the case then input core ignores any events generated by the device. * Device's close() is called when it is being inhibited and its open() * is called when it is being uninhibited. */ struct input_dev { const char *name; const char *phys; const char *uniq; struct input_id id; unsigned long propbit[BITS_TO_LONGS(INPUT_PROP_CNT)]; unsigned long evbit[BITS_TO_LONGS(EV_CNT)]; unsigned long keybit[BITS_TO_LONGS(KEY_CNT)]; unsigned long relbit[BITS_TO_LONGS(REL_CNT)]; unsigned long absbit[BITS_TO_LONGS(ABS_CNT)]; unsigned long mscbit[BITS_TO_LONGS(MSC_CNT)]; unsigned long ledbit[BITS_TO_LONGS(LED_CNT)]; unsigned long sndbit[BITS_TO_LONGS(SND_CNT)]; unsigned long ffbit[BITS_TO_LONGS(FF_CNT)]; unsigned long swbit[BITS_TO_LONGS(SW_CNT)]; unsigned int hint_events_per_packet; unsigned int keycodemax; unsigned int keycodesize; void *keycode; int (*setkeycode)(struct input_dev *dev, const struct input_keymap_entry *ke, unsigned int *old_keycode); int (*getkeycode)(struct input_dev *dev, struct input_keymap_entry *ke); struct ff_device *ff; struct input_dev_poller *poller; unsigned int repeat_key; struct timer_list timer; int rep[REP_CNT]; struct input_mt *mt; struct input_absinfo *absinfo; unsigned long key[BITS_TO_LONGS(KEY_CNT)]; unsigned long led[BITS_TO_LONGS(LED_CNT)]; unsigned long snd[BITS_TO_LONGS(SND_CNT)]; unsigned long sw[BITS_TO_LONGS(SW_CNT)]; int (*open)(struct input_dev *dev); void (*close)(struct input_dev *dev); int (*flush)(struct input_dev *dev, struct file *file); int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); struct input_handle __rcu *grab; spinlock_t event_lock; struct mutex mutex; unsigned int users; bool going_away; struct device dev; struct list_head h_list; struct list_head node; unsigned int num_vals; unsigned int max_vals; struct input_value *vals; bool devres_managed; ktime_t timestamp[INPUT_CLK_MAX]; bool inhibited; }; #define to_input_dev(d) container_of(d, struct input_dev, dev) /* * Verify that we are in sync with input_device_id mod_devicetable.h #defines */ #if EV_MAX != INPUT_DEVICE_ID_EV_MAX #error "EV_MAX and INPUT_DEVICE_ID_EV_MAX do not match" #endif #if KEY_MIN_INTERESTING != INPUT_DEVICE_ID_KEY_MIN_INTERESTING #error "KEY_MIN_INTERESTING and INPUT_DEVICE_ID_KEY_MIN_INTERESTING do not match" #endif #if KEY_MAX != INPUT_DEVICE_ID_KEY_MAX #error "KEY_MAX and INPUT_DEVICE_ID_KEY_MAX do not match" #endif #if REL_MAX != INPUT_DEVICE_ID_REL_MAX #error "REL_MAX and INPUT_DEVICE_ID_REL_MAX do not match" #endif #if ABS_MAX != INPUT_DEVICE_ID_ABS_MAX #error 
"ABS_MAX and INPUT_DEVICE_ID_ABS_MAX do not match" #endif #if MSC_MAX != INPUT_DEVICE_ID_MSC_MAX #error "MSC_MAX and INPUT_DEVICE_ID_MSC_MAX do not match" #endif #if LED_MAX != INPUT_DEVICE_ID_LED_MAX #error "LED_MAX and INPUT_DEVICE_ID_LED_MAX do not match" #endif #if SND_MAX != INPUT_DEVICE_ID_SND_MAX #error "SND_MAX and INPUT_DEVICE_ID_SND_MAX do not match" #endif #if FF_MAX != INPUT_DEVICE_ID_FF_MAX #error "FF_MAX and INPUT_DEVICE_ID_FF_MAX do not match" #endif #if SW_MAX != INPUT_DEVICE_ID_SW_MAX #error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match" #endif #if INPUT_PROP_MAX != INPUT_DEVICE_ID_PROP_MAX #error "INPUT_PROP_MAX and INPUT_DEVICE_ID_PROP_MAX do not match" #endif #define INPUT_DEVICE_ID_MATCH_DEVICE \ (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT) #define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \ (INPUT_DEVICE_ID_MATCH_DEVICE | INPUT_DEVICE_ID_MATCH_VERSION) struct input_handle; /** * struct input_handler - implements one of interfaces for input devices * @private: driver-specific data * @event: event handler. This method is being called by input core with * interrupts disabled and dev->event_lock spinlock held and so * it may not sleep * @events: event sequence handler. This method is being called by * input core with interrupts disabled and dev->event_lock * spinlock held and so it may not sleep. The method must return * number of events passed to it. * @filter: similar to @event; separates normal event handlers from * "filters". * @match: called after comparing device's id with handler's id_table * to perform fine-grained matching between device and handler * @connect: called when attaching a handler to an input device * @disconnect: disconnects a handler from input device * @start: starts handler for given handle. This function is called by * input core right after connect() method and also when a process * that "grabbed" a device releases it * @passive_observer: set to %true by drivers only interested in observing * data stream from devices if there are other users present. Such * drivers will not result in starting underlying hardware device * when input_open_device() is called for their handles * @legacy_minors: set to %true by drivers using legacy minor ranges * @minor: beginning of range of 32 legacy minors for devices this driver * can provide * @name: name of the handler, to be shown in /proc/bus/input/handlers * @id_table: pointer to a table of input_device_ids this driver can * handle * @h_list: list of input handles associated with the handler * @node: for placing the driver onto input_handler_list * * Input handlers attach to input devices and create input handles. There * are likely several handlers attached to any given input device at the * same time. All of them will get their copy of input event generated by * the device. * * The very same structure is used to implement input filters. Input core * allows filters to run first and will not pass event to regular handlers * if any of the filters indicate that the event should be filtered (by * returning %true from their filter() method). * * Note that input core serializes calls to connect() and disconnect() * methods. 
*/ struct input_handler { void *private; void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value); unsigned int (*events)(struct input_handle *handle, struct input_value *vals, unsigned int count); bool (*filter)(struct input_handle *handle, unsigned int type, unsigned int code, int value); bool (*match)(struct input_handler *handler, struct input_dev *dev); int (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id); void (*disconnect)(struct input_handle *handle); void (*start)(struct input_handle *handle); bool passive_observer; bool legacy_minors; int minor; const char *name; const struct input_device_id *id_table; struct list_head h_list; struct list_head node; }; /** * struct input_handle - links input device with an input handler * @private: handler-specific data * @open: counter showing whether the handle is 'open', i.e. should deliver * events from its device * @name: name given to the handle by handler that created it * @dev: input device the handle is attached to * @handler: handler that works with the device through this handle * @handle_events: event sequence handler. It is set up by the input core * according to event handling method specified in the @handler. See * input_handle_setup_event_handler(). * This method is being called by the input core with interrupts disabled * and dev->event_lock spinlock held and so it may not sleep. * @d_node: used to put the handle on device's list of attached handles * @h_node: used to put the handle on handler's list of handles from which * it gets events */ struct input_handle { void *private; int open; const char *name; struct input_dev *dev; struct input_handler *handler; unsigned int (*handle_events)(struct input_handle *handle, struct input_value *vals, unsigned int count); struct list_head d_node; struct list_head h_node; }; struct input_dev __must_check *input_allocate_device(void); struct input_dev __must_check *devm_input_allocate_device(struct device *); void input_free_device(struct input_dev *dev); static inline struct input_dev *input_get_device(struct input_dev *dev) { return dev ? 
to_input_dev(get_device(&dev->dev)) : NULL; } static inline void input_put_device(struct input_dev *dev) { if (dev) put_device(&dev->dev); } static inline void *input_get_drvdata(struct input_dev *dev) { return dev_get_drvdata(&dev->dev); } static inline void input_set_drvdata(struct input_dev *dev, void *data) { dev_set_drvdata(&dev->dev, data); } int __must_check input_register_device(struct input_dev *); void input_unregister_device(struct input_dev *); void input_reset_device(struct input_dev *); int input_setup_polling(struct input_dev *dev, void (*poll_fn)(struct input_dev *dev)); void input_set_poll_interval(struct input_dev *dev, unsigned int interval); void input_set_min_poll_interval(struct input_dev *dev, unsigned int interval); void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval); int input_get_poll_interval(struct input_dev *dev); int __must_check input_register_handler(struct input_handler *); void input_unregister_handler(struct input_handler *); int __must_check input_get_new_minor(int legacy_base, unsigned int legacy_num, bool allow_dynamic); void input_free_minor(unsigned int minor); int input_handler_for_each_handle(struct input_handler *, void *data, int (*fn)(struct input_handle *, void *)); int input_register_handle(struct input_handle *); void input_unregister_handle(struct input_handle *); int input_grab_device(struct input_handle *); void input_release_device(struct input_handle *); int input_open_device(struct input_handle *); void input_close_device(struct input_handle *); int input_flush_device(struct input_handle *handle, struct file *file); void input_set_timestamp(struct input_dev *dev, ktime_t timestamp); ktime_t *input_get_timestamp(struct input_dev *dev); void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value); static inline void input_report_key(struct input_dev *dev, unsigned int code, int value) { input_event(dev, EV_KEY, code, !!value); } static inline void input_report_rel(struct input_dev *dev, unsigned int code, int value) { input_event(dev, EV_REL, code, value); } static inline void input_report_abs(struct input_dev *dev, unsigned int code, int value) { input_event(dev, EV_ABS, code, value); } static inline void input_report_ff_status(struct input_dev *dev, unsigned int code, int value) { input_event(dev, EV_FF_STATUS, code, value); } static inline void input_report_switch(struct input_dev *dev, unsigned int code, int value) { input_event(dev, EV_SW, code, !!value); } static inline void input_sync(struct input_dev *dev) { input_event(dev, EV_SYN, SYN_REPORT, 0); } static inline void input_mt_sync(struct input_dev *dev) { input_event(dev, EV_SYN, SYN_MT_REPORT, 0); } void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code); /** * input_set_events_per_packet - tell handlers about the driver event rate * @dev: the input device used by the driver * @n_events: the average number of events between calls to input_sync() * * If the event rate sent from a device is unusually large, use this * function to set the expected event rate. This will allow handlers * to set up an appropriate buffer size for the event stream, in order * to minimize information loss. 
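 *
 * Example (illustrative figures): a 10-slot multitouch device that
 * reports about six events per contact per frame could hint
 *
 *	input_set_events_per_packet(dev, 60);
 *
 * before registering the device.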
*/ static inline void input_set_events_per_packet(struct input_dev *dev, int n_events) { dev->hint_events_per_packet = n_events; } void input_alloc_absinfo(struct input_dev *dev); void input_set_abs_params(struct input_dev *dev, unsigned int axis, int min, int max, int fuzz, int flat); void input_copy_abs(struct input_dev *dst, unsigned int dst_axis, const struct input_dev *src, unsigned int src_axis); #define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \ static inline int input_abs_get_##_suffix(struct input_dev *dev, \ unsigned int axis) \ { \ return dev->absinfo ? dev->absinfo[axis]._item : 0; \ } \ \ static inline void input_abs_set_##_suffix(struct input_dev *dev, \ unsigned int axis, int val) \ { \ input_alloc_absinfo(dev); \ if (dev->absinfo) \ dev->absinfo[axis]._item = val; \ } INPUT_GENERATE_ABS_ACCESSORS(val, value) INPUT_GENERATE_ABS_ACCESSORS(min, minimum) INPUT_GENERATE_ABS_ACCESSORS(max, maximum) INPUT_GENERATE_ABS_ACCESSORS(fuzz, fuzz) INPUT_GENERATE_ABS_ACCESSORS(flat, flat) INPUT_GENERATE_ABS_ACCESSORS(res, resolution) int input_scancode_to_scalar(const struct input_keymap_entry *ke, unsigned int *scancode); int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke); int input_set_keycode(struct input_dev *dev, const struct input_keymap_entry *ke); bool input_match_device_id(const struct input_dev *dev, const struct input_device_id *id); void input_enable_softrepeat(struct input_dev *dev, int delay, int period); bool input_device_enabled(struct input_dev *dev); extern const struct class input_class; /** * struct ff_device - force-feedback part of an input device * @upload: Called to upload a new effect into device * @erase: Called to erase an effect from device * @playback: Called to request device to start playing specified effect * @set_gain: Called to set specified gain * @set_autocenter: Called to auto-center device * @destroy: Called by input core when parent input device is being * destroyed * @private: driver-specific data, will be freed automatically * @ffbit: bitmap of force feedback capabilities truly supported by * device (not emulated like ones in input_dev->ffbit) * @mutex: mutex for serializing access to the device * @max_effects: maximum number of effects supported by device * @effects: pointer to an array of effects currently loaded into device * @effect_owners: array of effect owners; when the file handle owning * an effect gets closed the effect is automatically erased * * Every force-feedback device must implement upload() and playback() * methods; erase() is optional. set_gain() and set_autocenter() need * only be implemented if the driver sets up FF_GAIN and FF_AUTOCENTER * bits. * * Note that playback(), set_gain() and set_autocenter() are called with * dev->event_lock spinlock held and interrupts off and thus may not * sleep.
*/ struct ff_device { int (*upload)(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old); int (*erase)(struct input_dev *dev, int effect_id); int (*playback)(struct input_dev *dev, int effect_id, int value); void (*set_gain)(struct input_dev *dev, u16 gain); void (*set_autocenter)(struct input_dev *dev, u16 magnitude); void (*destroy)(struct ff_device *); void *private; unsigned long ffbit[BITS_TO_LONGS(FF_CNT)]; struct mutex mutex; int max_effects; struct ff_effect *effects; struct file *effect_owners[] __counted_by(max_effects); }; int input_ff_create(struct input_dev *dev, unsigned int max_effects); void input_ff_destroy(struct input_dev *dev); int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file); int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file); int input_ff_flush(struct input_dev *dev, struct file *file); int input_ff_create_memless(struct input_dev *dev, void *data, int (*play_effect)(struct input_dev *, void *, struct ff_effect *)); #endif
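/*
 * A minimal usage sketch of the registration and event-reporting API
 * declared above (not part of input.h; the "example" names and the
 * choice of KEY_ENTER are invented for illustration):
 */
#include <linux/input.h>
#include <linux/module.h>

static struct input_dev *example_dev;

static int __init example_init(void)
{
	int error;

	example_dev = input_allocate_device();
	if (!example_dev)
		return -ENOMEM;

	example_dev->name = "Example Button";
	/* Advertise one capability: EV_KEY events with code KEY_ENTER. */
	input_set_capability(example_dev, EV_KEY, KEY_ENTER);

	error = input_register_device(example_dev);
	if (error) {
		input_free_device(example_dev);
		return error;
	}

	/* Report a press and a release; input_sync() terminates each packet. */
	input_report_key(example_dev, KEY_ENTER, 1);
	input_sync(example_dev);
	input_report_key(example_dev, KEY_ENTER, 0);
	input_sync(example_dev);
	return 0;
}

static void __exit example_exit(void)
{
	/* Also drops the last reference, which frees the device. */
	input_unregister_device(example_dev);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");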
/* SPDX-License-Identifier: GPL-2.0-only */ /* * A policy database (policydb) specifies the * configuration data for the security policy. * * Author : Stephen Smalley, <stephen.smalley.work@gmail.com> */ /* * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com> * Support for enhanced MLS infrastructure. * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc. * * Updated: Frank Mayer <mayerf@tresys.com> and * Karl MacMillan <kmacmillan@tresys.com> * Added conditional policy language extensions * Copyright (C) 2003-2004 Tresys Technology, LLC */ #ifndef _SS_POLICYDB_H_ #define _SS_POLICYDB_H_ #include "symtab.h" #include "avtab.h" #include "sidtab.h" #include "ebitmap.h" #include "mls_types.h" #include "context.h" #include "constraint.h" /* * A datum type is defined for each kind of symbol * in the configuration data: individual permissions, * common prefixes for access vectors, classes, * users, roles, types, sensitivities, categories, etc.
*/ /* Permission attributes */ struct perm_datum { u32 value; /* permission bit + 1 */ }; /* Attributes of a common prefix for access vectors */ struct common_datum { u32 value; /* internal common value */ struct symtab permissions; /* common permissions */ }; /* Class attributes */ struct class_datum { u32 value; /* class value */ char *comkey; /* common name */ struct common_datum *comdatum; /* common datum */ struct symtab permissions; /* class-specific permission symbol table */ struct constraint_node *constraints; /* constraints on class perms */ struct constraint_node *validatetrans; /* special transition rules */ /* Options for how a new object user, role, and type should be decided */ #define DEFAULT_SOURCE 1 #define DEFAULT_TARGET 2 char default_user; char default_role; char default_type; /* Options for how a new object range should be decided */ #define DEFAULT_SOURCE_LOW 1 #define DEFAULT_SOURCE_HIGH 2 #define DEFAULT_SOURCE_LOW_HIGH 3 #define DEFAULT_TARGET_LOW 4 #define DEFAULT_TARGET_HIGH 5 #define DEFAULT_TARGET_LOW_HIGH 6 #define DEFAULT_GLBLUB 7 char default_range; }; /* Role attributes */ struct role_datum { u32 value; /* internal role value */ u32 bounds; /* boundary of role */ struct ebitmap dominates; /* set of roles dominated by this role */ struct ebitmap types; /* set of authorized types for role */ }; struct role_trans_key { u32 role; /* current role */ u32 type; /* program executable type, or new object type */ u32 tclass; /* process class, or new object class */ }; struct role_trans_datum { u32 new_role; /* new role */ }; struct filename_trans_key { u32 ttype; /* parent dir context */ u16 tclass; /* class of new object */ const char *name; /* last path component */ }; struct filename_trans_datum { struct ebitmap stypes; /* bitmap of source types for this otype */ u32 otype; /* resulting type of new object */ struct filename_trans_datum *next; /* record for next otype */ }; struct role_allow { u32 role; /* current role */ u32 new_role; /* new role */ struct role_allow *next; }; /* Type attributes */ struct type_datum { u32 value; /* internal type value */ u32 bounds; /* boundary of type */ unsigned char primary; /* primary name? */ unsigned char attribute; /* attribute? */ }; /* User attributes */ struct user_datum { u32 value; /* internal user value */ u32 bounds; /* bounds of user */ struct ebitmap roles; /* set of authorized roles for user */ struct mls_range range; /* MLS range (min - max) for user */ struct mls_level dfltlevel; /* default login MLS level for user */ }; /* Sensitivity attributes */ struct level_datum { struct mls_level level; /* sensitivity and associated categories */ unsigned char isalias; /* is this sensitivity an alias for another? */ }; /* Category attributes */ struct cat_datum { u32 value; /* internal category bit + 1 */ unsigned char isalias; /* is this category an alias for another? */ }; struct range_trans { u32 source_type; u32 target_type; u32 target_class; }; /* Boolean data type */ struct cond_bool_datum { u32 value; /* internal type value */ int state; }; struct cond_node; /* * A type set preserves data needed to determine constraint info from * policy source. This is not used by the kernel policy but allows * utilities such as audit2allow to determine constraint denials. */ struct type_set { struct ebitmap types; struct ebitmap negset; u32 flags; }; /* * The configuration data includes security contexts for * initial SIDs, unlabeled file systems, TCP and UDP port numbers, * network interfaces, and nodes.
This structure stores the * relevant data for one such entry. Entries of the same kind * (e.g. all initial SIDs) are linked together into a list. */ struct ocontext { union { char *name; /* name of initial SID, fs, netif, fstype, path */ struct { u8 protocol; u16 low_port; u16 high_port; } port; /* TCP or UDP port information */ struct { u32 addr; u32 mask; } node; /* node information */ struct { u32 addr[4]; u32 mask[4]; } node6; /* IPv6 node information */ struct { u64 subnet_prefix; u16 low_pkey; u16 high_pkey; } ibpkey; struct { char *dev_name; u8 port; } ibendport; } u; union { u32 sclass; /* security class for genfs */ u32 behavior; /* labeling behavior for fs_use */ } v; struct context context[2]; /* security context(s) */ u32 sid[2]; /* SID(s) */ struct ocontext *next; }; struct genfs { char *fstype; struct ocontext *head; struct genfs *next; }; /* symbol table array indices */ #define SYM_COMMONS 0 #define SYM_CLASSES 1 #define SYM_ROLES 2 #define SYM_TYPES 3 #define SYM_USERS 4 #define SYM_BOOLS 5 #define SYM_LEVELS 6 #define SYM_CATS 7 #define SYM_NUM 8 /* object context array indices */ #define OCON_ISID 0 /* initial SIDs */ #define OCON_FS 1 /* unlabeled file systems (deprecated) */ #define OCON_PORT 2 /* TCP and UDP port numbers */ #define OCON_NETIF 3 /* network interfaces */ #define OCON_NODE 4 /* nodes */ #define OCON_FSUSE 5 /* fs_use */ #define OCON_NODE6 6 /* IPv6 nodes */ #define OCON_IBPKEY 7 /* Infiniband PKeys */ #define OCON_IBENDPORT 8 /* Infiniband end ports */ #define OCON_NUM 9 /* The policy database */ struct policydb { int mls_enabled; /* symbol tables */ struct symtab symtab[SYM_NUM]; #define p_commons symtab[SYM_COMMONS] #define p_classes symtab[SYM_CLASSES] #define p_roles symtab[SYM_ROLES] #define p_types symtab[SYM_TYPES] #define p_users symtab[SYM_USERS] #define p_bools symtab[SYM_BOOLS] #define p_levels symtab[SYM_LEVELS] #define p_cats symtab[SYM_CATS] /* symbol names indexed by (value - 1) */ char **sym_val_to_name[SYM_NUM]; /* class, role, and user attributes indexed by (value - 1) */ struct class_datum **class_val_to_struct; struct role_datum **role_val_to_struct; struct user_datum **user_val_to_struct; struct type_datum **type_val_to_struct; /* type enforcement access vectors and transitions */ struct avtab te_avtab; /* role transitions */ struct hashtab role_tr; /* file transitions with the last path component */ /* quickly exclude lookups when parent ttype has no rules */ struct ebitmap filename_trans_ttypes; /* actual set of filename_trans rules */ struct hashtab filename_trans; /* only used if policyvers < POLICYDB_VERSION_COMP_FTRANS */ u32 compat_filename_trans_count; /* bools indexed by (value - 1) */ struct cond_bool_datum **bool_val_to_struct; /* type enforcement conditional access vectors and transitions */ struct avtab te_cond_avtab; /* array indexing te_cond_avtab by conditional */ struct cond_node *cond_list; u32 cond_list_len; /* role allows */ struct role_allow *role_allow; /* security contexts of initial SIDs, unlabeled file systems, TCP or UDP port numbers, network interfaces and nodes */ struct ocontext *ocontexts[OCON_NUM]; /* security contexts for files in filesystems that cannot support a persistent label mapping or use another fixed labeling behavior. 
*/ struct genfs *genfs; /* range transitions table (range_trans_key -> mls_range) */ struct hashtab range_tr; /* type -> attribute reverse mapping */ struct ebitmap *type_attr_map_array; struct ebitmap policycaps; struct ebitmap permissive_map; /* length of this policy when it was loaded */ size_t len; unsigned int policyvers; unsigned int reject_unknown : 1; unsigned int allow_unknown : 1; u16 process_class; u32 process_trans_perms; } __randomize_layout; struct policy_file { char *data; size_t len; }; extern void policydb_destroy(struct policydb *p); extern int policydb_load_isids(struct policydb *p, struct sidtab *s); extern int policydb_context_isvalid(struct policydb *p, struct context *c); extern int policydb_class_isvalid(struct policydb *p, unsigned int class); extern int policydb_type_isvalid(struct policydb *p, unsigned int type); extern int policydb_role_isvalid(struct policydb *p, unsigned int role); extern int policydb_read(struct policydb *p, struct policy_file *fp); extern int policydb_write(struct policydb *p, struct policy_file *fp); extern struct filename_trans_datum * policydb_filenametr_search(struct policydb *p, struct filename_trans_key *key); extern struct mls_range *policydb_rangetr_search(struct policydb *p, struct range_trans *key); extern struct role_trans_datum * policydb_roletr_search(struct policydb *p, struct role_trans_key *key); #define POLICYDB_CONFIG_MLS 1 /* the config flags related to unknown classes/perms are bits 2 and 3 */ #define REJECT_UNKNOWN 0x00000002 #define ALLOW_UNKNOWN 0x00000004 #define OBJECT_R "object_r" #define OBJECT_R_VAL 1 #define POLICYDB_MAGIC SELINUX_MAGIC #define POLICYDB_STRING "SE Linux" struct policy_data { struct policydb *p; struct policy_file *fp; }; static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes) { if (bytes > fp->len) return -EINVAL; memcpy(buf, fp->data, bytes); fp->data += bytes; fp->len -= bytes; return 0; } static inline int put_entry(const void *buf, size_t bytes, size_t num, struct policy_file *fp) { size_t len; if (unlikely(check_mul_overflow(bytes, num, &len))) return -EINVAL; if (len > fp->len) return -EINVAL; memcpy(fp->data, buf, len); fp->data += len; fp->len -= len; return 0; } static inline char *sym_name(struct policydb *p, unsigned int sym_num, unsigned int element_nr) { return p->sym_val_to_name[sym_num][element_nr]; } extern int str_read(char **strp, gfp_t flags, struct policy_file *fp, u32 len); extern u16 string_to_security_class(struct policydb *p, const char *name); extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name); #endif /* _SS_POLICYDB_H_ */
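/*
 * A short, hypothetical sketch of how the next_entry() helper above is
 * used when parsing a binary policy image: fixed-size little-endian
 * fields are copied out of the policy_file cursor and byte-swapped.
 * read_policy_magic() is an invented name, not part of this header:
 */
static int read_policy_magic(struct policy_file *fp)
{
	__le32 buf[2];
	int rc;

	/* next_entry() bounds-checks and advances fp->data / fp->len. */
	rc = next_entry(buf, fp, sizeof(buf));
	if (rc)
		return rc;

	if (le32_to_cpu(buf[0]) != POLICYDB_MAGIC)
		return -EINVAL;

	/* Return the on-disk policy version number. */
	return le32_to_cpu(buf[1]);
}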
// SPDX-License-Identifier: GPL-2.0-only /* * mm/page-writeback.c * * Copyright (C) 2002, Linus Torvalds. * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * * Contains functions related to writing back dirty pages at the * address_space level. * * 10Apr2002 Andrew Morton * Initial version */ #include <linux/kernel.h> #include <linux/math64.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/init.h> #include <linux/backing-dev.h> #include <linux/task_io_accounting_ops.h> #include <linux/blkdev.h> #include <linux/mpage.h> #include <linux/rmap.h> #include <linux/percpu.h> #include <linux/smp.h> #include <linux/sysctl.h> #include <linux/cpu.h> #include <linux/syscalls.h> #include <linux/pagevec.h> #include <linux/timer.h> #include <linux/sched/rt.h> #include <linux/sched/signal.h> #include <linux/mm_inline.h> #include <trace/events/writeback.h> #include "internal.h" /* * Sleep at most 200ms at a time in balance_dirty_pages(). */ #define MAX_PAUSE max(HZ/5, 1) /* * Try to keep balance_dirty_pages() call intervals higher than this many pages * by raising the pause time to max_pause when the interval falls below it. */ #define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10)) /* * Estimate write bandwidth or update dirty limit at 200ms intervals. */ #define BANDWIDTH_INTERVAL max(HZ/5, 1) #define RATELIMIT_CALC_SHIFT 10 /* * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited * will look to see if it needs to force writeback or throttling. */ static long ratelimit_pages = 32; /* The following parameters are exported via /proc/sys/vm */ /* * Start background writeback (via writeback threads) at this percentage */ static int dirty_background_ratio = 10; /* * dirty_background_bytes starts at 0 (disabled) so that it is a function of * dirty_background_ratio * the amount of dirtyable memory */ static unsigned long dirty_background_bytes; /* * free highmem will not be subtracted from the total free memory * for calculating free ratios if vm_highmem_is_dirtyable is true */ static int vm_highmem_is_dirtyable; /* * The generator of dirty data starts writeback at this percentage */ static int vm_dirty_ratio = 20; /* * vm_dirty_bytes starts at 0 (disabled) so that it is a function of * vm_dirty_ratio * the amount of dirtyable memory */ static unsigned long vm_dirty_bytes; /* * The interval between `kupdate'-style writebacks */ unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */ EXPORT_SYMBOL_GPL(dirty_writeback_interval); /* * The longest time for which data is allowed to remain dirty */ unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */ /* * Flag that puts the machine in "laptop mode".
Doubles as a timeout in jiffies: * a full sync is triggered after this time elapses without any disk activity. */ int laptop_mode; EXPORT_SYMBOL(laptop_mode); /* End of sysctl-exported parameters */ struct wb_domain global_wb_domain; /* * Length of period for aging writeout fractions of bdis. This is an * arbitrarily chosen number. The longer the period, the more slowly the * fractions reflect changes in the current writeout rate. */ #define VM_COMPLETIONS_PERIOD_LEN (3*HZ) #ifdef CONFIG_CGROUP_WRITEBACK #define GDTC_INIT(__wb) .wb = (__wb), \ .dom = &global_wb_domain, \ .wb_completions = &(__wb)->completions #define GDTC_INIT_NO_WB .dom = &global_wb_domain #define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \ .dom = mem_cgroup_wb_domain(__wb), \ .wb_completions = &(__wb)->memcg_completions, \ .gdtc = __gdtc static bool mdtc_valid(struct dirty_throttle_control *dtc) { return dtc->dom; } static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc) { return dtc->dom; } static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc) { return mdtc->gdtc; } static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb) { return &wb->memcg_completions; } static void wb_min_max_ratio(struct bdi_writeback *wb, unsigned long *minp, unsigned long *maxp) { unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth); unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); unsigned long long min = wb->bdi->min_ratio; unsigned long long max = wb->bdi->max_ratio; /* * @wb may already be clean by the time control reaches here and * the total may not include its bw. */ if (this_bw < tot_bw) { if (min) { min *= this_bw; min = div64_ul(min, tot_bw); } if (max < 100 * BDI_RATIO_SCALE) { max *= this_bw; max = div64_ul(max, tot_bw); } } *minp = min; *maxp = max; } #else /* CONFIG_CGROUP_WRITEBACK */ #define GDTC_INIT(__wb) .wb = (__wb), \ .wb_completions = &(__wb)->completions #define GDTC_INIT_NO_WB #define MDTC_INIT(__wb, __gdtc) static bool mdtc_valid(struct dirty_throttle_control *dtc) { return false; } static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc) { return &global_wb_domain; } static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc) { return NULL; } static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb) { return NULL; } static void wb_min_max_ratio(struct bdi_writeback *wb, unsigned long *minp, unsigned long *maxp) { *minp = wb->bdi->min_ratio; *maxp = wb->bdi->max_ratio; } #endif /* CONFIG_CGROUP_WRITEBACK */ /* * In a memory zone, there is a certain number of pages we consider * available for the page cache, which is essentially the number of * free and reclaimable pages, minus some zone reserves to protect * lowmem and the ability to uphold the zone's watermarks without * requiring writeback. * * This number of dirtyable pages is the base value from which the * user-configurable dirty ratio yields the effective number of pages * that are allowed to actually be dirtied, either per individual zone * or globally as the sum of dirtyable pages over all zones. * * Because the user is allowed to specify the dirty limit globally as * an absolute number of bytes, calculating the per-zone dirty limit can * require translating the configured limit into a percentage of * global dirtyable memory first. */ /** * node_dirtyable_memory - number of dirtyable pages in a node * @pgdat: the node * * Return: the node's number of pages potentially available for dirty * page cache.
This is the base value for the per-node dirty limits. */ static unsigned long node_dirtyable_memory(struct pglist_data *pgdat) { unsigned long nr_pages = 0; int z; for (z = 0; z < MAX_NR_ZONES; z++) { struct zone *zone = pgdat->node_zones + z; if (!populated_zone(zone)) continue; nr_pages += zone_page_state(zone, NR_FREE_PAGES); } /* * Pages reserved for the kernel should not be considered * dirtyable, to prevent a situation where reclaim has to * clean pages in order to balance the zones. */ nr_pages -= min(nr_pages, pgdat->totalreserve_pages); nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE); nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE); return nr_pages; } static unsigned long highmem_dirtyable_memory(unsigned long total) { #ifdef CONFIG_HIGHMEM int node; unsigned long x = 0; int i; for_each_node_state(node, N_HIGH_MEMORY) { for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) { struct zone *z; unsigned long nr_pages; if (!is_highmem_idx(i)) continue; z = &NODE_DATA(node)->node_zones[i]; if (!populated_zone(z)) continue; nr_pages = zone_page_state(z, NR_FREE_PAGES); /* watch for underflows */ nr_pages -= min(nr_pages, high_wmark_pages(z)); nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE); nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE); x += nr_pages; } } /* * Make sure that the number of highmem pages never exceeds the total * dirtyable memory. This can only happen in rather unusual VM * configurations, but we want to guard against it. */ return min(x, total); #else return 0; #endif } /** * global_dirtyable_memory - number of globally dirtyable pages * * Return: the global number of pages potentially available for dirty * page cache. This is the base value for the global dirty limits. */ static unsigned long global_dirtyable_memory(void) { unsigned long x; x = global_zone_page_state(NR_FREE_PAGES); /* * Pages reserved for the kernel should not be considered * dirtyable, to prevent a situation where reclaim has to * clean pages in order to balance the zones. */ x -= min(x, totalreserve_pages); x += global_node_page_state(NR_INACTIVE_FILE); x += global_node_page_state(NR_ACTIVE_FILE); if (!vm_highmem_is_dirtyable) x -= highmem_dirtyable_memory(x); return x + 1; /* Ensure that we never return 0 */ } /** * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain * @dtc: dirty_throttle_control of interest * * Calculate @dtc->thresh and ->bg_thresh considering * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}. The caller * must ensure that @dtc->avail is set before calling this function. The * dirty limits will be lifted by 1/4 for real-time tasks. */ static void domain_dirty_limits(struct dirty_throttle_control *dtc) { const unsigned long available_memory = dtc->avail; struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc); unsigned long bytes = vm_dirty_bytes; unsigned long bg_bytes = dirty_background_bytes; /* convert ratios to per-PAGE_SIZE for higher precision */ unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100; unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100; unsigned long thresh; unsigned long bg_thresh; struct task_struct *tsk; /* gdtc is !NULL iff @dtc is for memcg domain */ if (gdtc) { unsigned long global_avail = gdtc->avail; /* * The byte settings can't be applied directly to memcg * domains. Convert them to ratios by scaling against * globally available memory. As the ratios are expressed * in per-PAGE_SIZE units, they can be obtained by dividing * bytes by the number of pages.
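 * * (A worked example with assumed round numbers: with PAGE_SIZE = 4096 * and global_avail = 1048576 pages (4 GiB), a byte setting of 1 GiB * gives ratio = DIV_ROUND_UP(1073741824, 1048576) = 1024, i.e. * 1024/4096 = 25% of the domain's available memory, matching 1 GiB out * of 4 GiB.)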
*/ if (bytes) ratio = min(DIV_ROUND_UP(bytes, global_avail), PAGE_SIZE); if (bg_bytes) bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail), PAGE_SIZE); bytes = bg_bytes = 0; } if (bytes) thresh = DIV_ROUND_UP(bytes, PAGE_SIZE); else thresh = (ratio * available_memory) / PAGE_SIZE; if (bg_bytes) bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE); else bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE; tsk = current; if (rt_or_dl_task(tsk)) { bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32; thresh += thresh / 4 + global_wb_domain.dirty_limit / 32; } /* * Dirty throttling logic assumes the limits in page units fit into * 32-bits. This gives 16TB dirty limits max which is hopefully enough. */ if (thresh > UINT_MAX) thresh = UINT_MAX; /* This makes sure bg_thresh is within 32-bits as well */ if (bg_thresh >= thresh) bg_thresh = thresh / 2; dtc->thresh = thresh; dtc->bg_thresh = bg_thresh; /* we should eventually report the domain in the TP */ if (!gdtc) trace_global_dirty_state(bg_thresh, thresh); } /** * global_dirty_limits - background-writeback and dirty-throttling thresholds * @pbackground: out parameter for bg_thresh * @pdirty: out parameter for thresh * * Calculate bg_thresh and thresh for global_wb_domain. See * domain_dirty_limits() for details. */ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) { struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB }; gdtc.avail = global_dirtyable_memory(); domain_dirty_limits(&gdtc); *pbackground = gdtc.bg_thresh; *pdirty = gdtc.thresh; } /** * node_dirty_limit - maximum number of dirty pages allowed in a node * @pgdat: the node * * Return: the maximum number of dirty pages allowed in a node, based * on the node's dirtyable memory. */ static unsigned long node_dirty_limit(struct pglist_data *pgdat) { unsigned long node_memory = node_dirtyable_memory(pgdat); struct task_struct *tsk = current; unsigned long dirty; if (vm_dirty_bytes) dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) * node_memory / global_dirtyable_memory(); else dirty = vm_dirty_ratio * node_memory / 100; if (rt_or_dl_task(tsk)) dirty += dirty / 4; /* * Dirty throttling logic assumes the limits in page units fit into * 32-bits. This gives 16TB dirty limits max which is hopefully enough. */ return min_t(unsigned long, dirty, UINT_MAX); } /** * node_dirty_ok - tells whether a node is within its dirty limits * @pgdat: the node to check * * Return: %true when the dirty pages in @pgdat are within the node's * dirty limit, %false if the limit is exceeded. 
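 * * (A worked example for the limit computed above, with assumed numbers: * with vm_dirty_ratio = 20 and node_dirtyable_memory() = 1048576 pages, * node_dirty_limit() yields 20 * 1048576 / 100 = 209715 pages, roughly * 800 MiB with 4 KiB pages; an rt task gets a 25% bonus, 262143 pages.)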
*/ bool node_dirty_ok(struct pglist_data *pgdat) { unsigned long limit = node_dirty_limit(pgdat); unsigned long nr_pages = 0; nr_pages += node_page_state(pgdat, NR_FILE_DIRTY); nr_pages += node_page_state(pgdat, NR_WRITEBACK); return nr_pages <= limit; } #ifdef CONFIG_SYSCTL static int dirty_background_ratio_handler(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write) dirty_background_bytes = 0; return ret; } static int dirty_background_bytes_handler(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; unsigned long old_bytes = dirty_background_bytes; ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write) { if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) > UINT_MAX) { dirty_background_bytes = old_bytes; return -ERANGE; } dirty_background_ratio = 0; } return ret; } static int dirty_ratio_handler(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int old_ratio = vm_dirty_ratio; int ret; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write && vm_dirty_ratio != old_ratio) { writeback_set_ratelimit(); vm_dirty_bytes = 0; } return ret; } static int dirty_bytes_handler(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { unsigned long old_bytes = vm_dirty_bytes; int ret; ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write && vm_dirty_bytes != old_bytes) { if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) { vm_dirty_bytes = old_bytes; return -ERANGE; } writeback_set_ratelimit(); vm_dirty_ratio = 0; } return ret; } #endif static unsigned long wp_next_time(unsigned long cur_time) { cur_time += VM_COMPLETIONS_PERIOD_LEN; /* 0 has a special meaning... */ if (!cur_time) return 1; return cur_time; } static void wb_domain_writeout_add(struct wb_domain *dom, struct fprop_local_percpu *completions, unsigned int max_prop_frac, long nr) { __fprop_add_percpu_max(&dom->completions, completions, max_prop_frac, nr); /* First event after period switching was turned off? */ if (unlikely(!dom->period_time)) { /* * We can race with other wb_domain_writeout_add calls here but * it does not cause any harm since the resulting time when the * timer will fire and what is in writeout_period_time will be * roughly the same. */ dom->period_time = wp_next_time(jiffies); mod_timer(&dom->period_timer, dom->period_time); } } /* * Increment @wb's writeout completion count and the global writeout * completion count. Called from __folio_end_writeback(). */ static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr) { struct wb_domain *cgdom; wb_stat_mod(wb, WB_WRITTEN, nr); wb_domain_writeout_add(&global_wb_domain, &wb->completions, wb->bdi->max_prop_frac, nr); cgdom = mem_cgroup_wb_domain(wb); if (cgdom) wb_domain_writeout_add(cgdom, wb_memcg_completions(wb), wb->bdi->max_prop_frac, nr); } void wb_writeout_inc(struct bdi_writeback *wb) { unsigned long flags; local_irq_save(flags); __wb_writeout_add(wb, 1); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(wb_writeout_inc); /* * On an idle system, we can be called long after we scheduled because we use * deferred timers, so we must account for missed periods.
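 * * (For example: if the deferred timer fires two full periods late, * miss_periods = 2 and fprop_new_period() ages three periods at once.)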
*/ static void writeout_period(struct timer_list *t) { struct wb_domain *dom = from_timer(dom, t, period_timer); int miss_periods = (jiffies - dom->period_time) / VM_COMPLETIONS_PERIOD_LEN; if (fprop_new_period(&dom->completions, miss_periods + 1)) { dom->period_time = wp_next_time(dom->period_time + miss_periods * VM_COMPLETIONS_PERIOD_LEN); mod_timer(&dom->period_timer, dom->period_time); } else { /* * Aging has zeroed all fractions. Stop wasting CPU on period * updates. */ dom->period_time = 0; } } int wb_domain_init(struct wb_domain *dom, gfp_t gfp) { memset(dom, 0, sizeof(*dom)); spin_lock_init(&dom->lock); timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE); dom->dirty_limit_tstamp = jiffies; return fprop_global_init(&dom->completions, gfp); } #ifdef CONFIG_CGROUP_WRITEBACK void wb_domain_exit(struct wb_domain *dom) { timer_delete_sync(&dom->period_timer); fprop_global_destroy(&dom->completions); } #endif /* * bdi_min_ratio keeps the sum of the minimum dirty shares of all * registered backing devices, which, for obvious reasons, can not * exceed 100%. */ static unsigned int bdi_min_ratio; static int bdi_check_pages_limit(unsigned long pages) { unsigned long max_dirty_pages = global_dirtyable_memory(); if (pages > max_dirty_pages) return -EINVAL; return 0; } static unsigned long bdi_ratio_from_pages(unsigned long pages) { unsigned long background_thresh; unsigned long dirty_thresh; unsigned long ratio; global_dirty_limits(&background_thresh, &dirty_thresh); if (!dirty_thresh) return -EINVAL; ratio = div64_u64(pages * 100ULL * BDI_RATIO_SCALE, dirty_thresh); return ratio; } static u64 bdi_get_bytes(unsigned int ratio) { unsigned long background_thresh; unsigned long dirty_thresh; u64 bytes; global_dirty_limits(&background_thresh, &dirty_thresh); bytes = (dirty_thresh * PAGE_SIZE * ratio) / BDI_RATIO_SCALE / 100; return bytes; } static int __bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio) { unsigned int delta; int ret = 0; if (min_ratio > 100 * BDI_RATIO_SCALE) return -EINVAL; spin_lock_bh(&bdi_lock); if (min_ratio > bdi->max_ratio) { ret = -EINVAL; } else { if (min_ratio < bdi->min_ratio) { delta = bdi->min_ratio - min_ratio; bdi_min_ratio -= delta; bdi->min_ratio = min_ratio; } else { delta = min_ratio - bdi->min_ratio; if (bdi_min_ratio + delta < 100 * BDI_RATIO_SCALE) { bdi_min_ratio += delta; bdi->min_ratio = min_ratio; } else { ret = -EINVAL; } } } spin_unlock_bh(&bdi_lock); return ret; } static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio) { int ret = 0; if (max_ratio > 100 * BDI_RATIO_SCALE) return -EINVAL; spin_lock_bh(&bdi_lock); if (bdi->min_ratio > max_ratio) { ret = -EINVAL; } else { bdi->max_ratio = max_ratio; bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / (100 * BDI_RATIO_SCALE); } spin_unlock_bh(&bdi_lock); return ret; } int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio) { return __bdi_set_min_ratio(bdi, min_ratio); } int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio) { return __bdi_set_max_ratio(bdi, max_ratio); } int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio) { return __bdi_set_min_ratio(bdi, min_ratio * BDI_RATIO_SCALE); } int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio) { return __bdi_set_max_ratio(bdi, max_ratio * BDI_RATIO_SCALE); } EXPORT_SYMBOL(bdi_set_max_ratio); u64 bdi_get_min_bytes(struct backing_dev_info *bdi) { return bdi_get_bytes(bdi->min_ratio); } int 
bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes) { int ret; unsigned long pages = min_bytes >> PAGE_SHIFT; long min_ratio; ret = bdi_check_pages_limit(pages); if (ret) return ret; min_ratio = bdi_ratio_from_pages(pages); if (min_ratio < 0) return min_ratio; return __bdi_set_min_ratio(bdi, min_ratio); } u64 bdi_get_max_bytes(struct backing_dev_info *bdi) { return bdi_get_bytes(bdi->max_ratio); } int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes) { int ret; unsigned long pages = max_bytes >> PAGE_SHIFT; long max_ratio; ret = bdi_check_pages_limit(pages); if (ret) return ret; max_ratio = bdi_ratio_from_pages(pages); if (max_ratio < 0) return max_ratio; return __bdi_set_max_ratio(bdi, max_ratio); } int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit) { if (strict_limit > 1) return -EINVAL; spin_lock_bh(&bdi_lock); if (strict_limit) bdi->capabilities |= BDI_CAP_STRICTLIMIT; else bdi->capabilities &= ~BDI_CAP_STRICTLIMIT; spin_unlock_bh(&bdi_lock); return 0; } static unsigned long dirty_freerun_ceiling(unsigned long thresh, unsigned long bg_thresh) { return (thresh + bg_thresh) / 2; } static unsigned long hard_dirty_limit(struct wb_domain *dom, unsigned long thresh) { return max(thresh, dom->dirty_limit); } /* * Memory which can be further allocated to a memcg domain is capped by * system-wide clean memory excluding the amount being used in the domain. */ static void mdtc_calc_avail(struct dirty_throttle_control *mdtc, unsigned long filepages, unsigned long headroom) { struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc); unsigned long clean = filepages - min(filepages, mdtc->dirty); unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); unsigned long other_clean = global_clean - min(global_clean, clean); mdtc->avail = filepages + min(headroom, other_clean); } static inline bool dtc_is_global(struct dirty_throttle_control *dtc) { return mdtc_gdtc(dtc) == NULL; } /* * Dirty background will ignore pages being written as we're trying to * decide whether to put more under writeback. */ static void domain_dirty_avail(struct dirty_throttle_control *dtc, bool include_writeback) { if (dtc_is_global(dtc)) { dtc->avail = global_dirtyable_memory(); dtc->dirty = global_node_page_state(NR_FILE_DIRTY); if (include_writeback) dtc->dirty += global_node_page_state(NR_WRITEBACK); } else { unsigned long filepages = 0, headroom = 0, writeback = 0; mem_cgroup_wb_stats(dtc->wb, &filepages, &headroom, &dtc->dirty, &writeback); if (include_writeback) dtc->dirty += writeback; mdtc_calc_avail(dtc, filepages, headroom); } } /** * __wb_calc_thresh - @wb's share of dirty threshold * @dtc: dirty_throttle_control of interest * @thresh: dirty throttling or dirty background threshold of wb_domain in @dtc * * Note that balance_dirty_pages() will only seriously take the dirty * throttling threshold as a hard limit when sleeping max_pause per page * is not enough to keep the dirty pages under control. For example, when * the device is completely stalled due to some error conditions, or when * there are 1000 dd tasks writing to a slow 10MB/s USB key. * In other normal situations, it acts more gently, throttling the tasks more * (rather than completely blocking them) when the wb dirty pages go high.
* * It allocates high/low dirty limits to fast/slow devices, in order to prevent * - starving fast devices * - piling up dirty pages (that will take a long time to sync) on slow devices * * The wb's share of dirty limit will be adapting to its throughput and * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set. * * Return: @wb's dirty limit in pages. For dirty throttling limit, the term * "dirty" in the context of dirty balancing includes all PG_dirty and * PG_writeback pages. */ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc, unsigned long thresh) { struct wb_domain *dom = dtc_dom(dtc); struct bdi_writeback *wb = dtc->wb; u64 wb_thresh; u64 wb_max_thresh; unsigned long numerator, denominator; unsigned long wb_min_ratio, wb_max_ratio; /* * Calculate this wb's share of the thresh ratio. */ fprop_fraction_percpu(&dom->completions, dtc->wb_completions, &numerator, &denominator); wb_thresh = (thresh * (100 * BDI_RATIO_SCALE - bdi_min_ratio)) / (100 * BDI_RATIO_SCALE); wb_thresh *= numerator; wb_thresh = div64_ul(wb_thresh, denominator); wb_min_max_ratio(wb, &wb_min_ratio, &wb_max_ratio); wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE); /* * It's very possible that wb_thresh is close to 0 not because the * device is slow, but because it has remained inactive for a long * time. Grant such devices a reasonably good (hopefully IO-efficient) * threshold, so that occasional writes won't be blocked and active * writes can ramp up the threshold quickly. */ if (thresh > dtc->dirty) { if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 100); else wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 8); } wb_max_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE); if (wb_thresh > wb_max_thresh) wb_thresh = wb_max_thresh; return wb_thresh; } unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh) { struct dirty_throttle_control gdtc = { GDTC_INIT(wb) }; domain_dirty_avail(&gdtc, true); return __wb_calc_thresh(&gdtc, thresh); } unsigned long cgwb_calc_thresh(struct bdi_writeback *wb) { struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB }; struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) }; domain_dirty_avail(&gdtc, true); domain_dirty_avail(&mdtc, true); domain_dirty_limits(&mdtc); return __wb_calc_thresh(&mdtc, mdtc.thresh); } /* * f(dirty) := 1.0 + ((setpoint - dirty) / (limit - setpoint))^3 * * it is a 3rd-order polynomial subject to * * (1) f(freerun) = 2.0 => ramp up dirty_ratelimit reasonably fast * (2) f(setpoint) = 1.0 => the balance point * (3) f(limit) = 0 => the hard limit * (4) df/dx <= 0 => negative feedback control * (5) the closer to setpoint, the smaller |df/dx| (and the reverse) * => fast response on large errors; small oscillation near setpoint */ static long long pos_ratio_polynom(unsigned long setpoint, unsigned long dirty, unsigned long limit) { long long pos_ratio; long x; x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, (limit - setpoint) | 1); pos_ratio = x; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; pos_ratio += 1 << RATELIMIT_CALC_SHIFT; return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT); } /* * Dirty position control. * * (o) global/bdi setpoints * * We want the dirty pages to be balanced around the global/wb setpoints.
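 * * (A worked example for pos_ratio_polynom() above, ignoring the * RATELIMIT_CALC_SHIFT fixed-point scaling and using assumed numbers: * with freerun = 100, limit = 300 and hence setpoint = 200, a dirty * count of 150 gives x = (200 - 150) / (300 - 200) = 0.5 and * pos_ratio = 1.0 + 0.5^3 = 1.125; at dirty = freerun the formula * yields 2.0, and at dirty = limit it yields 0.)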
* When the number of dirty pages is higher/lower than the setpoint, the * dirty position control ratio (and hence task dirty ratelimit) will be * decreased/increased to bring the dirty pages back to the setpoint. * * pos_ratio = 1 << RATELIMIT_CALC_SHIFT * * if (dirty < setpoint) scale up pos_ratio * if (dirty > setpoint) scale down pos_ratio * * if (wb_dirty < wb_setpoint) scale up pos_ratio * if (wb_dirty > wb_setpoint) scale down pos_ratio * * task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT * * (o) global control line * * (an ASCII plot in the original source: over the global dirty control * scope, pos_ratio starts at 2.0 at the freerun point, passes through * 1.0 at the setpoint and drops to 0 at the limit) * * (o) wb control line * * (an ASCII plot in the original source: pos_ratio is 1.0 at * wb_setpoint, declines linearly over a span toward x_intercept and * levels off at a floor of 1/4) * * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can * be smoothly throttled down to normal if it starts high in situations like * - start writing to a slow SD card and a fast disk at the same time. The SD * card's wb_dirty may rush to many times higher than wb_setpoint. * - the wb dirty thresh drops quickly due to change of JBOD workload */ static void wb_position_ratio(struct dirty_throttle_control *dtc) { struct bdi_writeback *wb = dtc->wb; unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth); unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh); unsigned long limit = dtc->limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); unsigned long wb_thresh = dtc->wb_thresh; unsigned long x_intercept; unsigned long setpoint; /* dirty pages' target balance point */ unsigned long wb_setpoint; unsigned long span; long long pos_ratio; /* for scaling up/down the rate limit */ long x; dtc->pos_ratio = 0; if (unlikely(dtc->dirty >= limit)) return; /* * global setpoint * * See comment for pos_ratio_polynom(). */ setpoint = (freerun + limit) / 2; pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit); /* * The strictlimit feature is a tool preventing mistrusted filesystems * from growing a large number of dirty pages before throttling. For * such filesystems balance_dirty_pages always checks wb counters * against wb limits. Even if global "nr_dirty" is under "freerun". * This is especially important for fuse which sets bdi->max_ratio to * 1% by default. Without the strictlimit feature, fuse writeback may * consume an arbitrary amount of RAM because it is accounted in * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty". * * Here, in wb_position_ratio(), we calculate pos_ratio based on * two values: wb_dirty and wb_thresh. Let's consider an example: * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global * limits are set by default to 10% and 20% (background and throttle). * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages. * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is * about ~6K pages (as the average of background and throttle wb * limits).
	 * The 3rd order polynomial will provide positive feedback if
	 * wb_dirty is under wb_setpoint and vice versa.
	 *
	 * Note that we cannot use global counters in these calculations
	 * because we want to throttle a process writing to a strictlimit wb
	 * much earlier than the global "freerun" is reached (~23MB vs. ~2.3GB
	 * in the example above).
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		long long wb_pos_ratio;

		if (dtc->wb_dirty >= wb_thresh)
			return;

		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
						    dtc->wb_bg_thresh);

		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
			return;

		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
						 wb_thresh);

		/*
		 * Typically, for the strictlimit case, wb_setpoint << setpoint
		 * and pos_ratio >> wb_pos_ratio. In other words, the global
		 * state ("dirty") is not the limiting factor and we have to
		 * make the decision based on wb counters. But there is an
		 * important case when global pos_ratio should get precedence:
		 * global limits are exceeded (e.g. due to activities on other
		 * wb's) while the given strictlimit wb is below its limit.
		 *
		 * "pos_ratio * wb_pos_ratio" would work for the case above,
		 * but it would look too non-natural for the case of all
		 * activity in the system coming from a single strictlimit wb
		 * with bdi->max_ratio == 100%.
		 *
		 * Note that min() below somewhat changes the dynamics of the
		 * control system. Normally, pos_ratio value can be well over 3
		 * (when globally we are at freerun and wb is well below wb
		 * setpoint). Now the maximum pos_ratio in the same situation
		 * is 2. We might want to tweak this if we observe the control
		 * system is too slow to adapt.
		 */
		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
		return;
	}

	/*
	 * We have computed basic pos_ratio above based on global situation. If
	 * the wb is over/under its share of dirty pages, we want to scale
	 * pos_ratio further down/up. That is done by the following mechanism.
	 */

	/*
	 * wb setpoint
	 *
	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
	 *
	 *                        x_intercept - wb_dirty
	 *                     := --------------------------
	 *                        x_intercept - wb_setpoint
	 *
	 * The main wb control line is a linear function that is subject to
	 *
	 * (1) f(wb_setpoint) = 1.0
	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
	 *
	 * For the single wb case, the dirty pages are observed to fluctuate
	 * regularly within range
	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
	 * for various filesystems, where (2) can yield a reasonable 12.5%
	 * fluctuation range for pos_ratio.
	 *
	 * For the JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to
	 * its own size, so move the slope over accordingly and choose a slope
	 * that yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
	 */
	if (unlikely(wb_thresh > dtc->thresh))
		wb_thresh = dtc->thresh;
	/*
	 * scale global setpoint to wb's:
	 *	wb_setpoint = setpoint * wb_thresh / thresh
	 */
	x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
	wb_setpoint = setpoint * (u64)x >> 16;
	/*
	 * Use span=(8*write_bw) in the single wb case as indicated by
	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in the JBOD case.
	 *
	 *        wb_thresh                    thresh - wb_thresh
	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
	 *          thresh                          thresh
	 */
	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
	x_intercept = wb_setpoint + span;

	if (dtc->wb_dirty < x_intercept - span / 4) {
		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
				      (x_intercept - wb_setpoint) | 1);
	} else
		pos_ratio /= 4;

	/*
	 * wb reserve area, safeguard against dirty pool underrun and disk idle
	 * It may push the desired control point of global dirty pages higher
	 * than setpoint.
	 */
	x_intercept = wb_thresh / 2;
	if (dtc->wb_dirty < x_intercept) {
		if (dtc->wb_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept,
					    dtc->wb_dirty);
		else
			pos_ratio *= 8;
	}

	dtc->pos_ratio = pos_ratio;
}

static void wb_update_write_bandwidth(struct bdi_writeback *wb,
				      unsigned long elapsed,
				      unsigned long written)
{
	const unsigned long period = roundup_pow_of_two(3 * HZ);
	unsigned long avg = wb->avg_write_bandwidth;
	unsigned long old = wb->write_bandwidth;
	u64 bw;

	/*
	 * bw = written * HZ / elapsed
	 *
	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
	 * write_bandwidth = ---------------------------------------------------
	 *                                          period
	 *
	 * @written may have decreased due to folio_redirty_for_writepage().
	 * Avoid underflowing @bw calculation.
	 */
	bw = written - min(written, wb->written_stamp);
	bw *= HZ;
	if (unlikely(elapsed > period)) {
		bw = div64_ul(bw, elapsed);
		avg = bw;
		goto out;
	}
	bw += (u64)wb->write_bandwidth * (period - elapsed);
	bw >>= ilog2(period);

	/*
	 * one more level of smoothing, for filtering out sudden spikes
	 */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;

	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

out:
	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
	avg = max(avg, 1LU);
	if (wb_has_dirty_io(wb)) {
		long delta = avg - wb->avg_write_bandwidth;
		WARN_ON_ONCE(atomic_long_add_return(delta,
					&wb->bdi->tot_write_bandwidth) <= 0);
	}
	wb->write_bandwidth = bw;
	WRITE_ONCE(wb->avg_write_bandwidth, avg);
}

static void update_dirty_limit(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	unsigned long limit = dom->dirty_limit;

	/*
	 * Follow up in one step.
	 */
	if (limit < thresh) {
		limit = thresh;
		goto update;
	}

	/*
	 * Follow down slowly. Use the higher one as the target, because thresh
	 * may drop below dirty. This is exactly the reason to introduce
	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
	 */
	thresh = max(thresh, dtc->dirty);
	if (limit > thresh) {
		limit -= (limit - thresh) >> 5;
		goto update;
	}
	return;
update:
	dom->dirty_limit = limit;
}

static void domain_update_dirty_limit(struct dirty_throttle_control *dtc,
				      unsigned long now)
{
	struct wb_domain *dom = dtc_dom(dtc);

	/*
	 * check locklessly first to optimize away locking for most of the time
	 */
	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
		return;

	spin_lock(&dom->lock);
	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
		update_dirty_limit(dtc);
		dom->dirty_limit_tstamp = now;
	}
	spin_unlock(&dom->lock);
}

/*
 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
 *
 * Normal wb tasks will be curbed at or below it in the long term.
 * Obviously it should be around (write_bw / N) when there are N dd tasks.
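 *
 * For example (illustrative numbers, added for clarity): with
 * write_bw = 100 MB/s and N = 4 dd tasks dirtying at full speed,
 * dirty_ratelimit should settle around 25 MB/s per task, so that the
 * aggregate dirtying rate matches the device's writeout rate.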
 */
static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
				      unsigned long dirtied,
				      unsigned long elapsed)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long dirty = dtc->dirty;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long step;
	unsigned long x;
	unsigned long shift;

	/*
	 * The dirty rate will match the writeout rate in the long term, except
	 * when dirty pages are truncated by userspace or re-dirtied by the FS.
	 */
	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;

	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
	task_ratelimit = (u64)dirty_ratelimit *
					dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
	task_ratelimit++; /* it helps ramp up dirty_ratelimit from tiny values */

	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
	 * formula will yield the balanced rate limit (write_bw / N).
	 *
	 * Note that the expanded form is not a pure rate feedback:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
	 * but also takes pos_ratio into account:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
	 *
	 * (1) is not realistic because pos_ratio also takes part in balancing
	 * the dirty rate.  Consider the state
	 *	pos_ratio = 0.5						     (3)
	 *	rate = 2 * (write_bw / N)				     (4)
	 * If (1) is used, it will get stuck in that state! Because each dd
	 * will be throttled at
	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
	 * yielding
	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
	 * put (6) into (1) we get
	 *	rate_(i+1) = rate_(i)					     (7)
	 *
	 * So we end up using (2) to always keep
	 *	rate_(i+1) ~= (write_bw / N)				     (8)
	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
	 * pos_ratio is able to drive itself to 1.0, which is not only where
	 * the dirty count meets the setpoint, but also where the slope of
	 * pos_ratio is most flat and hence task_ratelimit fluctuates least.
	 */
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
	/*
	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
	 */
	if (unlikely(balanced_dirty_ratelimit > write_bw))
		balanced_dirty_ratelimit = write_bw;

	/*
	 * We could safely do this and return immediately:
	 *
	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
	 *
	 * However to get a more stable dirty_ratelimit, the below elaborated
	 * code makes use of task_ratelimit to filter out singular points and
	 * limit the step size.
	 *
	 * The below code essentially only uses the relative value of
	 *
	 *	task_ratelimit - dirty_ratelimit
	 *	= (pos_ratio - 1) * dirty_ratelimit
	 *
	 * which reflects the direction and size of the dirty position error.
	 */

	/*
	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
	 * task_ratelimit is on the same side of dirty_ratelimit, too.
	 * For example, when
	 * - dirty_ratelimit > balanced_dirty_ratelimit
	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
	 * lowering dirty_ratelimit will help meet both the position and rate
	 * control targets. Otherwise, don't update dirty_ratelimit if it will
	 * only help meet the rate target. After all, what users ultimately
	 * perceive and care about are a stable dirty rate and a small
	 * position error.
	 *
	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
	 * and filter out the singular points of balanced_dirty_ratelimit,
	 * which keeps jumping around randomly and can even leap far away at
	 * times due to the small 200ms estimation period of dirty_rate (we
	 * want to keep that period small to reduce time lags).
	 */
	step = 0;

	/*
	 * For the strictlimit case, calculations above were based on wb
	 * counters and limits (starting from pos_ratio = wb_position_ratio()
	 * and up to balanced_dirty_ratelimit = task_ratelimit * write_bw /
	 * dirty_rate). Hence, to calculate "step" properly, we have to use
	 * wb_dirty as "dirty" and wb_setpoint as "setpoint".
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		dirty = dtc->wb_dirty;
		setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
	}

	if (dirty < setpoint) {
		x = min3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	shift = dirty_ratelimit / (2 * step + 1);
	if (shift < BITS_PER_LONG)
		step = DIV_ROUND_UP(step >> shift, 8);
	else
		step = 0;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL));
	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
}

static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
				  struct dirty_throttle_control *mdtc,
				  bool update_ratelimit)
{
	struct bdi_writeback *wb = gdtc->wb;
	unsigned long now = jiffies;
	unsigned long elapsed;
	unsigned long dirtied;
	unsigned long written;

	spin_lock(&wb->list_lock);

	/*
	 * Lockless checks for elapsed time are racy and the delayed update
	 * after IO completion doesn't do it at all (to make sure written pages
	 * are accounted reasonably quickly). Make sure elapsed >= 1 to avoid
	 * division errors.
	 */
	elapsed = max(now - wb->bw_time_stamp, 1UL);
	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);

	if (update_ratelimit) {
		domain_update_dirty_limit(gdtc, now);
		wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);

		/*
		 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
		 * compiler has no way to figure that out. Help it.
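		 * (Note added for clarity: IS_ENABLED() expands to a
		 * compile-time constant, so with CONFIG_CGROUP_WRITEBACK
		 * disabled the branch below is removed by dead-code
		 * elimination and the NULL @mdtc is never dereferenced.)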
*/ if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) { domain_update_dirty_limit(mdtc, now); wb_update_dirty_ratelimit(mdtc, dirtied, elapsed); } } wb_update_write_bandwidth(wb, elapsed, written); wb->dirtied_stamp = dirtied; wb->written_stamp = written; WRITE_ONCE(wb->bw_time_stamp, now); spin_unlock(&wb->list_lock); } void wb_update_bandwidth(struct bdi_writeback *wb) { struct dirty_throttle_control gdtc = { GDTC_INIT(wb) }; __wb_update_bandwidth(&gdtc, NULL, false); } /* Interval after which we consider wb idle and don't estimate bandwidth */ #define WB_BANDWIDTH_IDLE_JIF (HZ) static void wb_bandwidth_estimate_start(struct bdi_writeback *wb) { unsigned long now = jiffies; unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp); if (elapsed > WB_BANDWIDTH_IDLE_JIF && !atomic_read(&wb->writeback_inodes)) { spin_lock(&wb->list_lock); wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED); wb->written_stamp = wb_stat(wb, WB_WRITTEN); WRITE_ONCE(wb->bw_time_stamp, now); spin_unlock(&wb->list_lock); } } /* * After a task dirtied this many pages, balance_dirty_pages_ratelimited() * will look to see if it needs to start dirty throttling. * * If dirty_poll_interval is too low, big NUMA machines will call the expensive * global_zone_page_state() too often. So scale it near-sqrt to the safety margin * (the number of pages we may dirty without exceeding the dirty limits). */ static unsigned long dirty_poll_interval(unsigned long dirty, unsigned long thresh) { if (thresh > dirty) return 1UL << (ilog2(thresh - dirty) >> 1); return 1; } static unsigned long wb_max_pause(struct bdi_writeback *wb, unsigned long wb_dirty) { unsigned long bw = READ_ONCE(wb->avg_write_bandwidth); unsigned long t; /* * Limit pause time for small memory systems. If sleeping for too long * time, a small pool of dirty/writeback pages may go empty and disk go * idle. * * 8 serves as the safety ratio. */ t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8)); t++; return min_t(unsigned long, t, MAX_PAUSE); } static long wb_min_pause(struct bdi_writeback *wb, long max_pause, unsigned long task_ratelimit, unsigned long dirty_ratelimit, int *nr_dirtied_pause) { long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth)); long lo = ilog2(READ_ONCE(wb->dirty_ratelimit)); long t; /* target pause */ long pause; /* estimated next pause */ int pages; /* target nr_dirtied_pause */ /* target for 10ms pause on 1-dd case */ t = max(1, HZ / 100); /* * Scale up pause time for concurrent dirtiers in order to reduce CPU * overheads. * * (N * 10ms) on 2^N concurrent tasks. */ if (hi > lo) t += (hi - lo) * (10 * HZ) / 1024; /* * This is a bit convoluted. We try to base the next nr_dirtied_pause * on the much more stable dirty_ratelimit. However the next pause time * will be computed based on task_ratelimit and the two rate limits may * depart considerably at some time. Especially if task_ratelimit goes * below dirty_ratelimit/2 and the target pause is max_pause, the next * pause time will be max_pause*2 _trimmed down_ to max_pause. As a * result task_ratelimit won't be executed faithfully, which could * eventually bring down dirty_ratelimit. * * We apply two rules to fix it up: * 1) try to estimate the next pause time and if necessary, use a lower * nr_dirtied_pause so as not to exceed max_pause. When this happens, * nr_dirtied_pause will be "dancing" with task_ratelimit. 
	 * 2) limit the target pause time to max_pause/2, so that the normal
	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
	 */
	t = min(t, 1 + max_pause / 2);
	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);

	/*
	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
	 * When the 16 consecutive reads are often interrupted by some dirty
	 * throttling pause during the async writes, cfq will go idle
	 * (deadline is fine). So push nr_dirtied_pause as high as possible
	 * until it reaches DIRTY_POLL_THRESH=32 pages.
	 */
	if (pages < DIRTY_POLL_THRESH) {
		t = max_pause;
		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
		if (pages > DIRTY_POLL_THRESH) {
			pages = DIRTY_POLL_THRESH;
			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
		}
	}

	pause = HZ * pages / (task_ratelimit + 1);
	if (pause > max_pause) {
		t = max_pause;
		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
	}

	*nr_dirtied_pause = pages;
	/*
	 * The minimal pause time will normally be half the target pause time.
	 */
	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}

static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long wb_reclaimable;

	/*
	 * wb_thresh is not treated as hard a limiting factor as dirty_thresh,
	 * for two reasons:
	 * - in JBOD setup, wb_thresh can fluctuate a lot
	 * - in a system with HDD and USB key, the USB key may somehow
	 *   go into a state (wb_dirty >> wb_thresh) either because
	 *   wb_dirty starts high, or because wb_thresh drops low.
	 *   In this case we don't want to hard throttle the USB key
	 *   dirtiers for 100 seconds until wb_dirty drops under
	 *   wb_thresh. Instead the auxiliary wb control line in
	 *   wb_position_ratio() will let the dirtier task progress
	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
	 */
	dtc->wb_thresh = __wb_calc_thresh(dtc, dtc->thresh);
	dtc->wb_bg_thresh = dtc->thresh ?
		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;

	/*
	 * In order to avoid the stacked BDI deadlock we need
	 * to ensure we accurately count the 'dirty' pages when
	 * the threshold is low.
	 *
	 * Otherwise it would be possible to get thresh+n pages
	 * reported dirty, even though there are thresh-m pages
	 * actually dirty; with m+n sitting in the percpu
	 * deltas.
	 */
	if (dtc->wb_thresh < 2 * wb_stat_error()) {
		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
	} else {
		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
	}
}

static unsigned long domain_poll_intv(struct dirty_throttle_control *dtc,
				      bool strictlimit)
{
	unsigned long dirty, thresh;

	if (strictlimit) {
		dirty = dtc->wb_dirty;
		thresh = dtc->wb_thresh;
	} else {
		dirty = dtc->dirty;
		thresh = dtc->thresh;
	}

	return dirty_poll_interval(dirty, thresh);
}

/*
 * Throttle it only when the background writeback cannot catch up. This avoids
 * (excessively) small writeouts when the wb limits are ramping up in the
 * !strictlimit case.
 *
 * In the strictlimit case, make the decision based on the wb counters and
 * limits. Small writeouts when the wb limits are ramping up are the price we
 * consciously pay for strictlimit-ing.
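 *
 * As a worked example (hypothetical numbers, added for illustration): with
 * thresh = 200MB and bg_thresh = 100MB, the freerun ceiling is
 * (200 + 100) / 2 = 150MB, so tasks dirty pages at full speed until 150MB
 * of dirty memory has accumulated and only then start being throttled.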
*/ static void domain_dirty_freerun(struct dirty_throttle_control *dtc, bool strictlimit) { unsigned long dirty, thresh, bg_thresh; if (unlikely(strictlimit)) { wb_dirty_limits(dtc); dirty = dtc->wb_dirty; thresh = dtc->wb_thresh; bg_thresh = dtc->wb_bg_thresh; } else { dirty = dtc->dirty; thresh = dtc->thresh; bg_thresh = dtc->bg_thresh; } dtc->freerun = dirty <= dirty_freerun_ceiling(thresh, bg_thresh); } static void balance_domain_limits(struct dirty_throttle_control *dtc, bool strictlimit) { domain_dirty_avail(dtc, true); domain_dirty_limits(dtc); domain_dirty_freerun(dtc, strictlimit); } static void wb_dirty_freerun(struct dirty_throttle_control *dtc, bool strictlimit) { dtc->freerun = false; /* was already handled in domain_dirty_freerun */ if (strictlimit) return; wb_dirty_limits(dtc); /* * LOCAL_THROTTLE tasks must not be throttled when below the per-wb * freerun ceiling. */ if (!(current->flags & PF_LOCAL_THROTTLE)) return; dtc->freerun = dtc->wb_dirty < dirty_freerun_ceiling(dtc->wb_thresh, dtc->wb_bg_thresh); } static inline void wb_dirty_exceeded(struct dirty_throttle_control *dtc, bool strictlimit) { dtc->dirty_exceeded = (dtc->wb_dirty > dtc->wb_thresh) && ((dtc->dirty > dtc->thresh) || strictlimit); } /* * The limits fields dirty_exceeded and pos_ratio won't be updated if wb is * in freerun state. Please don't use these invalid fields in freerun case. */ static void balance_wb_limits(struct dirty_throttle_control *dtc, bool strictlimit) { wb_dirty_freerun(dtc, strictlimit); if (dtc->freerun) return; wb_dirty_exceeded(dtc, strictlimit); wb_position_ratio(dtc); } /* * balance_dirty_pages() must be called by processes which are generating dirty * data. It looks at the number of dirty pages in the machine and will force * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2. * If we're over `background_thresh' then the writeback threads are woken to * perform some writeout. */ static int balance_dirty_pages(struct bdi_writeback *wb, unsigned long pages_dirtied, unsigned int flags) { struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) }; struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) }; struct dirty_throttle_control * const gdtc = &gdtc_stor; struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ? &mdtc_stor : NULL; struct dirty_throttle_control *sdtc; unsigned long nr_dirty; long period; long pause; long max_pause; long min_pause; int nr_dirtied_pause; unsigned long task_ratelimit; unsigned long dirty_ratelimit; struct backing_dev_info *bdi = wb->bdi; bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT; unsigned long start_time = jiffies; int ret = 0; for (;;) { unsigned long now = jiffies; nr_dirty = global_node_page_state(NR_FILE_DIRTY); balance_domain_limits(gdtc, strictlimit); if (mdtc) { /* * If @wb belongs to !root memcg, repeat the same * basic calculations for the memcg domain. */ balance_domain_limits(mdtc, strictlimit); } /* * In laptop mode, we wait until hitting the higher threshold * before starting background writeout, and then write out all * the way down to the lower threshold. So slow writers cause * minimal disk activity. * * In normal mode, we start background writeout at the lower * background_thresh, to keep the amount of dirty memory low. */ if (!laptop_mode && nr_dirty > gdtc->bg_thresh && !writeback_in_progress(wb)) wb_start_background_writeback(wb); /* * If memcg domain is in effect, @dirty should be under * both global and memcg freerun ceilings. 
*/ if (gdtc->freerun && (!mdtc || mdtc->freerun)) { unsigned long intv; unsigned long m_intv; free_running: intv = domain_poll_intv(gdtc, strictlimit); m_intv = ULONG_MAX; current->dirty_paused_when = now; current->nr_dirtied = 0; if (mdtc) m_intv = domain_poll_intv(mdtc, strictlimit); current->nr_dirtied_pause = min(intv, m_intv); break; } /* Start writeback even when in laptop mode */ if (unlikely(!writeback_in_progress(wb))) wb_start_background_writeback(wb); mem_cgroup_flush_foreign(wb); /* * Calculate global domain's pos_ratio and select the * global dtc by default. */ balance_wb_limits(gdtc, strictlimit); if (gdtc->freerun) goto free_running; sdtc = gdtc; if (mdtc) { /* * If memcg domain is in effect, calculate its * pos_ratio. @wb should satisfy constraints from * both global and memcg domains. Choose the one * w/ lower pos_ratio. */ balance_wb_limits(mdtc, strictlimit); if (mdtc->freerun) goto free_running; if (mdtc->pos_ratio < gdtc->pos_ratio) sdtc = mdtc; } wb->dirty_exceeded = gdtc->dirty_exceeded || (mdtc && mdtc->dirty_exceeded); if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) + BANDWIDTH_INTERVAL)) __wb_update_bandwidth(gdtc, mdtc, true); /* throttle according to the chosen dtc */ dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit); task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >> RATELIMIT_CALC_SHIFT; max_pause = wb_max_pause(wb, sdtc->wb_dirty); min_pause = wb_min_pause(wb, max_pause, task_ratelimit, dirty_ratelimit, &nr_dirtied_pause); if (unlikely(task_ratelimit == 0)) { period = max_pause; pause = max_pause; goto pause; } period = HZ * pages_dirtied / task_ratelimit; pause = period; if (current->dirty_paused_when) pause -= now - current->dirty_paused_when; /* * For less than 1s think time (ext3/4 may block the dirtier * for up to 800ms from time to time on 1-HDD; so does xfs, * however at much less frequency), try to compensate it in * future periods by updating the virtual time; otherwise just * do a reset, as it may be a light dirtier. */ if (pause < min_pause) { trace_balance_dirty_pages(wb, sdtc, dirty_ratelimit, task_ratelimit, pages_dirtied, period, min(pause, 0L), start_time); if (pause < -HZ) { current->dirty_paused_when = now; current->nr_dirtied = 0; } else if (period) { current->dirty_paused_when += period; current->nr_dirtied = 0; } else if (current->nr_dirtied_pause <= pages_dirtied) current->nr_dirtied_pause += pages_dirtied; break; } if (unlikely(pause > max_pause)) { /* for occasional dropped task_ratelimit */ now += min(pause - max_pause, max_pause); pause = max_pause; } pause: trace_balance_dirty_pages(wb, sdtc, dirty_ratelimit, task_ratelimit, pages_dirtied, period, pause, start_time); if (flags & BDP_ASYNC) { ret = -EAGAIN; break; } __set_current_state(TASK_KILLABLE); bdi->last_bdp_sleep = jiffies; io_schedule_timeout(pause); current->dirty_paused_when = now + pause; current->nr_dirtied = 0; current->nr_dirtied_pause = nr_dirtied_pause; /* * This is typically equal to (dirty < thresh) and can also * keep "1000+ dd on a slow USB stick" under control. */ if (task_ratelimit) break; /* * In the case of an unresponsive NFS server and the NFS dirty * pages exceeds dirty_thresh, give the other good wb's a pipe * to go through, so that tasks on them still remain responsive. * * In theory 1 page is enough to keep the consumer-producer * pipe going: the flusher cleans 1 page => the task dirties 1 * more page. However wb_dirty has accounting errors. So use * the larger and more IO friendly wb_stat_error. 
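		 *
		 * (Note added for clarity: wb_stat_error() is the maximum
		 * drift the approximate per-cpu wb counters can accumulate,
		 * so wb_dirty values at or below it are indistinguishable
		 * from zero.)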
		 */
		if (sdtc->wb_dirty <= wb_stat_error())
			break;

		if (fatal_signal_pending(current))
			break;
	}
	return ret;
}

static DEFINE_PER_CPU(int, bdp_ratelimits);

/*
 * Normal tasks are throttled by
 *	loop {
 *		dirty tsk->nr_dirtied_pause pages;
 *		take a snap in balance_dirty_pages();
 *	}
 * However there is a worst case. If every task exits immediately after
 * dirtying (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will
 * never be called to throttle the page dirties. The solution is to save the
 * not yet throttled page dirties in dirty_throttle_leaks on task exit and
 * charge them randomly into the running tasks. This works well for the above
 * worst case, as the new task will pick up and accumulate the old task's
 * leaked dirty count and eventually get throttled.
 */
DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;

/**
 * balance_dirty_pages_ratelimited_flags - Balance dirty memory state.
 * @mapping: address_space which was dirtied.
 * @flags: BDP flags.
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied. The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * See balance_dirty_pages_ratelimited() for details.
 *
 * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to
 * indicate that memory is out of balance and the caller must wait
 * for I/O to complete. Otherwise, it will return 0 to indicate
 * that either memory was already in balance, or it was able to sleep
 * until the amount of dirty memory returned to balance.
 */
int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
					unsigned int flags)
{
	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct bdi_writeback *wb = NULL;
	int ratelimit;
	int ret = 0;
	int *p;

	if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
		return ret;

	if (inode_cgwb_enabled(inode))
		wb = wb_get_create_current(bdi, GFP_KERNEL);
	if (!wb)
		wb = &bdi->wb;

	ratelimit = current->nr_dirtied_pause;
	if (wb->dirty_exceeded)
		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));

	preempt_disable();
	/*
	 * This prevents one CPU from accumulating too many dirtied pages
	 * without calling into balance_dirty_pages(), which can happen when
	 * there are 1000+ tasks that all start dirtying pages at exactly the
	 * same time, hence they all honour a too-large initial
	 * task->nr_dirtied_pause.
	 */
	p =  this_cpu_ptr(&bdp_ratelimits);
	if (unlikely(current->nr_dirtied >= ratelimit))
		*p = 0;
	else if (unlikely(*p >= ratelimit_pages)) {
		*p = 0;
		ratelimit = 0;
	}
	/*
	 * Pick up the dirtied pages by the exited tasks. This avoids lots of
	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
	 * the dirty throttling and livelocking other long-running dirtiers.
	 */
	p = this_cpu_ptr(&dirty_throttle_leaks);
	if (*p > 0 && current->nr_dirtied < ratelimit) {
		unsigned long nr_pages_dirtied;
		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
		*p -= nr_pages_dirtied;
		current->nr_dirtied += nr_pages_dirtied;
	}
	preempt_enable();

	if (unlikely(current->nr_dirtied >= ratelimit))
		ret = balance_dirty_pages(wb, current->nr_dirtied, flags);

	wb_put(wb);
	return ret;
}
EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited_flags);

/**
 * balance_dirty_pages_ratelimited - balance dirty memory state.
 * @mapping: address_space which was dirtied.
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied. The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
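 *
 * A typical call site in a filesystem's buffered write path looks roughly
 * like this (illustrative sketch; the copy step is a placeholder):
 *
 *	while (<more user data to copy>) {
 *		<copy one page into the page cache and mark it dirty>;
 *		balance_dirty_pages_ratelimited(mapping);
 *	}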
* * Once we're over the dirty memory limit we decrease the ratelimiting * by a lot, to prevent individual processes from overshooting the limit * by (ratelimit_pages) each. */ void balance_dirty_pages_ratelimited(struct address_space *mapping) { balance_dirty_pages_ratelimited_flags(mapping, 0); } EXPORT_SYMBOL(balance_dirty_pages_ratelimited); /* * Similar to wb_dirty_limits, wb_bg_dirty_limits also calculates dirty * and thresh, but it's for background writeback. */ static void wb_bg_dirty_limits(struct dirty_throttle_control *dtc) { struct bdi_writeback *wb = dtc->wb; dtc->wb_bg_thresh = __wb_calc_thresh(dtc, dtc->bg_thresh); if (dtc->wb_bg_thresh < 2 * wb_stat_error()) dtc->wb_dirty = wb_stat_sum(wb, WB_RECLAIMABLE); else dtc->wb_dirty = wb_stat(wb, WB_RECLAIMABLE); } static bool domain_over_bg_thresh(struct dirty_throttle_control *dtc) { domain_dirty_avail(dtc, false); domain_dirty_limits(dtc); if (dtc->dirty > dtc->bg_thresh) return true; wb_bg_dirty_limits(dtc); if (dtc->wb_dirty > dtc->wb_bg_thresh) return true; return false; } /** * wb_over_bg_thresh - does @wb need to be written back? * @wb: bdi_writeback of interest * * Determines whether background writeback should keep writing @wb or it's * clean enough. * * Return: %true if writeback should continue. */ bool wb_over_bg_thresh(struct bdi_writeback *wb) { struct dirty_throttle_control gdtc = { GDTC_INIT(wb) }; struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) }; if (domain_over_bg_thresh(&gdtc)) return true; if (mdtc_valid(&mdtc)) return domain_over_bg_thresh(&mdtc); return false; } #ifdef CONFIG_SYSCTL /* * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs */ static int dirty_writeback_centisecs_handler(const struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { unsigned int old_interval = dirty_writeback_interval; int ret; ret = proc_dointvec(table, write, buffer, length, ppos); /* * Writing 0 to dirty_writeback_interval will disable periodic writeback * and a different non-zero value will wakeup the writeback threads. * wb_wakeup_delayed() would be more appropriate, but it's a pain to * iterate over all bdis and wbs. * The reason we do this is to make the change take effect immediately. */ if (!ret && write && dirty_writeback_interval && dirty_writeback_interval != old_interval) wakeup_flusher_threads(WB_REASON_PERIODIC); return ret; } #endif void laptop_mode_timer_fn(struct timer_list *t) { struct backing_dev_info *backing_dev_info = from_timer(backing_dev_info, t, laptop_mode_wb_timer); wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER); } /* * We've spun up the disk and we're in laptop mode: schedule writeback * of all dirty data a few seconds from now. If the flush is already scheduled * then push it back - the user is still using the disk. */ void laptop_io_completion(struct backing_dev_info *info) { mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode); } /* * We're in laptop mode and we've just synced. The sync's writes will have * caused another writeback to be scheduled by laptop_io_completion. * Nothing needs to be written back anymore, so we unschedule the writeback. */ void laptop_sync_completion(void) { struct backing_dev_info *bdi; rcu_read_lock(); list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) timer_delete(&bdi->laptop_mode_wb_timer); rcu_read_unlock(); } /* * If ratelimit_pages is too high then we can get into dirty-data overload * if a large number of processes all perform writes at the same time. 
* * Here we set ratelimit_pages to a level which ensures that when all CPUs are * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory * thresholds. */ void writeback_set_ratelimit(void) { struct wb_domain *dom = &global_wb_domain; unsigned long background_thresh; unsigned long dirty_thresh; global_dirty_limits(&background_thresh, &dirty_thresh); dom->dirty_limit = dirty_thresh; ratelimit_pages = dirty_thresh / (num_online_cpus() * 32); if (ratelimit_pages < 16) ratelimit_pages = 16; } static int page_writeback_cpu_online(unsigned int cpu) { writeback_set_ratelimit(); return 0; } #ifdef CONFIG_SYSCTL /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE; static const struct ctl_table vm_page_writeback_sysctls[] = { { .procname = "dirty_background_ratio", .data = &dirty_background_ratio, .maxlen = sizeof(dirty_background_ratio), .mode = 0644, .proc_handler = dirty_background_ratio_handler, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE_HUNDRED, }, { .procname = "dirty_background_bytes", .data = &dirty_background_bytes, .maxlen = sizeof(dirty_background_bytes), .mode = 0644, .proc_handler = dirty_background_bytes_handler, .extra1 = SYSCTL_LONG_ONE, }, { .procname = "dirty_ratio", .data = &vm_dirty_ratio, .maxlen = sizeof(vm_dirty_ratio), .mode = 0644, .proc_handler = dirty_ratio_handler, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE_HUNDRED, }, { .procname = "dirty_bytes", .data = &vm_dirty_bytes, .maxlen = sizeof(vm_dirty_bytes), .mode = 0644, .proc_handler = dirty_bytes_handler, .extra1 = (void *)&dirty_bytes_min, }, { .procname = "dirty_writeback_centisecs", .data = &dirty_writeback_interval, .maxlen = sizeof(dirty_writeback_interval), .mode = 0644, .proc_handler = dirty_writeback_centisecs_handler, }, { .procname = "dirty_expire_centisecs", .data = &dirty_expire_interval, .maxlen = sizeof(dirty_expire_interval), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, }, #ifdef CONFIG_HIGHMEM { .procname = "highmem_is_dirtyable", .data = &vm_highmem_is_dirtyable, .maxlen = sizeof(vm_highmem_is_dirtyable), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, #endif { .procname = "laptop_mode", .data = &laptop_mode, .maxlen = sizeof(laptop_mode), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, }; #endif /* * Called early on to tune the page writeback dirty limits. * * We used to scale dirty pages according to how total memory * related to pages that could be allocated for buffers. * * However, that was when we used "dirty_ratio" to scale with * all memory, and we don't do that any more. "dirty_ratio" * is now applied to total non-HIGHPAGE memory, and as such we can't * get into the old insane situation any more where we had * large amounts of dirty pages compared to a small amount of * non-HIGHMEM memory. * * But we might still want to scale the dirty_ratio by how * much memory the box has.. 
*/ void __init page_writeback_init(void) { BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL)); cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online", page_writeback_cpu_online, NULL); cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL, page_writeback_cpu_online); #ifdef CONFIG_SYSCTL register_sysctl_init("vm", vm_page_writeback_sysctls); #endif } /** * tag_pages_for_writeback - tag pages to be written by writeback * @mapping: address space structure to write * @start: starting page index * @end: ending page index (inclusive) * * This function scans the page range from @start to @end (inclusive) and tags * all pages that have DIRTY tag set with a special TOWRITE tag. The caller * can then use the TOWRITE tag to identify pages eligible for writeback. * This mechanism is used to avoid livelocking of writeback by a process * steadily creating new dirty pages in the file (thus it is important for this * function to be quick so that it can tag pages faster than a dirtying process * can create them). */ void tag_pages_for_writeback(struct address_space *mapping, pgoff_t start, pgoff_t end) { XA_STATE(xas, &mapping->i_pages, start); unsigned int tagged = 0; void *page; xas_lock_irq(&xas); xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) { xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE); if (++tagged % XA_CHECK_SCHED) continue; xas_pause(&xas); xas_unlock_irq(&xas); cond_resched(); xas_lock_irq(&xas); } xas_unlock_irq(&xas); } EXPORT_SYMBOL(tag_pages_for_writeback); static bool folio_prepare_writeback(struct address_space *mapping, struct writeback_control *wbc, struct folio *folio) { /* * Folio truncated or invalidated. We can freely skip it then, * even for data integrity operations: the folio has disappeared * concurrently, so there could be no real expectation of this * data integrity operation even if there is now a new, dirty * folio at the same pagecache index. */ if (unlikely(folio->mapping != mapping)) return false; /* * Did somebody else write it for us? 
*/ if (!folio_test_dirty(folio)) return false; if (folio_test_writeback(folio)) { if (wbc->sync_mode == WB_SYNC_NONE) return false; folio_wait_writeback(folio); } BUG_ON(folio_test_writeback(folio)); if (!folio_clear_dirty_for_io(folio)) return false; return true; } static xa_mark_t wbc_to_tag(struct writeback_control *wbc) { if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) return PAGECACHE_TAG_TOWRITE; return PAGECACHE_TAG_DIRTY; } static pgoff_t wbc_end(struct writeback_control *wbc) { if (wbc->range_cyclic) return -1; return wbc->range_end >> PAGE_SHIFT; } static struct folio *writeback_get_folio(struct address_space *mapping, struct writeback_control *wbc) { struct folio *folio; retry: folio = folio_batch_next(&wbc->fbatch); if (!folio) { folio_batch_release(&wbc->fbatch); cond_resched(); filemap_get_folios_tag(mapping, &wbc->index, wbc_end(wbc), wbc_to_tag(wbc), &wbc->fbatch); folio = folio_batch_next(&wbc->fbatch); if (!folio) return NULL; } folio_lock(folio); if (unlikely(!folio_prepare_writeback(mapping, wbc, folio))) { folio_unlock(folio); goto retry; } trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); return folio; } /** * writeback_iter - iterate folio of a mapping for writeback * @mapping: address space structure to write * @wbc: writeback context * @folio: previously iterated folio (%NULL to start) * @error: in-out pointer for writeback errors (see below) * * This function returns the next folio for the writeback operation described by * @wbc on @mapping and should be called in a while loop in the ->writepages * implementation. * * To start the writeback operation, %NULL is passed in the @folio argument, and * for every subsequent iteration the folio returned previously should be passed * back in. * * If there was an error in the per-folio writeback inside the writeback_iter() * loop, @error should be set to the error value. * * Once the writeback described in @wbc has finished, this function will return * %NULL and if there was an error in any iteration restore it to @error. * * Note: callers should not manually break out of the loop using break or goto * but must keep calling writeback_iter() until it returns %NULL. * * Return: the folio to write or %NULL if the loop is done. */ struct folio *writeback_iter(struct address_space *mapping, struct writeback_control *wbc, struct folio *folio, int *error) { if (!folio) { folio_batch_init(&wbc->fbatch); wbc->saved_err = *error = 0; /* * For range cyclic writeback we remember where we stopped so * that we can continue where we stopped. * * For non-cyclic writeback we always start at the beginning of * the passed in range. */ if (wbc->range_cyclic) wbc->index = mapping->writeback_index; else wbc->index = wbc->range_start >> PAGE_SHIFT; /* * To avoid livelocks when other processes dirty new pages, we * first tag pages which should be written back and only then * start writing them. * * For data-integrity writeback we have to be careful so that we * do not miss some pages (e.g., because some other process has * cleared the TOWRITE tag we set). The rule we follow is that * TOWRITE tag can be cleared only by the process clearing the * DIRTY tag (and submitting the page for I/O). 
*/ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag_pages_for_writeback(mapping, wbc->index, wbc_end(wbc)); } else { wbc->nr_to_write -= folio_nr_pages(folio); WARN_ON_ONCE(*error > 0); /* * For integrity writeback we have to keep going until we have * written all the folios we tagged for writeback above, even if * we run past wbc->nr_to_write or encounter errors. * We stash away the first error we encounter in wbc->saved_err * so that it can be retrieved when we're done. This is because * the file system may still have state to clear for each folio. * * For background writeback we exit as soon as we run past * wbc->nr_to_write or encounter the first error. */ if (wbc->sync_mode == WB_SYNC_ALL) { if (*error && !wbc->saved_err) wbc->saved_err = *error; } else { if (*error || wbc->nr_to_write <= 0) goto done; } } folio = writeback_get_folio(mapping, wbc); if (!folio) { /* * To avoid deadlocks between range_cyclic writeback and callers * that hold pages in PageWriteback to aggregate I/O until * the writeback iteration finishes, we do not loop back to the * start of the file. Doing so causes a page lock/page * writeback access order inversion - we should only ever lock * multiple pages in ascending page->index order, and looping * back to the start of the file violates that rule and causes * deadlocks. */ if (wbc->range_cyclic) mapping->writeback_index = 0; /* * Return the first error we encountered (if there was any) to * the caller. */ *error = wbc->saved_err; } return folio; done: if (wbc->range_cyclic) mapping->writeback_index = folio_next_index(folio); folio_batch_release(&wbc->fbatch); return NULL; } EXPORT_SYMBOL_GPL(writeback_iter); /** * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. * @mapping: address space structure to write * @wbc: subtract the number of written pages from *@wbc->nr_to_write * @writepage: function called for each page * @data: data passed to writepage function * * Return: %0 on success, negative error code otherwise * * Note: please use writeback_iter() instead. 
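 *
 * For reference, the equivalent loop open-coded on top of writeback_iter()
 * looks roughly like this (illustrative sketch; the per-folio write step is
 * a placeholder):
 *
 *	struct folio *folio = NULL;
 *	int error = 0;
 *
 *	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
 *		error = <write out one folio>;
 *	return error;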
*/ int write_cache_pages(struct address_space *mapping, struct writeback_control *wbc, writepage_t writepage, void *data) { struct folio *folio = NULL; int error; while ((folio = writeback_iter(mapping, wbc, folio, &error))) { error = writepage(folio, wbc, data); if (error == AOP_WRITEPAGE_ACTIVATE) { folio_unlock(folio); error = 0; } } return error; } EXPORT_SYMBOL(write_cache_pages); static int writeback_use_writepage(struct address_space *mapping, struct writeback_control *wbc) { struct folio *folio = NULL; struct blk_plug plug; int err; blk_start_plug(&plug); while ((folio = writeback_iter(mapping, wbc, folio, &err))) { err = mapping->a_ops->writepage(&folio->page, wbc); if (err == AOP_WRITEPAGE_ACTIVATE) { folio_unlock(folio); err = 0; } mapping_set_error(mapping, err); } blk_finish_plug(&plug); return err; } int do_writepages(struct address_space *mapping, struct writeback_control *wbc) { int ret; struct bdi_writeback *wb; if (wbc->nr_to_write <= 0) return 0; wb = inode_to_wb_wbc(mapping->host, wbc); wb_bandwidth_estimate_start(wb); while (1) { if (mapping->a_ops->writepages) { ret = mapping->a_ops->writepages(mapping, wbc); } else if (mapping->a_ops->writepage) { ret = writeback_use_writepage(mapping, wbc); } else { /* deal with chardevs and other special files */ ret = 0; } if (ret != -ENOMEM || wbc->sync_mode != WB_SYNC_ALL) break; /* * Lacking an allocation context or the locality or writeback * state of any of the inode's pages, throttle based on * writeback activity on the local node. It's as good a * guess as any. */ reclaim_throttle(NODE_DATA(numa_node_id()), VMSCAN_THROTTLE_WRITEBACK); } /* * Usually few pages are written by now from those we've just submitted * but if there's constant writeback being submitted, this makes sure * writeback bandwidth is updated once in a while. */ if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) + BANDWIDTH_INTERVAL)) wb_update_bandwidth(wb); return ret; } /* * For address_spaces which do not use buffers nor write back. */ bool noop_dirty_folio(struct address_space *mapping, struct folio *folio) { if (!folio_test_dirty(folio)) return !folio_test_set_dirty(folio); return false; } EXPORT_SYMBOL(noop_dirty_folio); /* * Helper function for set_page_dirty family. * * NOTE: This relies on being atomic wrt interrupts. */ static void folio_account_dirtied(struct folio *folio, struct address_space *mapping) { struct inode *inode = mapping->host; trace_writeback_dirty_folio(folio, mapping); if (mapping_can_writeback(mapping)) { struct bdi_writeback *wb; long nr = folio_nr_pages(folio); inode_attach_wb(inode, folio); wb = inode_to_wb(inode); __lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr); __zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr); __node_stat_mod_folio(folio, NR_DIRTIED, nr); wb_stat_mod(wb, WB_RECLAIMABLE, nr); wb_stat_mod(wb, WB_DIRTIED, nr); task_io_account_write(nr * PAGE_SIZE); current->nr_dirtied += nr; __this_cpu_add(bdp_ratelimits, nr); mem_cgroup_track_foreign_dirty(folio, wb); } } /* * Helper function for deaccounting dirty page without writeback. * */ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb) { long nr = folio_nr_pages(folio); lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr); zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr); wb_stat_mod(wb, WB_RECLAIMABLE, -nr); task_io_account_cancelled_write(nr * PAGE_SIZE); } /* * Mark the folio dirty, and set it dirty in the page cache. * * If warn is true, then emit a warning if the folio is not uptodate and has * not been truncated. 
* * It is the caller's responsibility to prevent the folio from being truncated * while this function is in progress, although it may have been truncated * before this function is called. Most callers have the folio locked. * A few have the folio blocked from truncation through other means (e.g. * zap_vma_pages() has it mapped and is holding the page table lock). * When called from mark_buffer_dirty(), the filesystem should hold a * reference to the buffer_head that is being marked dirty, which causes * try_to_free_buffers() to fail. */ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping, int warn) { unsigned long flags; xa_lock_irqsave(&mapping->i_pages, flags); if (folio->mapping) { /* Race with truncate? */ WARN_ON_ONCE(warn && !folio_test_uptodate(folio)); folio_account_dirtied(folio, mapping); __xa_set_mark(&mapping->i_pages, folio_index(folio), PAGECACHE_TAG_DIRTY); } xa_unlock_irqrestore(&mapping->i_pages, flags); } /** * filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads. * @mapping: Address space this folio belongs to. * @folio: Folio to be marked as dirty. * * Filesystems which do not use buffer heads should call this function * from their dirty_folio address space operation. It ignores the * contents of folio_get_private(), so if the filesystem marks individual * blocks as dirty, the filesystem should handle that itself. * * This is also sometimes used by filesystems which use buffer_heads when * a single buffer is being dirtied: we want to set the folio dirty in * that case, but not all the buffers. This is a "bottom-up" dirtying, * whereas block_dirty_folio() is a "top-down" dirtying. * * The caller must ensure this doesn't race with truncation. Most will * simply hold the folio lock, but e.g. zap_pte_range() calls with the * folio mapped and the pte lock held, which also locks out truncation. */ bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio) { if (folio_test_set_dirty(folio)) return false; __folio_mark_dirty(folio, mapping, !folio_test_private(folio)); if (mapping->host) { /* !PageAnon && !swapper_space */ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); } return true; } EXPORT_SYMBOL(filemap_dirty_folio); /** * folio_redirty_for_writepage - Decline to write a dirty folio. * @wbc: The writeback control. * @folio: The folio. * * When a writepage implementation decides that it doesn't want to write * @folio for some reason, it should call this function, unlock @folio and * return 0. * * Return: True if we redirtied the folio. False if someone else dirtied * it first. */ bool folio_redirty_for_writepage(struct writeback_control *wbc, struct folio *folio) { struct address_space *mapping = folio->mapping; long nr = folio_nr_pages(folio); bool ret; wbc->pages_skipped += nr; ret = filemap_dirty_folio(mapping, folio); if (mapping && mapping_can_writeback(mapping)) { struct inode *inode = mapping->host; struct bdi_writeback *wb; struct wb_lock_cookie cookie = {}; wb = unlocked_inode_to_wb_begin(inode, &cookie); current->nr_dirtied -= nr; node_stat_mod_folio(folio, NR_DIRTIED, -nr); wb_stat_mod(wb, WB_DIRTIED, -nr); unlocked_inode_to_wb_end(inode, &cookie); } return ret; } EXPORT_SYMBOL(folio_redirty_for_writepage); /** * folio_mark_dirty - Mark a folio as being modified. * @folio: The folio. * * The folio may not be truncated while this function is running. * Holding the folio lock is sufficient to prevent truncation, but some * callers cannot acquire a sleeping lock. 
 * These callers instead hold the page table lock for a page table which
 * contains at least one page in this folio. Truncation will block on the
 * page table lock as it unmaps pages before removing the folio from its
 * mapping.
 *
 * Return: True if the folio was newly dirtied, false if it was already dirty.
 */
bool folio_mark_dirty(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	if (likely(mapping)) {
		/*
		 * readahead/folio_deactivate could remain
		 * PG_readahead/PG_reclaim due to a race with
		 * folio_end_writeback(). As for readahead, if the folio is
		 * written, the flags would be reset, so no problem. As for
		 * folio_deactivate, if the folio is redirtied, the flag will
		 * be reset, so no problem either. But if the folio is used by
		 * readahead, it will confuse readahead and make it restart
		 * the size rampup process. That is only a trivial problem,
		 * though.
		 */
		if (folio_test_reclaim(folio))
			folio_clear_reclaim(folio);
		return mapping->a_ops->dirty_folio(mapping, folio);
	}

	return noop_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL(folio_mark_dirty);

/*
 * folio_mark_dirty() is racy if the caller has no reference against
 * folio->mapping->host, and if the folio is unlocked. This is because another
 * CPU could truncate the folio off the mapping and then free the mapping.
 *
 * Usually, the folio _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the folio should be locked before running folio_mark_dirty().
 */
bool folio_mark_dirty_lock(struct folio *folio)
{
	bool ret;

	folio_lock(folio);
	ret = folio_mark_dirty(folio);
	folio_unlock(folio);
	return ret;
}
EXPORT_SYMBOL(folio_mark_dirty_lock);

/*
 * This cancels just the dirty bit on the kernel page itself, it does NOT
 * actually remove dirty bits on any mmap's that may be around. It also
 * leaves the page tagged dirty, so any sync activity will still find it on
 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
 * look at the dirty bits in the VM.
 *
 * Doing this should *normally* only ever be done when a page is truncated,
 * and is not actually mapped anywhere at all. However, fs/buffer.c does
 * this when it notices that somebody has cleaned out all the buffers on a
 * page without actually doing it through the VM. Can you say "ext3 is
 * horribly ugly"? Thought you could.
 */
void __folio_cancel_dirty(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	if (mapping_can_writeback(mapping)) {
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
		struct wb_lock_cookie cookie = {};

		wb = unlocked_inode_to_wb_begin(inode, &cookie);

		if (folio_test_clear_dirty(folio))
			folio_account_cleaned(folio, wb);

		unlocked_inode_to_wb_end(inode, &cookie);
	} else {
		folio_clear_dirty(folio);
	}
}
EXPORT_SYMBOL(__folio_cancel_dirty);

/*
 * Clear a folio's dirty flag, while caring for dirty memory accounting.
 * Returns true if the folio was previously dirty.
 *
 * This is for preparing to put the folio under writeout. We leave
 * the folio tagged as dirty in the xarray so that a concurrent
 * write-for-sync can discover it via a PAGECACHE_TAG_DIRTY walk.
 * The ->writepage implementation will run either folio_start_writeback()
 * or folio_mark_dirty(), at which stage we bring the folio's dirty flag
 * and xarray dirty tag back into sync.
 *
 * This incoherency between the folio's dirty flag and xarray tag is
 * unfortunate, but it only exists while the folio is locked.
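 *
 * The expected calling pattern is therefore roughly (illustrative sketch;
 * the submit step is a placeholder):
 *
 *	folio_lock(folio);
 *	if (folio_clear_dirty_for_io(folio)) {
 *		folio_start_writeback(folio);
 *		<submit the folio for I/O>;
 *	}
 *	folio_unlock(folio);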
*/ bool folio_clear_dirty_for_io(struct folio *folio) { struct address_space *mapping = folio_mapping(folio); bool ret = false; VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (mapping && mapping_can_writeback(mapping)) { struct inode *inode = mapping->host; struct bdi_writeback *wb; struct wb_lock_cookie cookie = {}; /* * Yes, Virginia, this is indeed insane. * * We use this sequence to make sure that * (a) we account for dirty stats properly * (b) we tell the low-level filesystem to * mark the whole folio dirty if it was * dirty in a pagetable. Only to then * (c) clean the folio again and return 1 to * cause the writeback. * * This way we avoid all nasty races with the * dirty bit in multiple places and clearing * them concurrently from different threads. * * Note! Normally the "folio_mark_dirty(folio)" * has no effect on the actual dirty bit - since * that will already usually be set. But we * need the side effects, and it can help us * avoid races. * * We basically use the folio "master dirty bit" * as a serialization point for all the different * threads doing their things. */ if (folio_mkclean(folio)) folio_mark_dirty(folio); /* * We carefully synchronise fault handlers against * installing a dirty pte and marking the folio dirty * at this point. We do this by having them hold the * page lock while dirtying the folio, and folios are * always locked coming in here, so we get the desired * exclusion. */ wb = unlocked_inode_to_wb_begin(inode, &cookie); if (folio_test_clear_dirty(folio)) { long nr = folio_nr_pages(folio); lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr); zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr); wb_stat_mod(wb, WB_RECLAIMABLE, -nr); ret = true; } unlocked_inode_to_wb_end(inode, &cookie); return ret; } return folio_test_clear_dirty(folio); } EXPORT_SYMBOL(folio_clear_dirty_for_io); static void wb_inode_writeback_start(struct bdi_writeback *wb) { atomic_inc(&wb->writeback_inodes); } static void wb_inode_writeback_end(struct bdi_writeback *wb) { unsigned long flags; atomic_dec(&wb->writeback_inodes); /* * Make sure estimate of writeback throughput gets updated after * writeback completed. We delay the update by BANDWIDTH_INTERVAL * (which is the interval other bandwidth updates use for batching) so * that if multiple inodes end writeback at a similar time, they get * batched into one bandwidth update. 
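	 * (Note added for clarity: queue_delayed_work() is a no-op when the
	 * work is already pending, so only the first completion in a
	 * BANDWIDTH_INTERVAL window actually arms the timer; later ones are
	 * absorbed into the same update.)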
*/ spin_lock_irqsave(&wb->work_lock, flags); if (test_bit(WB_registered, &wb->state)) queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL); spin_unlock_irqrestore(&wb->work_lock, flags); } bool __folio_end_writeback(struct folio *folio) { long nr = folio_nr_pages(folio); struct address_space *mapping = folio_mapping(folio); bool ret; if (mapping && mapping_use_writeback_tags(mapping)) { struct inode *inode = mapping->host; struct backing_dev_info *bdi = inode_to_bdi(inode); unsigned long flags; xa_lock_irqsave(&mapping->i_pages, flags); ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback); __xa_clear_mark(&mapping->i_pages, folio_index(folio), PAGECACHE_TAG_WRITEBACK); if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) { struct bdi_writeback *wb = inode_to_wb(inode); wb_stat_mod(wb, WB_WRITEBACK, -nr); __wb_writeout_add(wb, nr); if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) wb_inode_writeback_end(wb); } if (mapping->host && !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) sb_clear_inode_writeback(mapping->host); xa_unlock_irqrestore(&mapping->i_pages, flags); } else { ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback); } lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr); zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr); node_stat_mod_folio(folio, NR_WRITTEN, nr); return ret; } void __folio_start_writeback(struct folio *folio, bool keep_write) { long nr = folio_nr_pages(folio); struct address_space *mapping = folio_mapping(folio); int access_ret; VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (mapping && mapping_use_writeback_tags(mapping)) { XA_STATE(xas, &mapping->i_pages, folio_index(folio)); struct inode *inode = mapping->host; struct backing_dev_info *bdi = inode_to_bdi(inode); unsigned long flags; bool on_wblist; xas_lock_irqsave(&xas, flags); xas_load(&xas); folio_test_set_writeback(folio); on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK); xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK); if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) { struct bdi_writeback *wb = inode_to_wb(inode); wb_stat_mod(wb, WB_WRITEBACK, nr); if (!on_wblist) wb_inode_writeback_start(wb); } /* * We can come through here when swapping anonymous * folios, so we don't necessarily have an inode to * track for sync. */ if (mapping->host && !on_wblist) sb_mark_inode_writeback(mapping->host); if (!folio_test_dirty(folio)) xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY); if (!keep_write) xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE); xas_unlock_irqrestore(&xas, flags); } else { folio_test_set_writeback(folio); } lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr); zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr); access_ret = arch_make_folio_accessible(folio); /* * If writeback has been triggered on a page that cannot be made * accessible, it is too late to recover here. */ VM_BUG_ON_FOLIO(access_ret != 0, folio); } EXPORT_SYMBOL(__folio_start_writeback); /** * folio_wait_writeback - Wait for a folio to finish writeback. * @folio: The folio to wait for. * * If the folio is currently being written back to storage, wait for the * I/O to complete. * * Context: Sleeps. Must be called in process context and with * no spinlocks held. Caller should hold a reference on the folio. * If the folio is not locked, writeback may start again after writeback * has finished. 
*/ void folio_wait_writeback(struct folio *folio) { while (folio_test_writeback(folio)) { trace_folio_wait_writeback(folio, folio_mapping(folio)); folio_wait_bit(folio, PG_writeback); } } EXPORT_SYMBOL_GPL(folio_wait_writeback); /** * folio_wait_writeback_killable - Wait for a folio to finish writeback. * @folio: The folio to wait for. * * If the folio is currently being written back to storage, wait for the * I/O to complete or a fatal signal to arrive. * * Context: Sleeps. Must be called in process context and with * no spinlocks held. Caller should hold a reference on the folio. * If the folio is not locked, writeback may start again after writeback * has finished. * Return: 0 on success, -EINTR if we get a fatal signal while waiting. */ int folio_wait_writeback_killable(struct folio *folio) { while (folio_test_writeback(folio)) { trace_folio_wait_writeback(folio, folio_mapping(folio)); if (folio_wait_bit_killable(folio, PG_writeback)) return -EINTR; } return 0; } EXPORT_SYMBOL_GPL(folio_wait_writeback_killable); /** * folio_wait_stable() - wait for writeback to finish, if necessary. * @folio: The folio to wait on. * * This function determines if the given folio is related to a backing * device that requires folio contents to be held stable during writeback. * If so, then it will wait for any pending writeback to complete. * * Context: Sleeps. Must be called in process context and with * no spinlocks held. Caller should hold a reference on the folio. * If the folio is not locked, writeback may start again after writeback * has finished. */ void folio_wait_stable(struct folio *folio) { if (mapping_stable_writes(folio_mapping(folio))) folio_wait_writeback(folio); } EXPORT_SYMBOL_GPL(folio_wait_stable);
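/*
 * Editor's illustrative sketch (not part of the kernel sources above): the
 * typical shape of a filesystem writeback path built on the helpers above.
 * my_write_one_folio() and my_submit_folio() are hypothetical names.
 */
static int my_write_one_folio(struct folio *folio)
{
        /* The folio must be locked, as folio_clear_dirty_for_io() demands. */
        if (!folio_clear_dirty_for_io(folio))
                return 0;               /* raced: nothing left to write */

        folio_start_writeback(folio);   /* sets PG_writeback, tags the xarray */
        folio_unlock(folio);
        my_submit_folio(folio);         /* hypothetical: queue the actual I/O */

        /* The I/O completion path ends writeback via folio_end_writeback(). */
        return 1;
}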
// SPDX-License-Identifier: GPL-2.0
/*
 * Emagic EMI 2|6 usb audio interface firmware loader.
 * Copyright (C) 2002
 * Tapio Laxström (tapio.laxstrom@iptime.fi)
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/ihex.h>

/* include firmware (variables)*/
/* FIXME: This is quick and dirty solution! */
#define SPDIF   /* if you want SPDIF comment next line */
//#undef SPDIF  /* if you want MIDI uncomment this line */

#ifdef SPDIF
#define FIRMWARE_FW "emi62/spdif.fw"
#else
#define FIRMWARE_FW "emi62/midi.fw"
#endif

#define EMI62_VENDOR_ID         0x086a  /* Emagic Soft-und Hardware GmBH */
#define EMI62_PRODUCT_ID        0x0110  /* EMI 6|2m without firmware */

#define ANCHOR_LOAD_INTERNAL    0xA0    /* Vendor specific request code for Anchor Upload/Download (This one is implemented in the core) */
#define ANCHOR_LOAD_EXTERNAL    0xA3    /* This command is not implemented in the core. Requires firmware */
#define ANCHOR_LOAD_FPGA        0xA5    /* This command is not implemented in the core. Requires firmware. Emagic extension */
#define MAX_INTERNAL_ADDRESS    0x1B3F  /* This is the highest internal RAM address for the AN2131Q */
#define CPUCS_REG               0x7F92  /* EZ-USB Control and Status Register. Bit 0 controls 8051 reset */
#define INTERNAL_RAM(address)   (address <= MAX_INTERNAL_ADDRESS)

static int emi62_writememory(struct usb_device *dev, int address,
                             const unsigned char *data, int length,
                             __u8 bRequest);
static int emi62_set_reset(struct usb_device *dev, unsigned char reset_bit);
static int emi62_load_firmware (struct usb_device *dev);
static int emi62_probe(struct usb_interface *intf, const struct usb_device_id *id);
static void emi62_disconnect(struct usb_interface *intf);

/* thanks to drivers/usb/serial/keyspan_pda.c code */
static int emi62_writememory(struct usb_device *dev, int address,
                             const unsigned char *data, int length,
                             __u8 request)
{
        int result;
        unsigned char *buffer = kmemdup(data, length, GFP_KERNEL);

        if (!buffer) {
                dev_err(&dev->dev, "kmalloc(%d) failed.\n", length);
                return -ENOMEM;
        }
        /* Note: usb_control_msg returns negative value on error or length of the
         * data that was written!
*/ result = usb_control_msg (dev, usb_sndctrlpipe(dev, 0), request, 0x40, address, 0, buffer, length, 300); kfree (buffer); return result; } /* thanks to drivers/usb/serial/keyspan_pda.c code */ static int emi62_set_reset (struct usb_device *dev, unsigned char reset_bit) { int response; dev_info(&dev->dev, "%s - %d\n", __func__, reset_bit); response = emi62_writememory (dev, CPUCS_REG, &reset_bit, 1, 0xa0); if (response < 0) dev_err(&dev->dev, "set_reset (%d) failed\n", reset_bit); return response; } #define FW_LOAD_SIZE 1023 static int emi62_load_firmware (struct usb_device *dev) { const struct firmware *loader_fw = NULL; const struct firmware *bitstream_fw = NULL; const struct firmware *firmware_fw = NULL; const struct ihex_binrec *rec; int err = -ENOMEM; int i; __u32 addr; /* Address to write */ __u8 *buf; dev_dbg(&dev->dev, "load_firmware\n"); buf = kmalloc(FW_LOAD_SIZE, GFP_KERNEL); if (!buf) goto wraperr; err = request_ihex_firmware(&loader_fw, "emi62/loader.fw", &dev->dev); if (err) goto nofw; err = request_ihex_firmware(&bitstream_fw, "emi62/bitstream.fw", &dev->dev); if (err) goto nofw; err = request_ihex_firmware(&firmware_fw, FIRMWARE_FW, &dev->dev); if (err) { nofw: goto wraperr; } /* Assert reset (stop the CPU in the EMI) */ err = emi62_set_reset(dev,1); if (err < 0) goto wraperr; rec = (const struct ihex_binrec *)loader_fw->data; /* 1. We need to put the loader for the FPGA into the EZ-USB */ while (rec) { err = emi62_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_INTERNAL); if (err < 0) goto wraperr; rec = ihex_next_binrec(rec); } /* De-assert reset (let the CPU run) */ err = emi62_set_reset(dev,0); if (err < 0) goto wraperr; msleep(250); /* let device settle */ /* 2. We upload the FPGA firmware into the EMI * Note: collect up to 1023 (yes!) bytes and send them with * a single request. This is _much_ faster! */ rec = (const struct ihex_binrec *)bitstream_fw->data; do { i = 0; addr = be32_to_cpu(rec->addr); /* intel hex records are terminated with type 0 element */ while (rec && (i + be16_to_cpu(rec->len) < FW_LOAD_SIZE)) { memcpy(buf + i, rec->data, be16_to_cpu(rec->len)); i += be16_to_cpu(rec->len); rec = ihex_next_binrec(rec); } err = emi62_writememory(dev, addr, buf, i, ANCHOR_LOAD_FPGA); if (err < 0) goto wraperr; } while (rec); /* Assert reset (stop the CPU in the EMI) */ err = emi62_set_reset(dev,1); if (err < 0) goto wraperr; /* 3. We need to put the loader for the firmware into the EZ-USB (again...) */ for (rec = (const struct ihex_binrec *)loader_fw->data; rec; rec = ihex_next_binrec(rec)) { err = emi62_writememory(dev, be32_to_cpu(rec->addr), rec->data, be16_to_cpu(rec->len), ANCHOR_LOAD_INTERNAL); if (err < 0) goto wraperr; } /* De-assert reset (let the CPU run) */ err = emi62_set_reset(dev,0); if (err < 0) goto wraperr; msleep(250); /* let device settle */ /* 4. 
We put the part of the firmware that lies in the external RAM into the EZ-USB */
        for (rec = (const struct ihex_binrec *)firmware_fw->data;
             rec; rec = ihex_next_binrec(rec)) {
                if (!INTERNAL_RAM(be32_to_cpu(rec->addr))) {
                        err = emi62_writememory(dev, be32_to_cpu(rec->addr),
                                                rec->data, be16_to_cpu(rec->len),
                                                ANCHOR_LOAD_EXTERNAL);
                        if (err < 0)
                                goto wraperr;
                }
        }

        /* Assert reset (stop the CPU in the EMI) */
        err = emi62_set_reset(dev,1);
        if (err < 0)
                goto wraperr;

        for (rec = (const struct ihex_binrec *)firmware_fw->data;
             rec; rec = ihex_next_binrec(rec)) {
                if (INTERNAL_RAM(be32_to_cpu(rec->addr))) {
                        err = emi62_writememory(dev, be32_to_cpu(rec->addr),
                                                rec->data, be16_to_cpu(rec->len),
                                                ANCHOR_LOAD_EXTERNAL);
                        if (err < 0)
                                goto wraperr;
                }
        }

        /* De-assert reset (let the CPU run) */
        err = emi62_set_reset(dev,0);
        if (err < 0)
                goto wraperr;
        msleep(250);    /* let device settle */

        release_firmware(loader_fw);
        release_firmware(bitstream_fw);
        release_firmware(firmware_fw);
        kfree(buf);

        /* return 1 to fail the driver initialization
         * and give the real driver a chance to load */
        return 1;

wraperr:
        if (err < 0)
                dev_err(&dev->dev,"%s - error loading firmware: error = %d\n",
                        __func__, err);
        release_firmware(loader_fw);
        release_firmware(bitstream_fw);
        release_firmware(firmware_fw);
        kfree(buf);

        dev_err(&dev->dev, "Error\n");
        return err;
}

static const struct usb_device_id id_table[] = {
        { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
        { }                                             /* Terminating entry */
};

MODULE_DEVICE_TABLE (usb, id_table);

static int emi62_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
        struct usb_device *dev = interface_to_usbdev(intf);
        dev_dbg(&intf->dev, "emi62_probe\n");

        dev_info(&intf->dev, "%s start\n", __func__);

        emi62_load_firmware(dev);

        /* do not return the driver context, let real audio driver do that */
        return -EIO;
}

static void emi62_disconnect(struct usb_interface *intf)
{
}

static struct usb_driver emi62_driver = {
        .name           = "emi62 - firmware loader",
        .probe          = emi62_probe,
        .disconnect     = emi62_disconnect,
        .id_table       = id_table,
};

module_usb_driver(emi62_driver);

MODULE_AUTHOR("Tapio Laxström");
MODULE_DESCRIPTION("Emagic EMI 6|2m firmware loader.");
MODULE_LICENSE("GPL");

MODULE_FIRMWARE("emi62/loader.fw");
MODULE_FIRMWARE("emi62/bitstream.fw");
MODULE_FIRMWARE(FIRMWARE_FW);

/* vi:ai:syntax=c:sw=8:ts=8:tw=80 */
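/*
 * Editor's illustrative sketch (not part of the driver above): the generic
 * pattern for walking a request_ihex_firmware() image one record at a time,
 * as the loader does in steps 1, 3 and 4. walk_ihex() is a hypothetical name.
 */
static int walk_ihex(struct usb_device *dev, const struct firmware *fw)
{
        const struct ihex_binrec *rec;
        int err;

        /* Records carry big-endian address/length fields; the record list
         * is terminated, so ihex_next_binrec() eventually returns NULL. */
        for (rec = (const struct ihex_binrec *)fw->data; rec;
             rec = ihex_next_binrec(rec)) {
                err = emi62_writememory(dev, be32_to_cpu(rec->addr),
                                        rec->data, be16_to_cpu(rec->len),
                                        ANCHOR_LOAD_INTERNAL);
                if (err < 0)
                        return err;
        }
        return 0;
}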
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
* * Copyright (c) 2015 secunet Security Networks AG * * Author: * Steffen Klassert <steffen.klassert@secunet.com> */ #include <linux/errno.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <net/dst.h> #include <net/gso.h> #include <net/xfrm.h> #include <linux/notifier.h> #ifdef CONFIG_XFRM_OFFLOAD static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb, unsigned int hsize) { struct xfrm_offload *xo = xfrm_offload(skb); skb_reset_mac_len(skb); if (xo->flags & XFRM_GSO_SEGMENT) skb->transport_header -= x->props.header_len; pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len); } static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb, unsigned int hsize) { struct xfrm_offload *xo = xfrm_offload(skb); if (xo->flags & XFRM_GSO_SEGMENT) skb->transport_header = skb->network_header + hsize; skb_reset_mac_len(skb); pskb_pull(skb, skb->mac_len + x->props.header_len - x->props.enc_hdr_len); } static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb, unsigned int hsize) { struct xfrm_offload *xo = xfrm_offload(skb); int phlen = 0; if (xo->flags & XFRM_GSO_SEGMENT) skb->transport_header = skb->network_header + hsize; skb_reset_mac_len(skb); if (x->sel.family != AF_INET6) { phlen = IPV4_BEET_PHMAXLEN; if (x->outer_mode.family == AF_INET6) phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr); } pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen)); } /* Adjust pointers into the packet when IPsec is done at layer2 */ static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb) { switch (x->outer_mode.encap) { case XFRM_MODE_IPTFS: case XFRM_MODE_TUNNEL: if (x->outer_mode.family == AF_INET) return __xfrm_mode_tunnel_prep(x, skb, sizeof(struct iphdr)); if (x->outer_mode.family == AF_INET6) return __xfrm_mode_tunnel_prep(x, skb, sizeof(struct ipv6hdr)); break; case XFRM_MODE_TRANSPORT: if (x->outer_mode.family == AF_INET) return __xfrm_transport_prep(x, skb, sizeof(struct iphdr)); if (x->outer_mode.family == AF_INET6) return __xfrm_transport_prep(x, skb, sizeof(struct ipv6hdr)); break; case XFRM_MODE_BEET: if (x->outer_mode.family == AF_INET) return __xfrm_mode_beet_prep(x, skb, sizeof(struct iphdr)); if (x->outer_mode.family == AF_INET6) return __xfrm_mode_beet_prep(x, skb, sizeof(struct ipv6hdr)); break; case XFRM_MODE_ROUTEOPTIMIZATION: case XFRM_MODE_IN_TRIGGER: break; } } static inline bool xmit_xfrm_check_overflow(struct sk_buff *skb) { struct xfrm_offload *xo = xfrm_offload(skb); __u32 seq = xo->seq.low; seq += skb_shinfo(skb)->gso_segs; if (unlikely(seq < xo->seq.low)) return true; return false; } struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again) { int err; unsigned long flags; struct xfrm_state *x; struct softnet_data *sd; struct sk_buff *skb2, *nskb, *pskb = NULL; netdev_features_t esp_features = features; struct xfrm_offload *xo = xfrm_offload(skb); struct net_device *dev = skb->dev; struct sec_path *sp; if (!xo || (xo->flags & XFRM_XMIT)) return skb; if (!(features & NETIF_F_HW_ESP)) esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); sp = skb_sec_path(skb); x = sp->xvec[sp->len - 1]; if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN) return skb; /* The packet was sent to HW IPsec packet offload engine, * but to wrong device. Drop the packet, so it won't skip * XFRM stack. 
*/ if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET && x->xso.dev != dev) { kfree_skb(skb); dev_core_stats_tx_dropped_inc(dev); return NULL; } /* This skb was already validated on the upper/virtual dev */ if ((x->xso.dev != dev) && (x->xso.real_dev == dev)) return skb; local_irq_save(flags); sd = this_cpu_ptr(&softnet_data); err = !skb_queue_empty(&sd->xfrm_backlog); local_irq_restore(flags); if (err) { *again = true; return skb; } if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) || unlikely(xmit_xfrm_check_overflow(skb)))) { struct sk_buff *segs; /* Packet got rerouted, fixup features and segment it. */ esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP); segs = skb_gso_segment(skb, esp_features); if (IS_ERR(segs)) { kfree_skb(skb); dev_core_stats_tx_dropped_inc(dev); return NULL; } else { consume_skb(skb); skb = segs; } } if (!skb->next) { esp_features |= skb->dev->gso_partial_features; xfrm_outer_mode_prep(x, skb); xo->flags |= XFRM_DEV_RESUME; err = x->type_offload->xmit(x, skb, esp_features); if (err) { if (err == -EINPROGRESS) return NULL; XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR); kfree_skb(skb); return NULL; } skb_push(skb, skb->data - skb_mac_header(skb)); return skb; } skb_list_walk_safe(skb, skb2, nskb) { esp_features |= skb->dev->gso_partial_features; skb_mark_not_on_list(skb2); xo = xfrm_offload(skb2); xo->flags |= XFRM_DEV_RESUME; xfrm_outer_mode_prep(x, skb2); err = x->type_offload->xmit(x, skb2, esp_features); if (!err) { skb2->next = nskb; } else if (err != -EINPROGRESS) { XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR); skb2->next = nskb; kfree_skb_list(skb2); return NULL; } else { if (skb == skb2) skb = nskb; else pskb->next = nskb; continue; } skb_push(skb2, skb2->data - skb_mac_header(skb2)); pskb = skb2; } return skb; } EXPORT_SYMBOL_GPL(validate_xmit_xfrm); int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack) { int err; struct dst_entry *dst; struct net_device *dev; struct xfrm_dev_offload *xso = &x->xso; xfrm_address_t *saddr; xfrm_address_t *daddr; bool is_packet_offload; if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET)) { NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request"); return -EINVAL; } if ((xuo->flags & XFRM_OFFLOAD_INBOUND && x->dir == XFRM_SA_DIR_OUT) || (!(xuo->flags & XFRM_OFFLOAD_INBOUND) && x->dir == XFRM_SA_DIR_IN)) { NL_SET_ERR_MSG(extack, "Mismatched SA and offload direction"); return -EINVAL; } is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET; /* We don't yet support TFC padding. */ if (x->tfcpad) { NL_SET_ERR_MSG(extack, "TFC padding can't be offloaded"); return -EINVAL; } dev = dev_get_by_index(net, xuo->ifindex); if (!dev) { struct xfrm_dst_lookup_params params; if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) { saddr = &x->props.saddr; daddr = &x->id.daddr; } else { saddr = &x->id.daddr; daddr = &x->props.saddr; } memset(&params, 0, sizeof(params)); params.net = net; params.saddr = saddr; params.daddr = daddr; params.mark = xfrm_smark_get(0, x); dst = __xfrm_dst_lookup(x->props.family, &params); if (IS_ERR(dst)) return (is_packet_offload) ? -EINVAL : 0; dev = dst->dev; dev_hold(dev); dst_release(dst); } if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) { xso->dev = NULL; dev_put(dev); return (is_packet_offload) ? 
-EINVAL : 0; } if (!is_packet_offload && x->props.flags & XFRM_STATE_ESN && !dev->xfrmdev_ops->xdo_dev_state_advance_esn) { NL_SET_ERR_MSG(extack, "Device doesn't support offload with ESN"); xso->dev = NULL; dev_put(dev); return -EINVAL; } xfrm_set_type_offload(x); if (!x->type_offload) { NL_SET_ERR_MSG(extack, "Type doesn't support offload"); dev_put(dev); return -EINVAL; } xso->dev = dev; netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC); xso->real_dev = dev; if (xuo->flags & XFRM_OFFLOAD_INBOUND) xso->dir = XFRM_DEV_OFFLOAD_IN; else xso->dir = XFRM_DEV_OFFLOAD_OUT; if (is_packet_offload) xso->type = XFRM_DEV_OFFLOAD_PACKET; else xso->type = XFRM_DEV_OFFLOAD_CRYPTO; err = dev->xfrmdev_ops->xdo_dev_state_add(x, extack); if (err) { xso->dev = NULL; xso->dir = 0; xso->real_dev = NULL; netdev_put(dev, &xso->dev_tracker); xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; xfrm_unset_type_offload(x); /* User explicitly requested packet offload mode and configured * policy in addition to the XFRM state. So be civil to users, * and return an error instead of taking fallback path. */ if ((err != -EOPNOTSUPP && !is_packet_offload) || is_packet_offload) { NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this state"); return err; } } return 0; } EXPORT_SYMBOL_GPL(xfrm_dev_state_add); int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp, struct xfrm_user_offload *xuo, u8 dir, struct netlink_ext_ack *extack) { struct xfrm_dev_offload *xdo = &xp->xdo; struct net_device *dev; int err; if (!xuo->flags || xuo->flags & ~XFRM_OFFLOAD_PACKET) { /* We support only packet offload mode and it means * that user must set XFRM_OFFLOAD_PACKET bit. */ NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request"); return -EINVAL; } dev = dev_get_by_index(net, xuo->ifindex); if (!dev) return -EINVAL; if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_policy_add) { xdo->dev = NULL; dev_put(dev); NL_SET_ERR_MSG(extack, "Policy offload is not supported"); return -EINVAL; } xdo->dev = dev; netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC); xdo->real_dev = dev; xdo->type = XFRM_DEV_OFFLOAD_PACKET; switch (dir) { case XFRM_POLICY_IN: xdo->dir = XFRM_DEV_OFFLOAD_IN; break; case XFRM_POLICY_OUT: xdo->dir = XFRM_DEV_OFFLOAD_OUT; break; case XFRM_POLICY_FWD: xdo->dir = XFRM_DEV_OFFLOAD_FWD; break; default: xdo->dev = NULL; netdev_put(dev, &xdo->dev_tracker); NL_SET_ERR_MSG(extack, "Unrecognized offload direction"); return -EINVAL; } err = dev->xfrmdev_ops->xdo_dev_policy_add(xp, extack); if (err) { xdo->dev = NULL; xdo->real_dev = NULL; xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; xdo->dir = 0; netdev_put(dev, &xdo->dev_tracker); NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this policy"); return err; } return 0; } EXPORT_SYMBOL_GPL(xfrm_dev_policy_add); bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x) { int mtu; struct dst_entry *dst = skb_dst(skb); struct xfrm_dst *xdst = (struct xfrm_dst *)dst; struct net_device *dev = x->xso.dev; bool check_tunnel_size; if (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED) return false; if ((dev == xfrm_dst_path(dst)->dev) && !xdst->child->xfrm) { mtu = xfrm_state_mtu(x, xdst->child_mtu_cached); if (skb->len <= mtu) goto ok; if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) goto ok; } return false; ok: check_tunnel_size = x->xso.type == XFRM_DEV_OFFLOAD_PACKET && x->props.mode == XFRM_MODE_TUNNEL; switch (x->props.family) { case AF_INET: /* Check for IPv4 options */ if (ip_hdr(skb)->ihl != 5) return false; if (check_tunnel_size && 
xfrm4_tunnel_check_size(skb)) return false; break; case AF_INET6: /* Check for IPv6 extensions */ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) return false; if (check_tunnel_size && xfrm6_tunnel_check_size(skb)) return false; break; default: break; } if (dev->xfrmdev_ops->xdo_dev_offload_ok) return dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x); return true; } EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok); void xfrm_dev_resume(struct sk_buff *skb) { struct net_device *dev = skb->dev; int ret = NETDEV_TX_BUSY; struct netdev_queue *txq; struct softnet_data *sd; unsigned long flags; rcu_read_lock(); txq = netdev_core_pick_tx(dev, skb, NULL); HARD_TX_LOCK(dev, txq, smp_processor_id()); if (!netif_xmit_frozen_or_stopped(txq)) skb = dev_hard_start_xmit(skb, dev, txq, &ret); HARD_TX_UNLOCK(dev, txq); if (!dev_xmit_complete(ret)) { local_irq_save(flags); sd = this_cpu_ptr(&softnet_data); skb_queue_tail(&sd->xfrm_backlog, skb); raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); } rcu_read_unlock(); } EXPORT_SYMBOL_GPL(xfrm_dev_resume); void xfrm_dev_backlog(struct softnet_data *sd) { struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog; struct sk_buff_head list; struct sk_buff *skb; if (skb_queue_empty(xfrm_backlog)) return; __skb_queue_head_init(&list); spin_lock(&xfrm_backlog->lock); skb_queue_splice_init(xfrm_backlog, &list); spin_unlock(&xfrm_backlog->lock); while (!skb_queue_empty(&list)) { skb = __skb_dequeue(&list); xfrm_dev_resume(skb); } } #endif static int xfrm_api_check(struct net_device *dev) { #ifdef CONFIG_XFRM_OFFLOAD if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) && !(dev->features & NETIF_F_HW_ESP)) return NOTIFY_BAD; if ((dev->features & NETIF_F_HW_ESP) && (!(dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_state_add && dev->xfrmdev_ops->xdo_dev_state_delete))) return NOTIFY_BAD; #else if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM)) return NOTIFY_BAD; #endif return NOTIFY_DONE; } static int xfrm_dev_down(struct net_device *dev) { if (dev->features & NETIF_F_HW_ESP) { xfrm_dev_state_flush(dev_net(dev), dev, true); xfrm_dev_policy_flush(dev_net(dev), dev, true); } return NOTIFY_DONE; } static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_REGISTER: return xfrm_api_check(dev); case NETDEV_FEAT_CHANGE: return xfrm_api_check(dev); case NETDEV_DOWN: case NETDEV_UNREGISTER: return xfrm_dev_down(dev); } return NOTIFY_DONE; } static struct notifier_block xfrm_dev_notifier = { .notifier_call = xfrm_dev_event, }; void __init xfrm_dev_init(void) { register_netdevice_notifier(&xfrm_dev_notifier); }
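/*
 * Editor's illustrative sketch (not part of xfrm_device.c above): the hooks a
 * NIC driver registers so that xfrm_dev_state_add() and validate_xmit_xfrm()
 * can reach it. The my_xdo_* callbacks and my_netdev_setup() are hypothetical.
 */
static const struct xfrmdev_ops my_xfrmdev_ops = {
        .xdo_dev_state_add      = my_xdo_state_add,     /* hypothetical */
        .xdo_dev_state_delete   = my_xdo_state_delete,  /* hypothetical */
        .xdo_dev_offload_ok     = my_xdo_offload_ok,    /* hypothetical */
};

static void my_netdev_setup(struct net_device *dev)
{
        /* xfrm_api_check() above rejects devices that advertise
         * NETIF_F_HW_ESP without state_add/state_delete callbacks. */
        dev->xfrmdev_ops = &my_xfrmdev_ops;
        dev->features |= NETIF_F_HW_ESP;
        dev->hw_features |= NETIF_F_HW_ESP;
}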
/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * Copyright 2016 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
*/ #ifndef _DRM_DRV_H_ #define _DRM_DRV_H_ #include <linux/list.h> #include <linux/irqreturn.h> #include <video/nomodeset.h> #include <drm/drm_device.h> struct dmem_cgroup_region; struct drm_fb_helper; struct drm_fb_helper_surface_size; struct drm_file; struct drm_gem_object; struct drm_master; struct drm_minor; struct dma_buf; struct dma_buf_attachment; struct drm_display_mode; struct drm_mode_create_dumb; struct drm_printer; struct sg_table; /** * enum drm_driver_feature - feature flags * * See &drm_driver.driver_features, drm_device.driver_features and * drm_core_check_feature(). */ enum drm_driver_feature { /** * @DRIVER_GEM: * * Driver use the GEM memory manager. This should be set for all modern * drivers. */ DRIVER_GEM = BIT(0), /** * @DRIVER_MODESET: * * Driver supports mode setting interfaces (KMS). */ DRIVER_MODESET = BIT(1), /** * @DRIVER_RENDER: * * Driver supports dedicated render nodes. See also the :ref:`section on * render nodes <drm_render_node>` for details. */ DRIVER_RENDER = BIT(3), /** * @DRIVER_ATOMIC: * * Driver supports the full atomic modesetting userspace API. Drivers * which only use atomic internally, but do not support the full * userspace API (e.g. not all properties converted to atomic, or * multi-plane updates are not guaranteed to be tear-free) should not * set this flag. */ DRIVER_ATOMIC = BIT(4), /** * @DRIVER_SYNCOBJ: * * Driver supports &drm_syncobj for explicit synchronization of command * submission. */ DRIVER_SYNCOBJ = BIT(5), /** * @DRIVER_SYNCOBJ_TIMELINE: * * Driver supports the timeline flavor of &drm_syncobj for explicit * synchronization of command submission. */ DRIVER_SYNCOBJ_TIMELINE = BIT(6), /** * @DRIVER_COMPUTE_ACCEL: * * Driver supports compute acceleration devices. This flag is mutually exclusive with * @DRIVER_RENDER and @DRIVER_MODESET. Devices that support both graphics and compute * acceleration should be handled by two drivers that are connected using auxiliary bus. */ DRIVER_COMPUTE_ACCEL = BIT(7), /** * @DRIVER_GEM_GPUVA: * * Driver supports user defined GPU VA bindings for GEM objects. */ DRIVER_GEM_GPUVA = BIT(8), /** * @DRIVER_CURSOR_HOTSPOT: * * Driver supports and requires cursor hotspot information in the * cursor plane (e.g. cursor plane has to actually track the mouse * cursor and the clients are required to set hotspot in order for * the cursor planes to work correctly). */ DRIVER_CURSOR_HOTSPOT = BIT(9), /* IMPORTANT: Below are all the legacy flags, add new ones above. */ /** * @DRIVER_USE_AGP: * * Set up DRM AGP support, see drm_agp_init(), the DRM core will manage * AGP resources. New drivers don't need this. */ DRIVER_USE_AGP = BIT(25), /** * @DRIVER_LEGACY: * * Denote a legacy driver using shadow attach. Do not use. */ DRIVER_LEGACY = BIT(26), /** * @DRIVER_PCI_DMA: * * Driver is capable of PCI DMA, mapping of PCI DMA buffers to userspace * will be enabled. Only for legacy drivers. Do not use. */ DRIVER_PCI_DMA = BIT(27), /** * @DRIVER_SG: * * Driver can perform scatter/gather DMA, allocation and mapping of * scatter/gather buffers will be enabled. Only for legacy drivers. Do * not use. */ DRIVER_SG = BIT(28), /** * @DRIVER_HAVE_DMA: * * Driver supports DMA, the userspace DMA API will be supported. Only * for legacy drivers. Do not use. */ DRIVER_HAVE_DMA = BIT(29), /** * @DRIVER_HAVE_IRQ: * * Legacy irq support. Only for legacy drivers. Do not use. */ DRIVER_HAVE_IRQ = BIT(30), }; /** * struct drm_driver - DRM driver structure * * This structure represent the common code for a family of cards. 
There will be * one &struct drm_device for each card present in this family. It contains lots * of vfunc entries, and a pile of those probably should be moved to more * appropriate places like &drm_mode_config_funcs or into a new operations * structure for GEM drivers. */ struct drm_driver { /** * @load: * * Backward-compatible driver callback to complete initialization steps * after the driver is registered. For this reason, may suffer from * race conditions and its use is deprecated for new drivers. It is * therefore only supported for existing drivers not yet converted to * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for * proper and race-free way to set up a &struct drm_device. * * This is deprecated, do not use! * * Returns: * * Zero on success, non-zero value on failure. */ int (*load) (struct drm_device *, unsigned long flags); /** * @open: * * Driver callback when a new &struct drm_file is opened. Useful for * setting up driver-private data structures like buffer allocators, * execution contexts or similar things. Such driver-private resources * must be released again in @postclose. * * Since the display/modeset side of DRM can only be owned by exactly * one &struct drm_file (see &drm_file.is_master and &drm_device.master) * there should never be a need to set up any modeset related resources * in this callback. Doing so would be a driver design bug. * * Returns: * * 0 on success, a negative error code on failure, which will be * promoted to userspace as the result of the open() system call. */ int (*open) (struct drm_device *, struct drm_file *); /** * @postclose: * * One of the driver callbacks when a new &struct drm_file is closed. * Useful for tearing down driver-private data structures allocated in * @open like buffer allocators, execution contexts or similar things. * * Since the display/modeset side of DRM can only be owned by exactly * one &struct drm_file (see &drm_file.is_master and &drm_device.master) * there should never be a need to tear down any modeset related * resources in this callback. Doing so would be a driver design bug. */ void (*postclose) (struct drm_device *, struct drm_file *); /** * @unload: * * Reverse the effects of the driver load callback. Ideally, * the clean up performed by the driver should happen in the * reverse order of the initialization. Similarly to the load * hook, this handler is deprecated and its usage should be * dropped in favor of an open-coded teardown function at the * driver layer. See drm_dev_unregister() and drm_dev_put() * for the proper way to remove a &struct drm_device. * * The unload() hook is called right after unregistering * the device. * */ void (*unload) (struct drm_device *); /** * @release: * * Optional callback for destroying device data after the final * reference is released, i.e. the device is being destroyed. * * This is deprecated, clean up all memory allocations associated with a * &drm_device using drmm_add_action(), drmm_kmalloc() and related * managed resources functions. */ void (*release) (struct drm_device *); /** * @master_set: * * Called whenever the minor master is set. Only used by vmwgfx. */ void (*master_set)(struct drm_device *dev, struct drm_file *file_priv, bool from_open); /** * @master_drop: * * Called whenever the minor master is dropped. Only used by vmwgfx. */ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv); /** * @debugfs_init: * * Allows drivers to create driver-specific debugfs files. 
*/
        void (*debugfs_init)(struct drm_minor *minor);

        /**
         * @gem_create_object: constructor for gem objects
         *
         * Hook for allocating the GEM object struct, for use by the CMA
         * and SHMEM GEM helpers. Returns a GEM object on success, or an
         * ERR_PTR()-encoded error code otherwise.
         */
        struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
                                                    size_t size);

        /**
         * @prime_handle_to_fd:
         *
         * PRIME export function. Only used by vmwgfx.
         */
        int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
                                  uint32_t handle, uint32_t flags, int *prime_fd);
        /**
         * @prime_fd_to_handle:
         *
         * PRIME import function. Only used by vmwgfx.
         */
        int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
                                  int prime_fd, uint32_t *handle);
        /**
         * @gem_prime_import:
         *
         * Import hook for GEM drivers.
         *
         * This defaults to drm_gem_prime_import() if not set.
         */
        struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
                                struct dma_buf *dma_buf);
        /**
         * @gem_prime_import_sg_table:
         *
         * Optional hook used by the PRIME helper functions
         * drm_gem_prime_import() respectively drm_gem_prime_import_dev().
         */
        struct drm_gem_object *(*gem_prime_import_sg_table)(
                                struct drm_device *dev,
                                struct dma_buf_attachment *attach,
                                struct sg_table *sgt);

        /**
         * @dumb_create:
         *
         * This creates a new dumb buffer in the driver's backing storage manager (GEM,
         * TTM or something else entirely) and returns the resulting buffer handle. This
         * handle can then be wrapped up into a framebuffer modeset object.
         *
         * Note that userspace is not allowed to use such objects for render
         * acceleration - drivers must create their own private ioctls for such a use
         * case.
         *
         * Width, height and depth are specified in the &drm_mode_create_dumb
         * argument. The callback needs to fill the handle, pitch and size for
         * the created buffer.
         *
         * Called by the user via ioctl.
         *
         * Returns:
         *
         * Zero on success, negative errno on failure.
         */
        int (*dumb_create)(struct drm_file *file_priv,
                           struct drm_device *dev,
                           struct drm_mode_create_dumb *args);
        /**
         * @dumb_map_offset:
         *
         * Allocate an offset in the drm device node's address space to be able to
         * memory map a dumb buffer.
         *
         * The default implementation is drm_gem_create_mmap_offset(). GEM based
         * drivers must not overwrite this.
         *
         * Called by the user via ioctl.
         *
         * Returns:
         *
         * Zero on success, negative errno on failure.
         */
        int (*dumb_map_offset)(struct drm_file *file_priv,
                               struct drm_device *dev, uint32_t handle,
                               uint64_t *offset);

        /**
         * @fbdev_probe:
         *
         * Allocates and initializes the fb_info structure for fbdev emulation.
         * Furthermore it also needs to allocate the DRM framebuffer used to
         * back the fbdev.
         *
         * This callback is mandatory for fbdev support.
         *
         * Returns:
         *
         * 0 on success or a negative error code otherwise.
         */
        int (*fbdev_probe)(struct drm_fb_helper *fbdev_helper,
                           struct drm_fb_helper_surface_size *sizes);

        /**
         * @show_fdinfo:
         *
         * Print device specific fdinfo. See Documentation/gpu/drm-usage-stats.rst.
         */
        void (*show_fdinfo)(struct drm_printer *p, struct drm_file *f);

        /** @major: driver major number */
        int major;
        /** @minor: driver minor number */
        int minor;
        /** @patchlevel: driver patch level */
        int patchlevel;
        /** @name: driver name */
        char *name;
        /** @desc: driver description */
        char *desc;

        /**
         * @driver_features:
         *
         * Driver features, see &enum drm_driver_feature. Drivers can disable
         * some features on a per-instance basis using
         * &drm_device.driver_features.
         */
        u32 driver_features;

        /**
         * @ioctls:
         *
         * Array of driver-private IOCTL description entries.
See the chapter on
         * :ref:`IOCTL support in the userland interfaces
         * chapter<drm_driver_ioctl>` for the full details.
         */
        const struct drm_ioctl_desc *ioctls;
        /** @num_ioctls: Number of entries in @ioctls. */
        int num_ioctls;

        /**
         * @fops:
         *
         * File operations for the DRM device node. See the discussion in
         * :ref:`file operations<drm_driver_fops>` for in-depth coverage and
         * some examples.
         */
        const struct file_operations *fops;
};

void *__devm_drm_dev_alloc(struct device *parent,
                           const struct drm_driver *driver,
                           size_t size, size_t offset);

struct dmem_cgroup_region *
drmm_cgroup_register_region(struct drm_device *dev,
                            const char *region_name, u64 size);

/**
 * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance
 * @parent: Parent device object
 * @driver: DRM driver
 * @type: the type of the struct which contains struct &drm_device
 * @member: the name of the &drm_device within @type.
 *
 * This allocates and initializes a new DRM device. No device registration is
 * done. Call drm_dev_register() to advertise the device to user space and
 * register it with other core subsystems. This should be done last in the
 * device initialization sequence to make sure userspace can't access an
 * inconsistent state.
 *
 * The initial ref-count of the object is 1. Use drm_dev_get() and
 * drm_dev_put() to take and drop further ref-counts.
 *
 * It is recommended that drivers embed &struct drm_device into their own device
 * structure.
 *
 * Note that this manages the lifetime of the resulting &drm_device
 * automatically using devres. The DRM device initialized with this function is
 * automatically put on driver detach using drm_dev_put().
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
#define devm_drm_dev_alloc(parent, driver, type, member) \
        ((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \
                                       offsetof(type, member)))

struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
                                 struct device *parent);
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);

void drm_dev_get(struct drm_device *dev);
void drm_dev_put(struct drm_device *dev);
void drm_put_dev(struct drm_device *dev);
bool drm_dev_enter(struct drm_device *dev, int *idx);
void drm_dev_exit(int idx);
void drm_dev_unplug(struct drm_device *dev);
int drm_dev_wedged_event(struct drm_device *dev, unsigned long method);

/**
 * drm_dev_is_unplugged - is a DRM device unplugged
 * @dev: DRM device
 *
 * This function can be called to check whether a hotpluggable device is
 * unplugged. Unplugging itself is signalled through drm_dev_unplug(). If a
 * device is unplugged, these two functions guarantee that any store before
 * calling drm_dev_unplug() is visible to callers of this function after it
 * completes.
 *
 * WARNING: This function fundamentally races against drm_dev_unplug(). It is
 * recommended that drivers instead use the underlying drm_dev_enter() and
 * drm_dev_exit() function pairs.
 */
static inline bool drm_dev_is_unplugged(struct drm_device *dev)
{
        int idx;

        if (drm_dev_enter(dev, &idx)) {
                drm_dev_exit(idx);
                return false;
        }

        return true;
}

/**
 * drm_core_check_all_features - check driver feature flags mask
 * @dev: DRM device to check
 * @features: feature flag(s) mask
 *
 * This checks @dev for driver features, see &drm_driver.driver_features,
 * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
 *
 * Returns true if all features in the @features mask are supported, false
 * otherwise.
*/ static inline bool drm_core_check_all_features(const struct drm_device *dev, u32 features) { u32 supported = dev->driver->driver_features & dev->driver_features; return features && (supported & features) == features; } /** * drm_core_check_feature - check driver feature flags * @dev: DRM device to check * @feature: feature flag * * This checks @dev for driver features, see &drm_driver.driver_features, * &drm_device.driver_features, and the various &enum drm_driver_feature flags. * * Returns true if the @feature is supported, false otherwise. */ static inline bool drm_core_check_feature(const struct drm_device *dev, enum drm_driver_feature feature) { return drm_core_check_all_features(dev, feature); } /** * drm_drv_uses_atomic_modeset - check if the driver implements * atomic_commit() * @dev: DRM device * * This check is useful if drivers do not have DRIVER_ATOMIC set but * have atomic modesetting internally implemented. */ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) { return drm_core_check_feature(dev, DRIVER_ATOMIC) || (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); } /* TODO: Inline drm_firmware_drivers_only() in all its callers. */ static inline bool drm_firmware_drivers_only(void) { return video_firmware_drivers_only(); } #if defined(CONFIG_DEBUG_FS) void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root); #else static inline void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root) { } #endif #endif
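/*
 * Editor's illustrative sketch (not part of this header): a minimal driver
 * built on the declarations above, following the devm_drm_dev_alloc()
 * kerneldoc. struct my_device, my_fops and my_probe() are hypothetical.
 */
struct my_device {
        struct drm_device drm;          /* embedded, as recommended above */
        /* ... driver-private state ... */
};

static const struct drm_driver my_drm_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops            = &my_fops,    /* hypothetical file_operations */
        .name            = "my-drm",
        .desc            = "Illustrative DRM driver",
        .major           = 1,
        .minor           = 0,
};

static int my_probe(struct platform_device *pdev)
{
        struct my_device *mdev;

        mdev = devm_drm_dev_alloc(&pdev->dev, &my_drm_driver,
                                  struct my_device, drm);
        if (IS_ERR(mdev))
                return PTR_ERR(mdev);

        /* ... set up mode config, planes, CRTCs, encoders ... */

        return drm_dev_register(&mdev->drm, 0); /* advertise to userspace last */
}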
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef LLIST_H
#define LLIST_H
/*
 * Lock-less NULL terminated single linked list
 *
 * Cases where locking is not needed:
 * If there are multiple producers and multiple consumers, llist_add can be
 * used in producers and llist_del_all can be used in consumers simultaneously
 * without locking. Also a single consumer can use llist_del_first while
 * multiple producers simultaneously use llist_add, without any locking.
 *
 * Cases where locking is needed:
 * If we have multiple consumers with llist_del_first used in one consumer, and
 * llist_del_first or llist_del_all used in other consumers, then a lock is
 * needed. This is because llist_del_first depends on list->first->next not
 * changing, but without lock protection, there's no way to be sure about that
 * if a preemption happens in the middle of the delete operation and on being
 * preempted back, the list->first is the same as before causing the cmpxchg in
 * llist_del_first to succeed. For example, while a llist_del_first operation
 * is in progress in one consumer, then a llist_del_first, llist_add,
 * llist_add (or llist_del_all, llist_add, llist_add) sequence in another
 * consumer may cause violations.
 *
 * This can be summarized as follows:
 *
 *           |   add    | del_first |  del_all
 * add       |    -     |     -     |     -
 * del_first |          |     L     |     L
 * del_all   |          |           |     -
 *
 * Where, a particular row's operation can happen concurrently with a column's
 * operation, with "-" being no lock needed, while "L" being lock is needed.
 *
 * The list entries deleted via llist_del_all can be traversed with
 * traversing function such as llist_for_each etc. But the list
 * entries can not be traversed safely before deleted from the list.
 * The order of deleted entries is from the newest to the oldest added
 * one. If you want to traverse from the oldest to the newest, you
 * must reverse the order by yourself before traversing.
 *
 * The basic atomic operation of this list is cmpxchg on long. On
 * architectures that don't have NMI-safe cmpxchg implementation, the
 * list can NOT be used in NMI handlers. So code that uses the list in
 * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2010,2011 Intel Corp.
 * Author: Huang Ying <ying.huang@intel.com>
 */

#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct llist_head {
        struct llist_node *first;
};

struct llist_node {
        struct llist_node *next;
};

#define LLIST_HEAD_INIT(name)   { NULL }
#define LLIST_HEAD(name)        struct llist_head name = LLIST_HEAD_INIT(name)

/**
 * init_llist_head - initialize lock-less list head
 * @list:       the head for your lock-less list
 */
static inline void init_llist_head(struct llist_head *list)
{
        list->first = NULL;
}

/**
 * init_llist_node - initialize lock-less list node
 * @node:       the node to be initialised
 *
 * In cases where there is a need to test if a node is on
 * a list or not, this initialises the node to clearly
 * not be on any list.
 */
static inline void init_llist_node(struct llist_node *node)
{
        node->next = node;
}

/**
 * llist_on_list - test if a lock-less list node is on a list
 * @node:       the node to test
 *
 * When a node is on a list the ->next pointer will be NULL or
 * some other node. It can never point to itself. We use that
 * in init_llist_node() to record that a node is not on any list,
 * and here to test whether it is on any list.
 */
static inline bool llist_on_list(const struct llist_node *node)
{
        return node->next != node;
}

/**
 * llist_entry - get the struct of this entry
 * @ptr:        the &struct llist_node pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the llist_node within the struct.
 */
#define llist_entry(ptr, type, member)          \
        container_of(ptr, type, member)

/**
 * member_address_is_nonnull - check whether the member address is not NULL
 * @ptr:        the object pointer (struct type * that contains the llist_node)
 * @member:     the name of the llist_node within the struct.
 *
 * This macro is conceptually the same as
 *      &ptr->member != NULL
 * but it works around the fact that compilers can decide that taking a member
 * address is never a NULL pointer.
 *
 * Real objects that start at a high address and have a member at NULL are
 * unlikely to exist, but such pointers may be returned e.g. by the
 * container_of() macro.
 */
#define member_address_is_nonnull(ptr, member)  \
        ((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0)

/**
 * llist_for_each - iterate over some deleted entries of a lock-less list
 * @pos:        the &struct llist_node to use as a loop cursor
 * @node:       the first entry of deleted list entries
 *
 * In general, some entries of the lock-less list can be traversed
 * safely only after being deleted from list, so start with an entry
 * instead of list head.
 *
 * If being used on entries deleted from lock-less list directly, the
 * traverse order is from the newest to the oldest added entry. If
 * you want to traverse from the oldest to the newest, you must
 * reverse the order by yourself before traversing.
 */
#define llist_for_each(pos, node)                       \
        for ((pos) = (node); pos; (pos) = (pos)->next)

/**
 * llist_for_each_safe - iterate over some deleted entries of a lock-less list
 *                       safe against removal of list entry
 * @pos:        the &struct llist_node to use as a loop cursor
 * @n:          another &struct llist_node to use as temporary storage
 * @node:       the first entry of deleted list entries
 *
 * In general, some entries of the lock-less list can be traversed
 * safely only after being deleted from list, so start with an entry
 * instead of list head.
 *
 * If being used on entries deleted from lock-less list directly, the
 * traverse order is from the newest to the oldest added entry.
 If
 * you want to traverse from the oldest to the newest, you must
 * reverse the order by yourself before traversing.
 */
#define llist_for_each_safe(pos, n, node)                       \
        for ((pos) = (node); (pos) && ((n) = (pos)->next, true); (pos) = (n))

/**
 * llist_for_each_entry - iterate over some deleted entries of lock-less list of given type
 * @pos:        the type * to use as a loop cursor.
 * @node:       the first entry of deleted list entries.
 * @member:     the name of the llist_node within the struct.
 *
 * In general, some entries of the lock-less list can be traversed
 * safely only after being removed from list, so start with an entry
 * instead of list head.
 *
 * If being used on entries deleted from lock-less list directly, the
 * traverse order is from the newest to the oldest added entry. If
 * you want to traverse from the oldest to the newest, you must
 * reverse the order by yourself before traversing.
 */
#define llist_for_each_entry(pos, node, member)                         \
        for ((pos) = llist_entry((node), typeof(*(pos)), member);       \
             member_address_is_nonnull(pos, member);                    \
             (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))

/**
 * llist_for_each_entry_safe - iterate over some deleted entries of lock-less list of given type
 *                             safe against removal of list entry
 * @pos:        the type * to use as a loop cursor.
 * @n:          another type * to use as temporary storage
 * @node:       the first entry of deleted list entries.
 * @member:     the name of the llist_node within the struct.
 *
 * In general, some entries of the lock-less list can be traversed
 * safely only after being removed from list, so start with an entry
 * instead of list head.
 *
 * If being used on entries deleted from lock-less list directly, the
 * traverse order is from the newest to the oldest added entry. If
 * you want to traverse from the oldest to the newest, you must
 * reverse the order by yourself before traversing.
 */
#define llist_for_each_entry_safe(pos, n, node, member)                        \
        for (pos = llist_entry((node), typeof(*pos), member);                  \
             member_address_is_nonnull(pos, member) &&                         \
                (n = llist_entry(pos->member.next, typeof(*n), member), true); \
             pos = n)

/**
 * llist_empty - tests whether a lock-less list is empty
 * @head:       the list to test
 *
 * Not guaranteed to be accurate or up to date. Just a quick way to
 * test whether the list is empty without deleting something from the
 * list.
 */
static inline bool llist_empty(const struct llist_head *head)
{
        return READ_ONCE(head->first) == NULL;
}

static inline struct llist_node *llist_next(struct llist_node *node)
{
        return node->next;
}

extern bool llist_add_batch(struct llist_node *new_first,
                            struct llist_node *new_last,
                            struct llist_head *head);

static inline bool __llist_add_batch(struct llist_node *new_first,
                                     struct llist_node *new_last,
                                     struct llist_head *head)
{
        new_last->next = head->first;
        head->first = new_first;
        return new_last->next == NULL;
}

/**
 * llist_add - add a new entry
 * @new:        new entry to be added
 * @head:       the head for your lock-less list
 *
 * Returns true if the list was empty prior to adding this entry.
 */
static inline bool llist_add(struct llist_node *new, struct llist_head *head)
{
        return llist_add_batch(new, new, head);
}

static inline bool __llist_add(struct llist_node *new, struct llist_head *head)
{
        return __llist_add_batch(new, new, head);
}

/**
 * llist_del_all - delete all entries from lock-less list
 * @head:       the head of lock-less list to delete all entries
 *
 * If list is empty, return NULL, otherwise, delete all entries and
 * return the pointer to the first entry.
 The order of entries
 * deleted is from the newest to the oldest added one.
 */
static inline struct llist_node *llist_del_all(struct llist_head *head)
{
        return xchg(&head->first, NULL);
}

static inline struct llist_node *__llist_del_all(struct llist_head *head)
{
        struct llist_node *first = head->first;

        head->first = NULL;
        return first;
}

extern struct llist_node *llist_del_first(struct llist_head *head);

/**
 * llist_del_first_init - delete first entry from lock-less list and mark it as being off-list
 * @head:       the head of lock-less list to delete from.
 *
 * This behaves the same as llist_del_first() except that init_llist_node() is
 * called on the returned node so that llist_on_list() will report false for
 * the node.
 */
static inline struct llist_node *llist_del_first_init(struct llist_head *head)
{
        struct llist_node *n = llist_del_first(head);

        if (n)
                init_llist_node(n);
        return n;
}

extern bool llist_del_first_this(struct llist_head *head,
                                 struct llist_node *this);

struct llist_node *llist_reverse_order(struct llist_node *head);

#endif /* LLIST_H */
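/*
 * Editor's illustrative sketch (not part of this header): the lock-less
 * producer/consumer pattern that the locking table above permits without any
 * lock. struct my_work and process_work() are hypothetical.
 */
struct my_work {
        struct llist_node node;
        int payload;
};

static LLIST_HEAD(my_queue);

/* Producer: safe from any context, concurrently with other producers. */
static void my_produce(struct my_work *w)
{
        llist_add(&w->node, &my_queue);
}

/* Single consumer: grabs the whole batch in one atomic xchg. */
static void my_consume(void)
{
        struct llist_node *batch = llist_del_all(&my_queue);
        struct my_work *w, *tmp;

        /* Entries come back newest-first; reverse for FIFO processing. */
        batch = llist_reverse_order(batch);
        llist_for_each_entry_safe(w, tmp, batch, node)
                process_work(w);        /* hypothetical */
}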
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _BLOCK_BLK_PM_H_
#define _BLOCK_BLK_PM_H_

#include <linux/pm_runtime.h>

#ifdef CONFIG_PM
static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
{
        if (!q->dev || !blk_queue_pm_only(q))
                return 1;       /* Nothing to do */
        if (pm && q->rpm_status != RPM_SUSPENDED)
                return 1;       /* Request allowed */
        pm_request_resume(q->dev);
        return 0;
}

static inline void blk_pm_mark_last_busy(struct request *rq)
{
        if (rq->q->dev && !(rq->rq_flags & RQF_PM))
                pm_runtime_mark_last_busy(rq->q->dev);
}
#else
static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
{
        return 1;
}

static inline void blk_pm_mark_last_busy(struct request *rq)
{
}
#endif

#endif /* _BLOCK_BLK_PM_H_ */
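/*
 * Editor's illustrative sketch (not part of this header): how a dispatch path
 * would consult blk_pm_resume_queue() before handing out a request.
 * my_may_dispatch() is a hypothetical name.
 */
static bool my_may_dispatch(struct request_queue *q, struct request *rq)
{
        /*
         * Non-PM requests must not be dispatched while the queue is runtime
         * suspended: blk_pm_resume_queue() kicks an asynchronous resume and
         * returns 0, so the caller can leave the request queued for later.
         */
        return blk_pm_resume_queue(rq->rq_flags & RQF_PM, q);
}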
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/hardirq.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/kthread.h>
#include <linux/kfifo.h>
#include <net/cfg80211.h>

#include "mesh.h"
#include "decl.h"
#include "cmd.h"

static int lbs_add_mesh(struct lbs_private *priv);

/***************************************************************************
 * Mesh command handling
 */

static int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
			   struct cmd_ds_mesh_access *cmd)
{
	int ret;

	cmd->hdr.command = cpu_to_le16(CMD_MESH_ACCESS);
	cmd->hdr.size = cpu_to_le16(sizeof(*cmd));
	cmd->hdr.result = 0;
	cmd->action = cpu_to_le16(cmd_action);

	ret = lbs_cmd_with_response(priv, CMD_MESH_ACCESS, cmd);

	return ret;
}

static int __lbs_mesh_config_send(struct lbs_private *priv,
				  struct cmd_ds_mesh_config *cmd,
				  uint16_t action, uint16_t type)
{
	int ret;
	u16 command = CMD_MESH_CONFIG_OLD;

	/*
	 * Command id is 0xac for v10 FW along with mesh interface
	 * id in bits 14-13-12.
	 */
	if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
		command = CMD_MESH_CONFIG |
			  (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET);

	cmd->hdr.command = cpu_to_le16(command);
	cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_mesh_config));
	cmd->hdr.result = 0;

	cmd->type = cpu_to_le16(type);
	cmd->action = cpu_to_le16(action);

	ret = lbs_cmd_with_response(priv, command, cmd);

	return ret;
}
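/*
 * Added example (not from the original source): assuming MESH_IFACE_ID is 1
 * and MESH_IFACE_BIT_OFFSET is 12, as defined in mesh.h, the v10 command id
 * computed above works out to 0x00ac | (1 << 12) = 0x10ac, i.e.
 * CMD_MESH_CONFIG with the mesh interface id encoded in bits 14..12.
 */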
static int lbs_mesh_config_send(struct lbs_private *priv,
				struct cmd_ds_mesh_config *cmd,
				uint16_t action, uint16_t type)
{
	int ret;

	if (!(priv->fwcapinfo & FW_CAPINFO_PERSISTENT_CONFIG))
		return -EOPNOTSUPP;

	ret = __lbs_mesh_config_send(priv, cmd, action, type);
	return ret;
}

/* This function is the CMD_MESH_CONFIG legacy function. It only handles the
 * START and STOP actions. The extended actions supported by CMD_MESH_CONFIG
 * are all handled by preparing a struct cmd_ds_mesh_config and passing it to
 * lbs_mesh_config_send.
 */
static int lbs_mesh_config(struct lbs_private *priv, uint16_t action,
			   uint16_t chan)
{
	struct wireless_dev *mesh_wdev;
	struct cmd_ds_mesh_config cmd;
	struct mrvl_meshie *ie;

	memset(&cmd, 0, sizeof(cmd));
	cmd.channel = cpu_to_le16(chan);
	ie = (struct mrvl_meshie *)cmd.data;

	switch (action) {
	case CMD_ACT_MESH_CONFIG_START:
		ie->id = WLAN_EID_VENDOR_SPECIFIC;
		ie->val.oui[0] = 0x00;
		ie->val.oui[1] = 0x50;
		ie->val.oui[2] = 0x43;
		ie->val.type = MARVELL_MESH_IE_TYPE;
		ie->val.subtype = MARVELL_MESH_IE_SUBTYPE;
		ie->val.version = MARVELL_MESH_IE_VERSION;
		ie->val.active_protocol_id = MARVELL_MESH_PROTO_ID_HWMP;
		ie->val.active_metric_id = MARVELL_MESH_METRIC_ID;
		ie->val.mesh_capability = MARVELL_MESH_CAPABILITY;

		if (priv->mesh_dev) {
			mesh_wdev = priv->mesh_dev->ieee80211_ptr;
			ie->val.mesh_id_len = mesh_wdev->u.mesh.id_up_len;
			memcpy(ie->val.mesh_id, mesh_wdev->u.mesh.id,
			       mesh_wdev->u.mesh.id_up_len);
		}

		ie->len = sizeof(struct mrvl_meshie_val) -
			  IEEE80211_MAX_SSID_LEN + ie->val.mesh_id_len;
		cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val));
		break;
	case CMD_ACT_MESH_CONFIG_STOP:
		break;
	default:
		return -1;
	}
	lbs_deb_cmd("mesh config action %d type %x channel %d SSID %*pE\n",
		    action, priv->mesh_tlv, chan, ie->val.mesh_id_len,
		    ie->val.mesh_id);

	return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
}

int lbs_mesh_set_channel(struct lbs_private *priv, u8 channel)
{
	priv->mesh_channel = channel;
	return lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, channel);
}

static uint16_t lbs_mesh_get_channel(struct lbs_private *priv)
{
	return priv->mesh_channel ?: 1;
}

/***************************************************************************
 * Mesh sysfs support
 */

/*
 * Attributes exported through sysfs
 */

/**
 * anycast_mask_show - Get function for sysfs attribute anycast_mask
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer where data will be returned
 */
static ssize_t anycast_mask_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
	struct cmd_ds_mesh_access mesh_access;
	int ret;

	memset(&mesh_access, 0, sizeof(mesh_access));

	ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_ANYCAST, &mesh_access);
	if (ret)
		return ret;

	return sysfs_emit(buf, "0x%X\n", le32_to_cpu(mesh_access.data[0]));
}

/**
 * anycast_mask_store - Set function for sysfs attribute anycast_mask
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer that contains new attribute value
 * @count: size of buffer
 */
static ssize_t anycast_mask_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
	struct cmd_ds_mesh_access mesh_access;
	uint32_t datum;
	int ret;

	ret = kstrtouint(buf, 16, &datum);
	if (ret)
		return ret;

	memset(&mesh_access, 0, sizeof(mesh_access));
	mesh_access.data[0] = cpu_to_le32(datum);

	ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_ANYCAST, &mesh_access);
	if (ret)
		return ret;

	return strlen(buf);
}
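/*
 * Note (added for clarity): for the probe-response-limit command the
 * firmware uses mesh_access.data[0] for the CMD_ACT_GET/CMD_ACT_SET
 * sub-action and mesh_access.data[1] for the limit itself, as the two
 * functions below show.
 */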
/**
 * prb_rsp_limit_show - Get function for sysfs attribute prb_rsp_limit
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer where data will be returned
 */
static ssize_t prb_rsp_limit_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
	struct cmd_ds_mesh_access mesh_access;
	int ret;
	u32 retry_limit;

	memset(&mesh_access, 0, sizeof(mesh_access));
	mesh_access.data[0] = cpu_to_le32(CMD_ACT_GET);

	ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT,
			      &mesh_access);
	if (ret)
		return ret;

	retry_limit = le32_to_cpu(mesh_access.data[1]);
	return sysfs_emit(buf, "%d\n", retry_limit);
}

/**
 * prb_rsp_limit_store - Set function for sysfs attribute prb_rsp_limit
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer that contains new attribute value
 * @count: size of buffer
 */
static ssize_t prb_rsp_limit_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
	struct cmd_ds_mesh_access mesh_access;
	int ret;
	unsigned long retry_limit;

	ret = kstrtoul(buf, 10, &retry_limit);
	if (ret)
		return ret;
	if (retry_limit > 15)
		return -ENOTSUPP;

	memset(&mesh_access, 0, sizeof(mesh_access));
	mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET);
	mesh_access.data[1] = cpu_to_le32(retry_limit);

	ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT,
			      &mesh_access);
	if (ret)
		return ret;

	return strlen(buf);
}

/**
 * lbs_mesh_show - Get function for sysfs attribute mesh
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer where data will be returned
 */
static ssize_t lbs_mesh_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;

	return sysfs_emit(buf, "0x%X\n", !!priv->mesh_dev);
}

/**
 * lbs_mesh_store - Set function for sysfs attribute mesh
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer that contains new attribute value
 * @count: size of buffer
 */
static ssize_t lbs_mesh_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
	int ret, enable;

	ret = kstrtoint(buf, 16, &enable);
	if (ret)
		return ret;

	enable = !!enable;
	if (enable == !!priv->mesh_dev)
		return count;

	if (enable)
		lbs_add_mesh(priv);
	else
		lbs_remove_mesh(priv);

	return count;
}

/*
 * lbs_mesh attribute to be exported per ethX interface
 * through sysfs (/sys/class/net/ethX/lbs_mesh)
 */
static DEVICE_ATTR_RW(lbs_mesh);

/*
 * anycast_mask attribute to be exported per mshX interface
 * through sysfs (/sys/class/net/mshX/anycast_mask)
 */
static DEVICE_ATTR_RW(anycast_mask);

/*
 * prb_rsp_limit attribute to be exported per mshX interface
 * through sysfs (/sys/class/net/mshX/prb_rsp_limit)
 */
static DEVICE_ATTR_RW(prb_rsp_limit);

static struct attribute *lbs_mesh_sysfs_entries[] = {
	&dev_attr_anycast_mask.attr,
	&dev_attr_prb_rsp_limit.attr,
	NULL,
};

static const struct attribute_group lbs_mesh_attr_group = {
	.attrs = lbs_mesh_sysfs_entries,
};
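/*
 * Example sysfs usage (illustrative, not part of the original source;
 * interface names vary by system):
 *
 *   # enable the mesh interface from the main ethX device
 *   echo 1 > /sys/class/net/eth0/lbs_mesh
 *   # then tune the mesh on the resulting mshX device
 *   echo 0xffff > /sys/class/net/msh0/anycast_mask
 *   echo 10 > /sys/class/net/msh0/prb_rsp_limit   # accepts 0..15
 */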
/***************************************************************************
 * Persistent configuration support
 */

static int mesh_get_default_parameters(struct device *dev,
				       struct mrvl_mesh_defaults *defs)
{
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
	struct cmd_ds_mesh_config cmd;
	int ret;

	memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_GET,
				   CMD_TYPE_MESH_GET_DEFAULTS);
	if (ret)
		return -EOPNOTSUPP;

	memcpy(defs, &cmd.data[0], sizeof(struct mrvl_mesh_defaults));

	return 0;
}

/**
 * bootflag_show - Get function for sysfs attribute bootflag
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer where data will be returned
 */
static ssize_t bootflag_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct mrvl_mesh_defaults defs;
	int ret;

	ret = mesh_get_default_parameters(dev, &defs);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%d\n", le32_to_cpu(defs.bootflag));
}

/**
 * bootflag_store - Set function for sysfs attribute bootflag
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer that contains new attribute value
 * @count: size of buffer
 */
static ssize_t bootflag_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
	struct cmd_ds_mesh_config cmd;
	uint32_t datum;
	int ret;

	ret = kstrtouint(buf, 10, &datum);
	if (ret)
		return ret;
	if (datum > 1)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	*((__le32 *)&cmd.data[0]) = cpu_to_le32(!!datum);
	cmd.length = cpu_to_le16(sizeof(uint32_t));
	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
				   CMD_TYPE_MESH_SET_BOOTFLAG);
	if (ret)
		return ret;

	return strlen(buf);
}

/**
 * boottime_show - Get function for sysfs attribute boottime
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer where data will be returned
 */
static ssize_t boottime_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct mrvl_mesh_defaults defs;
	int ret;

	ret = mesh_get_default_parameters(dev, &defs);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%d\n", defs.boottime);
}

/**
 * boottime_store - Set function for sysfs attribute boottime
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer that contains new attribute value
 * @count: size of buffer
 */
static ssize_t boottime_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
	struct cmd_ds_mesh_config cmd;
	uint32_t datum;
	int ret;

	ret = kstrtouint(buf, 10, &datum);
	if (ret)
		return ret;
	if (datum > 255)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));

	/* A too small boot time will result in the device booting into
	 * standalone (no-host) mode before the host can take control of it,
	 * so the change will be hard to revert. This may be a desired
	 * feature (e.g to configure a very fast boot time for devices that
	 * will not be attached to a host), but dangerous. So I'm enforcing a
	 * lower limit of 20 seconds: remove and recompile the driver if this
	 * does not work for you.
	 */
	datum = (datum < 20) ? 20 : datum;
	cmd.data[0] = datum;
	cmd.length = cpu_to_le16(sizeof(uint8_t));
	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
				   CMD_TYPE_MESH_SET_BOOTTIME);
	if (ret)
		return ret;

	return strlen(buf);
}
/**
 * channel_show - Get function for sysfs attribute channel
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer where data will be returned
 */
static ssize_t channel_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct mrvl_mesh_defaults defs;
	int ret;

	ret = mesh_get_default_parameters(dev, &defs);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%d\n", le16_to_cpu(defs.channel));
}

/**
 * channel_store - Set function for sysfs attribute channel
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer that contains new attribute value
 * @count: size of buffer
 */
static ssize_t channel_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
	struct cmd_ds_mesh_config cmd;
	uint32_t datum;
	int ret;

	ret = kstrtouint(buf, 10, &datum);
	if (ret)
		return ret;
	if (datum < 1 || datum > 11)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	*((__le16 *)&cmd.data[0]) = cpu_to_le16(datum);
	cmd.length = cpu_to_le16(sizeof(uint16_t));
	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
				   CMD_TYPE_MESH_SET_DEF_CHANNEL);
	if (ret)
		return ret;

	return strlen(buf);
}

/**
 * mesh_id_show - Get function for sysfs attribute mesh_id
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer where data will be returned
 */
static ssize_t mesh_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct mrvl_mesh_defaults defs;
	int ret;

	ret = mesh_get_default_parameters(dev, &defs);
	if (ret)
		return ret;

	if (defs.meshie.val.mesh_id_len > IEEE80211_MAX_SSID_LEN) {
		dev_err(dev, "inconsistent mesh ID length\n");
		defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN;
	}

	memcpy(buf, defs.meshie.val.mesh_id, defs.meshie.val.mesh_id_len);
	buf[defs.meshie.val.mesh_id_len] = '\n';
	buf[defs.meshie.val.mesh_id_len + 1] = '\0';

	return defs.meshie.val.mesh_id_len + 1;
}
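/*
 * Note (added): sysfs hands the trailing newline to the store function in
 * @buf, so @count is the SSID length plus one; mesh_id_store() below strips
 * it again with len = count - 1.
 */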
/**
 * mesh_id_store - Set function for sysfs attribute mesh_id
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer that contains new attribute value
 * @count: size of buffer
 */
static ssize_t mesh_id_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct cmd_ds_mesh_config cmd;
	struct mrvl_mesh_defaults defs;
	struct mrvl_meshie *ie;
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
	int len;
	int ret;

	if (count < 2 || count > IEEE80211_MAX_SSID_LEN + 1)
		return -EINVAL;

	memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
	ie = (struct mrvl_meshie *)&cmd.data[0];

	/* fetch all other Information Element parameters */
	ret = mesh_get_default_parameters(dev, &defs);

	cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));

	/* transfer IE elements */
	memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));

	len = count - 1;
	memcpy(ie->val.mesh_id, buf, len);
	/* SSID len */
	ie->val.mesh_id_len = len;
	/* IE len */
	ie->len = sizeof(struct mrvl_meshie_val) - IEEE80211_MAX_SSID_LEN + len;

	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
				   CMD_TYPE_MESH_SET_MESH_IE);
	if (ret)
		return ret;

	return strlen(buf);
}

/**
 * protocol_id_show - Get function for sysfs attribute protocol_id
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer where data will be returned
 */
static ssize_t protocol_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct mrvl_mesh_defaults defs;
	int ret;

	ret = mesh_get_default_parameters(dev, &defs);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%d\n", defs.meshie.val.active_protocol_id);
}

/**
 * protocol_id_store - Set function for sysfs attribute protocol_id
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer that contains new attribute value
 * @count: size of buffer
 */
static ssize_t protocol_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct cmd_ds_mesh_config cmd;
	struct mrvl_mesh_defaults defs;
	struct mrvl_meshie *ie;
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
	uint32_t datum;
	int ret;

	ret = kstrtouint(buf, 10, &datum);
	if (ret)
		return ret;
	if (datum > 255)
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));
	/* fetch all other Information Element parameters */
	ret = mesh_get_default_parameters(dev, &defs);

	cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));

	/* transfer IE elements */
	ie = (struct mrvl_meshie *)&cmd.data[0];
	memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
	/* update protocol id */
	ie->val.active_protocol_id = datum;

	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
				   CMD_TYPE_MESH_SET_MESH_IE);
	if (ret)
		return ret;

	return strlen(buf);
}

/**
 * metric_id_show - Get function for sysfs attribute metric_id
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer where data will be returned
 */
static ssize_t metric_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct mrvl_mesh_defaults defs;
	int ret;

	ret = mesh_get_default_parameters(dev, &defs);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%d\n", defs.meshie.val.active_metric_id);
}

/**
 * metric_id_store - Set function for sysfs attribute metric_id
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer that contains new attribute value
 * @count: size of buffer
 */
static ssize_t metric_id_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct cmd_ds_mesh_config cmd;
	struct mrvl_mesh_defaults defs;
	struct mrvl_meshie *ie;
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
	uint32_t datum;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	ret = sscanf(buf, "%d", &datum);
	if ((ret != 1) || (datum > 255))
		return -EINVAL;

	/* fetch all other Information Element parameters */
	ret = mesh_get_default_parameters(dev, &defs);

	cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));

	/* transfer IE elements */
	ie = (struct mrvl_meshie *)&cmd.data[0];
	memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
	/* update metric id */
	ie->val.active_metric_id = datum;

	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
				   CMD_TYPE_MESH_SET_MESH_IE);
	if (ret)
		return ret;

	return strlen(buf);
}

/**
 * capability_show - Get function for sysfs attribute capability
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer where data will be returned
 */
static ssize_t capability_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct mrvl_mesh_defaults defs;
	int ret;

	ret = mesh_get_default_parameters(dev, &defs);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%d\n", defs.meshie.val.mesh_capability);
}
/**
 * capability_store - Set function for sysfs attribute capability
 * @dev: the &struct device
 * @attr: device attributes
 * @buf: buffer that contains new attribute value
 * @count: size of buffer
 */
static ssize_t capability_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cmd_ds_mesh_config cmd;
	struct mrvl_mesh_defaults defs;
	struct mrvl_meshie *ie;
	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
	uint32_t datum;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	ret = sscanf(buf, "%d", &datum);
	if ((ret != 1) || (datum > 255))
		return -EINVAL;

	/* fetch all other Information Element parameters */
	ret = mesh_get_default_parameters(dev, &defs);

	cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie));

	/* transfer IE elements */
	ie = (struct mrvl_meshie *)&cmd.data[0];
	memcpy(ie, &defs.meshie, sizeof(struct mrvl_meshie));
	/* update value */
	ie->val.mesh_capability = datum;

	ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
				   CMD_TYPE_MESH_SET_MESH_IE);
	if (ret)
		return ret;

	return strlen(buf);
}

static DEVICE_ATTR_RW(bootflag);
static DEVICE_ATTR_RW(boottime);
static DEVICE_ATTR_RW(channel);
static DEVICE_ATTR_RW(mesh_id);
static DEVICE_ATTR_RW(protocol_id);
static DEVICE_ATTR_RW(metric_id);
static DEVICE_ATTR_RW(capability);

static struct attribute *boot_opts_attrs[] = {
	&dev_attr_bootflag.attr,
	&dev_attr_boottime.attr,
	&dev_attr_channel.attr,
	NULL
};

static const struct attribute_group boot_opts_group = {
	.name = "boot_options",
	.attrs = boot_opts_attrs,
};

static struct attribute *mesh_ie_attrs[] = {
	&dev_attr_mesh_id.attr,
	&dev_attr_protocol_id.attr,
	&dev_attr_metric_id.attr,
	&dev_attr_capability.attr,
	NULL
};

static const struct attribute_group mesh_ie_group = {
	.name = "mesh_ie",
	.attrs = mesh_ie_attrs,
};
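/*
 * Added example (illustrative): once lbs_add_mesh() registers the groups
 * above on the mshX net_device, the persistent defaults appear under
 *   /sys/class/net/mshX/boot_options/{bootflag,boottime,channel}
 *   /sys/class/net/mshX/mesh_ie/{mesh_id,protocol_id,metric_id,capability}
 */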
/***************************************************************************
 * Initializing and starting, stopping mesh
 */

/*
 * Check mesh FW version and appropriately send the mesh start
 * command
 */
void lbs_init_mesh(struct lbs_private *priv)
{
	/* Determine mesh_fw_ver from fwrelease and fwcapinfo */
	/* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
	/* 5.110.22 have mesh command with 0xa3 command id */
	/* 10.0.0.p0 FW brings in mesh config command with different id */
	/* Check FW version MSB and initialize mesh_fw_ver */
	if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) {
		/* Enable mesh, if supported, and work out which TLV it uses.
		   0x100 + 291 is an unofficial value used in 5.110.20.pXX
		   0x100 + 37 is the official value used in 5.110.21.pXX
		   but we check them in that order because 20.pXX doesn't
		   give an error -- it just silently fails. */

		/* 5.110.20.pXX firmware will fail the command if the channel
		   doesn't match the existing channel. But only if the TLV
		   is correct. If the channel is wrong, _BOTH_ versions will
		   give an error to 0x100+291, and allow 0x100+37 to succeed.
		   It's just that 5.110.20.pXX will not have done anything
		   useful */

		priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID;
		if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1)) {
			priv->mesh_tlv = TLV_TYPE_MESH_ID;
			if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1))
				priv->mesh_tlv = 0;
		}
	} else if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
		   (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)) {
		/* 10.0.0.pXX new firmwares should succeed with TLV
		 * 0x100+37; Do not invoke command with old TLV.
		 */
		priv->mesh_tlv = TLV_TYPE_MESH_ID;
		if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1))
			priv->mesh_tlv = 0;
	}

	/* Stop meshing until interface is brought up */
	lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, 1);
}

void lbs_start_mesh(struct lbs_private *priv)
{
	lbs_add_mesh(priv);

	if (device_create_file(&priv->dev->dev, &dev_attr_lbs_mesh))
		netdev_err(priv->dev, "cannot register lbs_mesh attribute\n");
}

int lbs_deinit_mesh(struct lbs_private *priv)
{
	struct net_device *dev = priv->dev;
	int ret = 0;

	if (priv->mesh_tlv) {
		device_remove_file(&dev->dev, &dev_attr_lbs_mesh);
		ret = 1;
	}

	return ret;
}

/**
 * lbs_mesh_stop - close the mshX interface
 *
 * @dev: A pointer to &net_device structure
 * returns: 0
 */
static int lbs_mesh_stop(struct net_device *dev)
{
	struct lbs_private *priv = dev->ml_priv;

	lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP,
			lbs_mesh_get_channel(priv));

	spin_lock_irq(&priv->driver_lock);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	spin_unlock_irq(&priv->driver_lock);

	lbs_update_mcast(priv);
	if (!lbs_iface_active(priv))
		lbs_stop_iface(priv);

	return 0;
}

/**
 * lbs_mesh_dev_open - open the mshX interface
 *
 * @dev: A pointer to &net_device structure
 * returns: 0 or -EBUSY if monitor mode active
 */
static int lbs_mesh_dev_open(struct net_device *dev)
{
	struct lbs_private *priv = dev->ml_priv;
	int ret = 0;

	if (!priv->iface_running) {
		ret = lbs_start_iface(priv);
		if (ret)
			goto out;
	}

	spin_lock_irq(&priv->driver_lock);

	if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
		ret = -EBUSY;
		spin_unlock_irq(&priv->driver_lock);
		goto out;
	}

	netif_carrier_on(dev);

	if (!priv->tx_pending_len)
		netif_wake_queue(dev);

	spin_unlock_irq(&priv->driver_lock);

	ret = lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
			      lbs_mesh_get_channel(priv));

out:
	return ret;
}

static const struct net_device_ops mesh_netdev_ops = {
	.ndo_open		= lbs_mesh_dev_open,
	.ndo_stop		= lbs_mesh_stop,
	.ndo_start_xmit		= lbs_hard_start_xmit,
	.ndo_set_mac_address	= lbs_set_mac_address,
	.ndo_set_rx_mode	= lbs_set_multicast_list,
};

/**
 * lbs_add_mesh - add mshX interface
 *
 * @priv: A pointer to the &struct lbs_private structure
 * returns: 0 if successful, -X otherwise
 */
static int lbs_add_mesh(struct lbs_private *priv)
{
	struct net_device *mesh_dev = NULL;
	struct wireless_dev *mesh_wdev;
	int ret = 0;

	/* Allocate a virtual mesh device */
	mesh_wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
	if (!mesh_wdev) {
		lbs_deb_mesh("init mshX wireless device failed\n");
		ret = -ENOMEM;
		goto done;
	}

	mesh_dev = alloc_netdev(0, "msh%d", NET_NAME_UNKNOWN, ether_setup);
	if (!mesh_dev) {
		lbs_deb_mesh("init mshX device failed\n");
		ret = -ENOMEM;
		goto err_free_wdev;
	}

	mesh_wdev->iftype = NL80211_IFTYPE_MESH_POINT;
	mesh_wdev->wiphy = priv->wdev->wiphy;

	if (priv->mesh_tlv) {
		sprintf(mesh_wdev->u.mesh.id, "mesh");
		mesh_wdev->u.mesh.id_up_len = 4;
	}

	mesh_wdev->netdev = mesh_dev;

	mesh_dev->ml_priv = priv;
	mesh_dev->ieee80211_ptr = mesh_wdev;
	priv->mesh_dev = mesh_dev;

	mesh_dev->netdev_ops = &mesh_netdev_ops;
	mesh_dev->ethtool_ops = &lbs_ethtool_ops;
	eth_hw_addr_inherit(mesh_dev, priv->dev);

	SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);

	mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
	mesh_dev->sysfs_groups[0] = &lbs_mesh_attr_group;
	mesh_dev->sysfs_groups[1] = &boot_opts_group;
	mesh_dev->sysfs_groups[2] = &mesh_ie_group;

	/* Register virtual mesh interface */
	ret = register_netdev(mesh_dev);
	if (ret) {
		pr_err("cannot register mshX virtual interface\n");
		goto err_free_netdev;
	}

	/* Everything successful */
	ret = 0;
	goto done;

err_free_netdev:
	free_netdev(mesh_dev);

err_free_wdev:
	kfree(mesh_wdev);

done:
	return ret;
}

void lbs_remove_mesh(struct lbs_private *priv)
{
	struct net_device *mesh_dev;

	mesh_dev = priv->mesh_dev;
	if (!mesh_dev)
		return;

	netif_stop_queue(mesh_dev);
	netif_carrier_off(mesh_dev);
	unregister_netdev(mesh_dev);
	priv->mesh_dev = NULL;
	kfree(mesh_dev->ieee80211_ptr);
	free_netdev(mesh_dev);
}

/***************************************************************************
 * Sending and receiving
 */
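/*
 * Added for clarity: the two helpers below steer traffic between the main
 * and mesh net_devices. On RX, old-TLV firmware marks mesh frames with
 * RxPD_MESH_FRAME while new-TLV firmware tags them with the mesh BSS
 * number; on TX the matching mark is applied to the txpd.
 */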
struct net_device *lbs_mesh_set_dev(struct lbs_private *priv,
				    struct net_device *dev, struct rxpd *rxpd)
{
	if (priv->mesh_dev) {
		if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) {
			if (rxpd->rx_control & RxPD_MESH_FRAME)
				dev = priv->mesh_dev;
		} else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) {
			if (rxpd->u.bss.bss_num == MESH_IFACE_ID)
				dev = priv->mesh_dev;
		}
	}
	return dev;
}

void lbs_mesh_set_txpd(struct lbs_private *priv,
		       struct net_device *dev, struct txpd *txpd)
{
	if (dev == priv->mesh_dev) {
		if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID)
			txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
		else if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
			txpd->u.bss.bss_num = MESH_IFACE_ID;
	}
}

/***************************************************************************
 * Ethtool related
 */

static const char mesh_stat_strings[MESH_STATS_NUM][ETH_GSTRING_LEN] = {
	"drop_duplicate_bcast",
	"drop_ttl_zero",
	"drop_no_fwd_route",
	"drop_no_buffers",
	"fwded_unicast_cnt",
	"fwded_bcast_cnt",
	"drop_blind_table",
	"tx_failed_cnt"
};

void lbs_mesh_ethtool_get_stats(struct net_device *dev,
				struct ethtool_stats *stats, uint64_t *data)
{
	struct lbs_private *priv = dev->ml_priv;
	struct cmd_ds_mesh_access mesh_access;
	int ret;

	/* Get Mesh Statistics */
	ret = lbs_mesh_access(priv, CMD_ACT_MESH_GET_STATS, &mesh_access);

	if (ret) {
		memset(data, 0, MESH_STATS_NUM * (sizeof(uint64_t)));
		return;
	}

	priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]);
	priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]);
	priv->mstats.fwd_drop_noroute = le32_to_cpu(mesh_access.data[2]);
	priv->mstats.fwd_drop_nobuf = le32_to_cpu(mesh_access.data[3]);
	priv->mstats.fwd_unicast_cnt = le32_to_cpu(mesh_access.data[4]);
	priv->mstats.fwd_bcast_cnt = le32_to_cpu(mesh_access.data[5]);
	priv->mstats.drop_blind = le32_to_cpu(mesh_access.data[6]);
	priv->mstats.tx_failed_cnt = le32_to_cpu(mesh_access.data[7]);

	data[0] = priv->mstats.fwd_drop_rbt;
	data[1] = priv->mstats.fwd_drop_ttl;
	data[2] = priv->mstats.fwd_drop_noroute;
	data[3] = priv->mstats.fwd_drop_nobuf;
	data[4] = priv->mstats.fwd_unicast_cnt;
	data[5] = priv->mstats.fwd_bcast_cnt;
	data[6] = priv->mstats.drop_blind;
	data[7] = priv->mstats.tx_failed_cnt;
}

int lbs_mesh_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	struct lbs_private *priv = dev->ml_priv;

	if (sset == ETH_SS_STATS && dev == priv->mesh_dev)
		return MESH_STATS_NUM;

	return -EOPNOTSUPP;
}

void lbs_mesh_ethtool_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *s)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(s, mesh_stat_strings, sizeof(mesh_stat_strings));
		break;
	}
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SQ905 subdriver
 *
 * Copyright (C) 2008, 2009 Adam Baker and Theodore Kilgore
 */

/*
 * History and Acknowledgments
 *
 * The original Linux driver for SQ905 based cameras was written by
 * Marcell Lengyel and further developed by many other contributors
 * and is available from http://sourceforge.net/projects/sqcam/
 *
 * This driver takes advantage of the reverse engineering work done for
 * that driver and for libgphoto2 but shares no code with them.
 *
 * This driver has used as a base the finepix driver and other gspca
 * based drivers and may still contain code fragments taken from those
 * drivers.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "sq905"

#include <linux/workqueue.h>
#include <linux/slab.h>
#include "gspca.h"

MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>, Theodore Kilgore <kilgota@auburn.edu>");
MODULE_DESCRIPTION("GSPCA/SQ905 USB Camera Driver");
MODULE_LICENSE("GPL");

/* Default timeouts, in ms */
#define SQ905_CMD_TIMEOUT 500
#define SQ905_DATA_TIMEOUT 1000

/* Maximum transfer size to use. */
#define SQ905_MAX_TRANSFER 0x8000
#define FRAME_HEADER_LEN 64

/* The known modes, or registers. These go in the "value" slot. */
/* 00 is "none" obviously */
#define SQ905_BULK_READ	0x03	/* precedes any bulk read */
#define SQ905_COMMAND	0x06	/* precedes the command codes below */
#define SQ905_PING	0x07	/* when reading an "idling" command */
#define SQ905_READ_DONE 0xc0	/* ack bulk read completed */

/* Any non-zero value in the bottom 2 bits of the 2nd byte of
 * the ID appears to indicate the camera can do 640*480. If the
 * LSB of that byte is set the image is just upside down, otherwise
 * it is rotated 180 degrees. */
#define SQ905_HIRES_MASK	0x00000300
#define SQ905_ORIENTATION_MASK	0x00000100
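/*
 * Worked example (added; the ID value is made up for illustration): for an
 * ID of 0x09050153, (id & SQ905_HIRES_MASK) is non-zero, so the 640x480
 * mode stays available, and (id & SQ905_ORIENTATION_MASK) is set, so the
 * image is upside down rather than rotated 180 degrees.
 */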
/* Some command codes. These go in the "index" slot. */
#define SQ905_ID	0xf0	/* asks for model string */
#define SQ905_CONFIG	0x20	/* gets photo alloc. table, not used here */
#define SQ905_DATA	0x30	/* accesses photo data, not used here */
#define SQ905_CLEAR	0xa0	/* clear everything */
#define SQ905_CAPTURE_LOW  0x60	/* Starts capture at 160x120 */
#define SQ905_CAPTURE_MED  0x61	/* Starts capture at 320x240 */
#define SQ905_CAPTURE_HIGH 0x62	/* Starts capture at 640x480 (some cams only) */
/* note that the capture command also controls the output dimensions */

/* Structure to hold all of our device specific stuff */
struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */

	/*
	 * Driver stuff
	 */
	struct work_struct work_struct;
	struct workqueue_struct *work_thread;
};

static struct v4l2_pix_format sq905_mode[] = {
	{ 160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
		.bytesperline = 160,
		.sizeimage = 160 * 120,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 0},
	{ 320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
		.bytesperline = 320,
		.sizeimage = 320 * 240,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 0},
	{ 640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
		.bytesperline = 640,
		.sizeimage = 640 * 480,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 0}
};

/*
 * Send a command to the camera.
 */
static int sq905_command(struct gspca_dev *gspca_dev, u16 index)
{
	int ret;

	gspca_dev->usb_buf[0] = '\0';
	ret = usb_control_msg(gspca_dev->dev,
			      usb_sndctrlpipe(gspca_dev->dev, 0),
			      USB_REQ_SYNCH_FRAME,	/* request */
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      SQ905_COMMAND, index, gspca_dev->usb_buf, 1,
			      SQ905_CMD_TIMEOUT);
	if (ret < 0) {
		pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret);
		return ret;
	}

	ret = usb_control_msg(gspca_dev->dev,
			      usb_rcvctrlpipe(gspca_dev->dev, 0),
			      USB_REQ_SYNCH_FRAME,	/* request */
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      SQ905_PING, 0, gspca_dev->usb_buf, 1,
			      SQ905_CMD_TIMEOUT);
	if (ret < 0) {
		pr_err("%s: usb_control_msg failed 2 (%d)\n", __func__, ret);
		return ret;
	}

	return 0;
}

/*
 * Acknowledge the end of a frame - see warning on sq905_command.
 */
static int sq905_ack_frame(struct gspca_dev *gspca_dev)
{
	int ret;

	gspca_dev->usb_buf[0] = '\0';
	ret = usb_control_msg(gspca_dev->dev,
			      usb_sndctrlpipe(gspca_dev->dev, 0),
			      USB_REQ_SYNCH_FRAME,	/* request */
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      SQ905_READ_DONE, 0, gspca_dev->usb_buf, 1,
			      SQ905_CMD_TIMEOUT);
	if (ret < 0) {
		pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret);
		return ret;
	}

	return 0;
}

/*
 * request and read a block of data - see warning on sq905_command.
 */
static int sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size,
			   int need_lock)
{
	int ret;
	int act_len = 0;

	gspca_dev->usb_buf[0] = '\0';
	if (need_lock)
		mutex_lock(&gspca_dev->usb_lock);
	ret = usb_control_msg(gspca_dev->dev,
			      usb_sndctrlpipe(gspca_dev->dev, 0),
			      USB_REQ_SYNCH_FRAME,	/* request */
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      SQ905_BULK_READ, size, gspca_dev->usb_buf, 1,
			      SQ905_CMD_TIMEOUT);
	if (need_lock)
		mutex_unlock(&gspca_dev->usb_lock);
	if (ret < 0) {
		pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret);
		return ret;
	}
	ret = usb_bulk_msg(gspca_dev->dev,
			   usb_rcvbulkpipe(gspca_dev->dev, 0x81),
			   data, size, &act_len, SQ905_DATA_TIMEOUT);

	/* on success it returns 0, otherwise a negative error code */
	if (ret < 0 || act_len != size) {
		pr_err("bulk read fail (%d) len %d/%d\n", ret, act_len, size);
		return -EIO;
	}
	return 0;
}
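/*
 * Note (added): the "warning" the comments above refer to appears to
 * concern serializing access to gspca_dev->usb_buf; sq905_command() and
 * sq905_ack_frame() are called with usb_lock already held, while
 * sq905_read_data() can take the lock itself when need_lock is set.
 */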
/*
 * This function is called as a workqueue function and runs whenever the camera
 * is streaming data. Because it is a workqueue function it is allowed to sleep
 * so we can use synchronous USB calls. To avoid possible collisions with other
 * threads attempting to use gspca_dev->usb_buf we take the usb_lock when
 * performing USB operations using it. In practice we don't really need this
 * as the camera doesn't provide any controls.
 */
static void sq905_dostream(struct work_struct *work)
{
	struct sd *dev = container_of(work, struct sd, work_struct);
	struct gspca_dev *gspca_dev = &dev->gspca_dev;
	int bytes_left;	/* bytes remaining in current frame. */
	int data_len;	/* size to use for the next read. */
	int header_read; /* true if we have already read the frame header. */
	int packet_type;
	int frame_sz;
	int ret;
	u8 *data;
	u8 *buffer;

	buffer = kmalloc(SQ905_MAX_TRANSFER, GFP_KERNEL);
	if (!buffer) {
		pr_err("Couldn't allocate USB buffer\n");
		goto quit_stream;
	}

	frame_sz = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].sizeimage +
		   FRAME_HEADER_LEN;

	while (gspca_dev->present && gspca_dev->streaming) {
#ifdef CONFIG_PM
		if (gspca_dev->frozen)
			break;
#endif
		/* request some data and then read it until we have
		 * a complete frame. */
		bytes_left = frame_sz;
		header_read = 0;

		/* Note we do not check for gspca_dev->streaming here, as
		   we must finish reading an entire frame, otherwise the
		   next time we stream we start reading in the middle of a
		   frame. */
		while (bytes_left > 0 && gspca_dev->present) {
			data_len = bytes_left > SQ905_MAX_TRANSFER ?
				SQ905_MAX_TRANSFER : bytes_left;
			ret = sq905_read_data(gspca_dev, buffer, data_len, 1);
			if (ret < 0)
				goto quit_stream;
			gspca_dbg(gspca_dev, D_PACK,
				  "Got %d bytes out of %d for frame\n",
				  data_len, bytes_left);
			bytes_left -= data_len;
			data = buffer;
			if (!header_read) {
				packet_type = FIRST_PACKET;
				/* The first 64 bytes of each frame are
				 * a header full of FF 00 bytes */
				data += FRAME_HEADER_LEN;
				data_len -= FRAME_HEADER_LEN;
				header_read = 1;
			} else if (bytes_left == 0) {
				packet_type = LAST_PACKET;
			} else {
				packet_type = INTER_PACKET;
			}
			gspca_frame_add(gspca_dev, packet_type,
					data, data_len);
			/* If entire frame fits in one packet we still
			   need to add a LAST_PACKET */
			if (packet_type == FIRST_PACKET && bytes_left == 0)
				gspca_frame_add(gspca_dev, LAST_PACKET,
						NULL, 0);
		}
		if (gspca_dev->present) {
			/* acknowledge the frame */
			mutex_lock(&gspca_dev->usb_lock);
			ret = sq905_ack_frame(gspca_dev);
			mutex_unlock(&gspca_dev->usb_lock);
			if (ret < 0)
				goto quit_stream;
		}
	}

quit_stream:
	if (gspca_dev->present) {
		mutex_lock(&gspca_dev->usb_lock);
		sq905_command(gspca_dev, SQ905_CLEAR);
		mutex_unlock(&gspca_dev->usb_lock);
	}
	kfree(buffer);
}
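/*
 * Worked example (added): at 320x240 the frame payload is 320 * 240 =
 * 76800 bytes plus the 64-byte header, i.e. frame_sz = 76864, which
 * sq905_dostream() above fetches as 32768 + 32768 + 11328 bytes using
 * SQ905_MAX_TRANSFER (0x8000) sized reads.
 */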
/* This function is called at probe time just before sd_init */
static int sd_config(struct gspca_dev *gspca_dev,
		     const struct usb_device_id *id)
{
	struct cam *cam = &gspca_dev->cam;
	struct sd *dev = (struct sd *) gspca_dev;

	/* We don't use the buffer gspca allocates so make it small. */
	cam->bulk = 1;
	cam->bulk_size = 64;

	INIT_WORK(&dev->work_struct, sq905_dostream);

	return 0;
}

/* called on streamoff with alt==0 and on disconnect */
/* the usb_lock is held at entry - restore on exit */
static void sd_stop0(struct gspca_dev *gspca_dev)
{
	struct sd *dev = (struct sd *) gspca_dev;

	/* wait for the work queue to terminate */
	mutex_unlock(&gspca_dev->usb_lock);
	/* This waits for sq905_dostream to finish */
	destroy_workqueue(dev->work_thread);
	dev->work_thread = NULL;
	mutex_lock(&gspca_dev->usb_lock);
}

/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
	u32 ident;
	int ret;

	/* connect to the camera, read the model ID, and store it away */
	ret = sq905_command(gspca_dev, SQ905_CLEAR);
	if (ret < 0)
		return ret;
	ret = sq905_command(gspca_dev, SQ905_ID);
	if (ret < 0)
		return ret;
	ret = sq905_read_data(gspca_dev, gspca_dev->usb_buf, 4, 0);
	if (ret < 0)
		return ret;
	/* usb_buf is allocated with kmalloc so is aligned.
	 * Camera model number is the right way round if we assume this
	 * reverse engineered ID is supposed to be big endian. */
	ident = be32_to_cpup((__be32 *)gspca_dev->usb_buf);
	ret = sq905_command(gspca_dev, SQ905_CLEAR);
	if (ret < 0)
		return ret;
	gspca_dbg(gspca_dev, D_CONF, "SQ905 camera ID %08x detected\n", ident);
	gspca_dev->cam.cam_mode = sq905_mode;
	gspca_dev->cam.nmodes = ARRAY_SIZE(sq905_mode);
	if (!(ident & SQ905_HIRES_MASK))
		gspca_dev->cam.nmodes--;

	if (ident & SQ905_ORIENTATION_MASK)
		gspca_dev->cam.input_flags = V4L2_IN_ST_VFLIP;
	else
		gspca_dev->cam.input_flags = V4L2_IN_ST_VFLIP |
					     V4L2_IN_ST_HFLIP;
	return 0;
}

/* Set up for getting frames. */
static int sd_start(struct gspca_dev *gspca_dev)
{
	struct sd *dev = (struct sd *) gspca_dev;
	int ret;

	/* "Open the shutter" and set size, to start capture */
	switch (gspca_dev->curr_mode) {
	default:
/* case 2: */
		gspca_dbg(gspca_dev, D_STREAM,
			  "Start streaming at high resolution\n");
		ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_HIGH);
		break;
	case 1:
		gspca_dbg(gspca_dev, D_STREAM,
			  "Start streaming at medium resolution\n");
		ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_MED);
		break;
	case 0:
		gspca_dbg(gspca_dev, D_STREAM,
			  "Start streaming at low resolution\n");
		ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_LOW);
	}

	if (ret < 0) {
		gspca_err(gspca_dev, "Start streaming command failed\n");
		return ret;
	}
	/* Start the workqueue function to do the streaming */
	dev->work_thread = create_singlethread_workqueue(MODULE_NAME);
	if (!dev->work_thread)
		return -ENOMEM;

	queue_work(dev->work_thread, &dev->work_struct);

	return 0;
}

/* Table of supported USB devices */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x2770, 0x9120)},
	{}
};

MODULE_DEVICE_TABLE(usb, device_table);

/* sub-driver description */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.config = sd_config,
	.init = sd_init,
	.start = sd_start,
	.stop0 = sd_stop0,
};

/* -- device connect -- */
static int sd_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
			       THIS_MODULE);
}

static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
	.reset_resume = gspca_resume,
#endif
};

module_usb_driver(sd_driver);
// SPDX-License-Identifier: GPL-2.0
/*
 * When connected to the machine, the Thrustmaster wheels appear as
 * a «generic» hid gamepad called "Thrustmaster FFB Wheel".
 *
 * When in this mode not all functionality of the wheel, such as the force
 * feedback, is available. To enable all functionality of a Thrustmaster
 * wheel we have to send it a specific USB CONTROL request, with a code
 * different for each wheel.
 *
 * This driver tries to understand which model of Thrustmaster wheel the
 * generic "Thrustmaster FFB Wheel" really is and then sends the
 * appropriate control code.
 *
 * Copyright (c) 2020-2021 Dario Pagani <dario.pagani.146+linuxk@gmail.com>
 * Copyright (c) 2020-2021 Kim Kuparinen <kimi.h.kuparinen@gmail.com>
 */
#include <linux/hid.h>
#include <linux/usb.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/module.h>

/*
 * These interrupts are used to prevent a nasty crash when initializing the
 * T300RS. Used in thrustmaster_interrupts().
 */
static const u8 setup_0[] = { 0x42, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 setup_1[] = { 0x0a, 0x04, 0x90, 0x03, 0x00, 0x00, 0x00, 0x00 };
static const u8 setup_2[] = { 0x0a, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00 };
static const u8 setup_3[] = { 0x0a, 0x04, 0x12, 0x10, 0x00, 0x00, 0x00, 0x00 };
static const u8 setup_4[] = { 0x0a, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00 };
static const u8 *const setup_arr[] = { setup_0, setup_1, setup_2, setup_3, setup_4 };
static const unsigned int setup_arr_sizes[] = {
	ARRAY_SIZE(setup_0),
	ARRAY_SIZE(setup_1),
	ARRAY_SIZE(setup_2),
	ARRAY_SIZE(setup_3),
	ARRAY_SIZE(setup_4)
};

/*
 * This struct describes, for each type of Thrustmaster wheel, the data
 * needed to identify it and to switch it to its native mode.
 *
 * Note: the values are stored in CPU endianness; the USB protocols always
 * use little endian. The macro cpu_to_le[BIT]() must be used when
 * preparing USB packets, and vice-versa.
 */
struct tm_wheel_info {
	uint16_t wheel_type;

	/*
	 * See where the USB control out packet is prepared...
	 * @TODO The TMX seems to require multiple control codes to switch.
	 */
	uint16_t switch_value;

	char const *const wheel_name;
};
/*
 * Known wheels.
 * Note: TMX does not work as it requires 2 control packets
 */
static const struct tm_wheel_info tm_wheels_infos[] = {
	{0x0306, 0x0006, "Thrustmaster T150RS"},
	{0x0200, 0x0005, "Thrustmaster T300RS (Missing Attachment)"},
	{0x0206, 0x0005, "Thrustmaster T300RS"},
	{0x0209, 0x0005, "Thrustmaster T300RS (Open Wheel Attachment)"},
	{0x020a, 0x0005, "Thrustmaster T300RS (Sparco R383 Mod)"},
	{0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"},
	{0x0002, 0x0002, "Thrustmaster T500RS"}
	//{0x0407, 0x0001, "Thrustmaster TMX"}
};

static const uint8_t tm_wheels_infos_length = 7;

/*
 * This struct contains (in little endian) the response data of the wheel
 * to request 73.
 *
 * Sufficient research to understand what each field does has not been
 * conducted yet; the position and meaning of the fields are just a very
 * optimistic guess based on instinct....
 */
struct __packed tm_wheel_response {
	/*
	 * Seems to be the type of packet
	 * - 0x0049 if is data.a (15 bytes)
	 * - 0x0047 if is data.b (7 bytes)
	 */
	uint16_t type;

	union {
		struct __packed {
			uint16_t field0;
			uint16_t field1;
			/*
			 * Seems to be the model code of the wheel
			 * (see the tm_wheels_infos table for known values)
			 */
			uint16_t model;

			uint16_t field2;
			uint16_t field3;
			uint16_t field4;
			uint16_t field5;
		} a;
		struct __packed {
			uint16_t field0;
			uint16_t field1;
			uint16_t model;
		} b;
	} data;
};

struct tm_wheel {
	struct usb_device *usb_dev;
	struct urb *urb;

	struct usb_ctrlrequest *model_request;
	struct tm_wheel_response *response;

	struct usb_ctrlrequest *change_request;
};

/* The control packet to send to wheel */
static const struct usb_ctrlrequest model_request = {
	.bRequestType = 0xc1,
	.bRequest = 73,
	.wValue = 0,
	.wIndex = 0,
	.wLength = cpu_to_le16(0x0010)
};

static const struct usb_ctrlrequest change_request = {
	.bRequestType = 0x41,
	.bRequest = 83,
	.wValue = 0, // Will be filled by the driver
	.wIndex = 0,
	.wLength = 0
};
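/*
 * Flow sketch (added for clarity): probe submits model_request (bRequest
 * 73, device-to-host) to read a tm_wheel_response; the model id found
 * there is looked up in tm_wheels_infos[] and the matching switch_value is
 * then written back with change_request (bRequest 83, host-to-device) to
 * switch the wheel to its native mode.
 */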
/*
 * On some setups initializing the T300RS crashes the kernel,
 * these interrupts fix that particular issue. So far they haven't caused any
 * adverse effects in other wheels.
 */
static void thrustmaster_interrupts(struct hid_device *hdev)
{
	int ret, trans, i, b_ep;
	u8 *send_buf = kmalloc(256, GFP_KERNEL);
	struct usb_host_endpoint *ep;
	struct device *dev = &hdev->dev;
	struct usb_interface *usbif = to_usb_interface(dev->parent);
	struct usb_device *usbdev = interface_to_usbdev(usbif);

	if (!send_buf) {
		hid_err(hdev, "failed allocating send buffer\n");
		return;
	}

	if (usbif->cur_altsetting->desc.bNumEndpoints < 2) {
		kfree(send_buf);
		hid_err(hdev, "Wrong number of endpoints?\n");
		return;
	}

	ep = &usbif->cur_altsetting->endpoint[1];
	b_ep = ep->desc.bEndpointAddress;

	/* Are the expected endpoints present? */
	u8 ep_addr[2] = {b_ep, 0};

	if (!usb_check_int_endpoints(usbif, ep_addr)) {
		kfree(send_buf);	/* don't leak the buffer on this error path */
		hid_err(hdev, "Unexpected non-int endpoint\n");
		return;
	}

	for (i = 0; i < ARRAY_SIZE(setup_arr); ++i) {
		memcpy(send_buf, setup_arr[i], setup_arr_sizes[i]);

		ret = usb_interrupt_msg(usbdev,
					usb_sndintpipe(usbdev, b_ep),
					send_buf, setup_arr_sizes[i],
					&trans, USB_CTRL_SET_TIMEOUT);

		if (ret) {
			hid_err(hdev, "setup data couldn't be sent\n");
			kfree(send_buf);
			return;
		}
	}

	kfree(send_buf);
}

static void thrustmaster_change_handler(struct urb *urb)
{
	struct hid_device *hdev = urb->context;

	// The wheel seems to kill itself before answering the host and therefore is violating the USB protocol...
	if (urb->status == 0 || urb->status == -EPROTO || urb->status == -EPIPE)
		hid_info(hdev, "Success?! The wheel should have been initialized!\n");
	else
		hid_warn(hdev, "URB to change wheel mode seems to have failed with error %d\n", urb->status);
}

/*
 * Called by the USB subsystem when the wheel responds to our request
 * to get [what seems to be] the wheel's model.
 *
 * If the model id is recognized then we send the appropriate USB CONTROL
 * REQUEST to switch the wheel to its full capabilities
 */
static void thrustmaster_model_handler(struct urb *urb)
{
	struct hid_device *hdev = urb->context;
	struct tm_wheel *tm_wheel = hid_get_drvdata(hdev);
	uint16_t model = 0;
	int i, ret;
	const struct tm_wheel_info *twi = NULL;

	if (urb->status) {
		hid_err(hdev, "URB to get model id failed with error %d\n", urb->status);
		return;
	}

	if (tm_wheel->response->type == cpu_to_le16(0x49))
		model = le16_to_cpu(tm_wheel->response->data.a.model);
	else if (tm_wheel->response->type == cpu_to_le16(0x47))
		model = le16_to_cpu(tm_wheel->response->data.b.model);
	else {
		hid_err(hdev, "Unknown packet type 0x%x, unable to proceed further with wheel init\n", tm_wheel->response->type);
		return;
	}

	for (i = 0; i < tm_wheels_infos_length && !twi; i++)
		if (tm_wheels_infos[i].wheel_type == model)
			twi = tm_wheels_infos + i;

	if (twi)
		hid_info(hdev, "Wheel with model id 0x%x is a %s\n", model, twi->wheel_name);
	else {
		hid_err(hdev, "Unknown wheel's model id 0x%x, unable to proceed further with wheel init\n", model);
		return;
	}

	tm_wheel->change_request->wValue = cpu_to_le16(twi->switch_value);
	usb_fill_control_urb(
		tm_wheel->urb,
		tm_wheel->usb_dev,
		usb_sndctrlpipe(tm_wheel->usb_dev, 0),
		(char *)tm_wheel->change_request,
		NULL, 0, // We do not expect any response from the wheel
		thrustmaster_change_handler,
		hdev
	);

	ret = usb_submit_urb(tm_wheel->urb, GFP_ATOMIC);
	if (ret)
		hid_err(hdev, "Error %d while submitting the change URB. I am unable to initialize this wheel...\n", ret);
}

static void thrustmaster_remove(struct hid_device *hdev)
{
	struct tm_wheel *tm_wheel = hid_get_drvdata(hdev);

	usb_kill_urb(tm_wheel->urb);

	kfree(tm_wheel->change_request);
	kfree(tm_wheel->response);
	kfree(tm_wheel->model_request);
	usb_free_urb(tm_wheel->urb);
	kfree(tm_wheel);

	hid_hw_stop(hdev);
}
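/*
 * Note (added): thrustmaster_probe() below unwinds failures through the
 * error6..error0 label ladder, releasing resources in the reverse order
 * of their allocation.
 */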
/*
 * Function called by HID when a hid Thrustmaster FFB wheel is connected to the host.
 * This function starts the hid dev, tries to allocate the tm_wheel data structure and
 * finally sends a USB CONTROL REQUEST to the wheel to get [what seems to be] its
 * model type.
 */
static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret = 0;
	struct tm_wheel *tm_wheel = NULL;

	if (!hid_is_usb(hdev))
		return -EINVAL;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed with error %d\n", ret);
		goto error0;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (ret) {
		hid_err(hdev, "hw start failed with error %d\n", ret);
		goto error0;
	}

	// Now we allocate the tm_wheel
	tm_wheel = kzalloc(sizeof(struct tm_wheel), GFP_KERNEL);
	if (!tm_wheel) {
		ret = -ENOMEM;
		goto error1;
	}

	tm_wheel->urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!tm_wheel->urb) {
		ret = -ENOMEM;
		goto error2;
	}

	tm_wheel->model_request = kmemdup(&model_request,
					  sizeof(struct usb_ctrlrequest),
					  GFP_KERNEL);
	if (!tm_wheel->model_request) {
		ret = -ENOMEM;
		goto error3;
	}

	tm_wheel->response = kzalloc(sizeof(struct tm_wheel_response), GFP_KERNEL);
	if (!tm_wheel->response) {
		ret = -ENOMEM;
		goto error4;
	}

	tm_wheel->change_request = kmemdup(&change_request,
					   sizeof(struct usb_ctrlrequest),
					   GFP_KERNEL);
	if (!tm_wheel->change_request) {
		ret = -ENOMEM;
		goto error5;
	}

	tm_wheel->usb_dev = interface_to_usbdev(to_usb_interface(hdev->dev.parent));
	hid_set_drvdata(hdev, tm_wheel);

	thrustmaster_interrupts(hdev);

	usb_fill_control_urb(
		tm_wheel->urb,
		tm_wheel->usb_dev,
		usb_rcvctrlpipe(tm_wheel->usb_dev, 0),
		(char *)tm_wheel->model_request,
		tm_wheel->response,
		sizeof(struct tm_wheel_response),
		thrustmaster_model_handler,
		hdev
	);

	ret = usb_submit_urb(tm_wheel->urb, GFP_ATOMIC);
	if (ret) {
		hid_err(hdev, "Error %d while submitting the URB. I am unable to initialize this wheel...\n", ret);
		goto error6;
	}

	return ret;

error6:	kfree(tm_wheel->change_request);
error5:	kfree(tm_wheel->response);
error4:	kfree(tm_wheel->model_request);
error3:	usb_free_urb(tm_wheel->urb);
error2:	kfree(tm_wheel);
error1:	hid_hw_stop(hdev);
error0:
	return ret;
}

static const struct hid_device_id thrustmaster_devices[] = {
	{ HID_USB_DEVICE(0x044f, 0xb65d)},
	{}
};

MODULE_DEVICE_TABLE(hid, thrustmaster_devices);

static struct hid_driver thrustmaster_driver = {
	.name = "hid-thrustmaster",
	.id_table = thrustmaster_devices,
	.probe = thrustmaster_probe,
	.remove = thrustmaster_remove,
};

module_hid_driver(thrustmaster_driver);

MODULE_AUTHOR("Dario Pagani <dario.pagani.146+linuxk@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver to initialize some steering wheel joysticks from Thrustmaster");
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <asm/processor.h>
#include <linux/kasan.h>

#include <trace/events/ipi.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
static DEFINE_PER_CPU(struct task_struct *, irq_workd);

static void wake_irq_workd(void)
{
	struct task_struct *tsk = __this_cpu_read(irq_workd);

	if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)
		wake_up_process(tsk);
}

#ifdef CONFIG_SMP
static void irq_work_wake(struct irq_work *entry)
{
	wake_irq_workd();
}

static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
	IRQ_WORK_INIT_HARD(irq_work_wake);
#endif

static int irq_workd_should_run(unsigned int cpu)
{
	return !llist_empty(this_cpu_ptr(&lazy_list));
}

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	int oflags;

	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK,
				 &work->node.a_flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing smp_mb() in irq_work_single() makes sure
	 * everything we did before is visible.
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;
	return true;
}
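/*
 * Lifecycle sketch (added for clarity; flag names from <linux/irq_work.h>):
 * claiming sets IRQ_WORK_CLAIMED, i.e. PENDING | BUSY. irq_work_single()
 * clears PENDING before invoking the callback, so the item may be
 * re-queued from within it, and clears BUSY afterwards unless the work
 * was claimed again in the meantime.
 */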
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;
	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

static __always_inline void irq_work_raise(struct irq_work *work)
{
	if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
		trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);

	arch_irq_work_raise();
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	struct llist_head *list;
	bool rt_lazy_work = false;
	bool lazy_work = false;
	int work_flags;

	work_flags = atomic_read(&work->node.a_flags);
	if (work_flags & IRQ_WORK_LAZY)
		lazy_work = true;
	else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
		 !(work_flags & IRQ_WORK_HARD_IRQ))
		rt_lazy_work = true;

	if (lazy_work || rt_lazy_work)
		list = this_cpu_ptr(&lazy_list);
	else
		list = this_cpu_ptr(&raised_list);

	if (!llist_add(&work->node.llist, list))
		return;

	/* If the work is "lazy", handle it from next tick if any */
	if (!lazy_work || tick_nohz_tick_stopped())
		irq_work_raise(work);
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	kasan_record_aux_stack(work);

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backend aren't NMI safe */
		WARN_ON_ONCE(in_nmi());

		/*
		 * On PREEMPT_RT the items which are not marked as
		 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
		 * item is used on the remote CPU to wake the thread.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
		    !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {

			if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
				goto out;

			work = &per_cpu(irq_work_wakeup, cpu);
			if (!irq_work_claim(work))
				goto out;
		}

		__smp_call_single_queue(cpu, &work->node.llist);
	} else {
		__irq_work_queue_local(work);
	}
out:
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}

bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

void irq_work_single(void *arg)
{
	struct irq_work *work = arg;
	int flags;

	/*
	 * Clear the PENDING bit, after this point the @work can be re-used.
	 * The PENDING bit acts as a lock, and we own it, so we can clear it
	 * without atomic ops.
	 */
	flags = atomic_read(&work->node.a_flags);
	flags &= ~IRQ_WORK_PENDING;
	atomic_set(&work->node.a_flags, flags);

	/*
	 * See irq_work_claim().
	 */
	smp_mb();

	lockdep_irq_work_enter(flags);
	work->func(work);
	lockdep_irq_work_exit(flags);

	/*
	 * Clear the BUSY bit, if set, and return to the free state if no-one
	 * else claimed it meanwhile.
	 */
	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);

	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
	    !arch_irq_work_has_interrupt())
		rcuwait_wake_up(&work->irqwait);
}

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;

	/*
	 * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed
	 * in a per-CPU thread in preemptible context. Only the items which are
	 * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context.
	 */
	BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, node.llist)
		irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		irq_work_run_list(this_cpu_ptr(&lazy_list));
	else
		wake_irq_workd();
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		irq_work_run_list(this_cpu_ptr(&lazy_list));
	else
		wake_irq_workd();
}

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();
	might_sleep();

	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
	    !arch_irq_work_has_interrupt()) {
		rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
				   TASK_UNINTERRUPTIBLE);
		return;
	}

	while (irq_work_is_busy(work))
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

static void run_irq_workd(unsigned int cpu)
{
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

static void irq_workd_setup(unsigned int cpu)
{
	sched_set_fifo_low(current);
}

static struct smp_hotplug_thread irqwork_threads = {
	.store			= &irq_workd,
	.setup			= irq_workd_setup,
	.thread_should_run	= irq_workd_should_run,
	.thread_fn		= run_irq_workd,
	.thread_comm		= "irq_work/%u",
};

static __init int irq_work_init_threads(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		BUG_ON(smpboot_register_percpu_thread(&irqwork_threads));
	return 0;
}
early_initcall(irq_work_init_threads);
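Taken together, the API in this file reduces to a three-step pattern for clients: initialize a struct irq_work with a callback, queue it from any context (including NMI), and synchronize before the object can go away. A minimal client-side sketch, assuming the IRQ_WORK_INIT() initializer from <linux/irq_work.h>; all demo_* names are hypothetical, not part of this file:

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Runs from hardirq context (or the per-CPU irq_work thread on PREEMPT_RT). */
static void demo_irq_work_fn(struct irq_work *work)
{
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work demo_work = IRQ_WORK_INIT(demo_irq_work_fn);

static void demo_trigger(void)
{
	/* NMI-safe; returns false if the work was already pending. */
	if (!irq_work_queue(&demo_work))
		pr_debug("demo_work already pending\n");
}

static void demo_teardown(void)
{
	/* Sleepable context only: wait until the callback has finished. */
	irq_work_sync(&demo_work);
}

Because irq_work_claim() refuses to double-queue a pending item, demo_trigger() can be called at arbitrary rates and the callback still runs at most once per raise.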
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
};

struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

struct rq_qos {
	const struct rq_qos_ops *ops;
	struct gendisk *disk;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};

struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *iolat_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
	       const struct rq_qos_ops *ops);
void rq_qos_del(struct rq_qos *rqos);

typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq,
		    struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos && !blk_rq_is_passthrough(rq))
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct bio *bio)
{
	if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
			     bio_flagged(bio, BIO_QOS_MERGED))) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		if (q->rq_qos)
			__rq_qos_done_bio(q->rq_qos, bio);
	}
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_THROTTLED);
		__rq_qos_throttle(q->rq_qos, bio);
	}
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_MERGED);
		__rq_qos_merge(q->rq_qos, rq, bio);
	}
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

void rq_qos_exit(struct request_queue *);

#endif
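rq_qos_wait() above pairs a caller-supplied acquire callback, which must atomically take an inflight slot under @rqw, with a cleanup callback invoked when a woken waiter gives its slot back. A sketch of the usual callback shape, built on rq_wait_inc_below(); the demo_* names and the limit field are hypothetical stand-ins for a policy's real bookkeeping:

struct demo_qos_data {
	struct rq_wait rqw;
	unsigned int limit;	/* maximum inflight requests */
};

static bool demo_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
	struct demo_qos_data *d = private_data;

	/* Increments rqw->inflight only while it stays below the limit. */
	return rq_wait_inc_below(rqw, d->limit);
}

static void demo_cleanup(struct rq_wait *rqw, void *private_data)
{
	/* Return the slot and let another waiter try to take it. */
	atomic_dec(&rqw->inflight);
	wake_up(&rqw->wait);
}

static void demo_throttle(struct demo_qos_data *d)
{
	/* Sleeps until demo_acquire_inflight() succeeds. */
	rq_qos_wait(&d->rqw, d, demo_acquire_inflight, demo_cleanup);
}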
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

static void ext4_journalled_zero_new_buffers(handle_t *handle,
					     struct inode *inode,
					     struct folio *folio,
					     unsigned from, unsigned to);

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_feature_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
			 struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_feature_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there's no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	/*
	 * Credits for final inode cleanup and freeing:
	 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
	 * (xattr block freeing), bitmap, group descriptor (inode freeing)
	 */
	int extra_credits = 6;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;
	bool freeze_protected = false;

	trace_ext4_evict_inode(inode);

	dax_break_layout_final(inode);

	if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
		ext4_evict_ea_inode(inode);
	if (inode->i_nlink) {
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * For inodes with journalled data, transaction commit could have
	 * dirtied the inode. And for inodes with dioread_nolock, unwritten
	 * extents converting worker could merge extents and also have dirtied
	 * the inode. Flush worker is ignoring it because of I_FREEING flag but
	 * we still need to remove the inode from the writeback lists.
	 */
	if (!list_empty_careful(&inode->i_io_list))
		inode_io_list_del(inode);

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it. When we are in a running transaction though,
	 * we are already protected against freezing and we cannot grab further
	 * protection due to lock ordering constraints.
	 */
	if (!ext4_journal_current_handle()) {
		sb_start_intwrite(inode->i_sb);
		freeze_protected = true;
	}

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

	/*
	 * Block bitmap, group descriptor, and inode are accounted in both
	 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
				    ext4_blocks_for_truncate(inode) + extra_credits - 3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error_err(inode->i_sb, -err,
				       "couldn't truncate inode %lu (err %d)",
				       inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	if (freeze_protected)
		sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	/*
	 * Check whether somewhere else accidentally dirtied the evicting
	 * inode, which could cause inode use-after-free issues later.
	 */
	WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list));

	if (!list_empty(&EXT4_I(inode)->i_fc_list))
		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
				  int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			     "with only %d reserved data blocks",
			     __func__, inode->i_ino, used,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&ei->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    !inode_is_open_for_write(inode))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (ext4_has_feature_journal(inode->i_sb) &&
	    (inode->i_ino ==
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
		return 0;
	if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result is not the same,
	 * e.g. xfstests #223 when dioread_nolock is enabled. The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem, so in the meantime the unwritten
	 * extent could be converted.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because the extent will be collapsed in the
	 * status tree, so the m_len values might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
				 struct ext4_map_blocks *map)
{
	unsigned int status;
	int retval;

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(handle, inode, map, 0);

	if (retval <= 0)
		return retval;

	if (unlikely(retval != map->m_len)) {
		ext4_warning(inode->i_sb,
			     "ES len assertion failed for inode "
			     "%lu: retval %d != map->m_len %d",
			     inode->i_ino, retval, map->m_len);
		WARN_ON(1);
	}

	status = map->m_flags & EXT4_MAP_UNWRITTEN ?
			EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
	ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
			      map->m_pblk, status, false);
	return retval;
}

static int ext4_map_create_blocks(handle_t *handle, struct inode *inode,
				  struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	unsigned int status;
	int err, retval = 0;

	/*
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE, which
	 * indicates that the blocks and quotas have already been
	 * checked when the data was copied into the page cache.
	 */
	if (map->m_flags & EXT4_MAP_DELAYED)
		flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * We need to check for EXT4 here because migrate could have
	 * changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		/*
		 * We allocated new blocks which will result in i_data's
		 * format changing. Force the migrate to fail by clearing
		 * migrate flags.
		 */
		if (retval > 0 && map->m_flags & EXT4_MAP_NEW)
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	}
	if (retval <= 0)
		return retval;

	if (unlikely(retval != map->m_len)) {
		ext4_warning(inode->i_sb,
			     "ES len assertion failed for inode %lu: "
			     "retval %d != map->m_len %d",
			     inode->i_ino, retval, map->m_len);
		WARN_ON(1);
	}

	/*
	 * We have to zero out blocks before inserting them into the extent
	 * status tree. Otherwise someone could look them up there and
	 * use them before they are really zeroed. We also have to
	 * unmap metadata before zeroing as otherwise writeback can
	 * overwrite zeros with stale data from the block device.
	 */
	if (flags & EXT4_GET_BLOCKS_ZERO &&
	    map->m_flags & EXT4_MAP_MAPPED &&
	    map->m_flags & EXT4_MAP_NEW) {
		err = ext4_issue_zeroout(inode, map->m_lblk,
					 map->m_pblk, map->m_len);
		if (err)
			return err;
	}

	/*
	 * If the extent has been zeroed out, we don't need to update
	 * the extent status tree.
	 */
	if (flags & EXT4_GET_BLOCKS_PRE_IO &&
	    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		if (ext4_es_is_written(&es))
			return retval;
	}

	status = map->m_flags & EXT4_MAP_UNWRITTEN ?
			EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
	ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
			      map->m_pblk, status,
			      flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE);

	return retval;
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file type is extent based, it will call ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If flags doesn't contain EXT4_GET_BLOCKS_CREATE and the blocks are
 * pre-allocated and unwritten, the resulting @map is marked as unwritten.
 * If the flags contain EXT4_GET_BLOCKS_CREATE, it will mark @map as mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated), in
 * that case, @map is returned as unmapped but we still do fill map->m_len to
 * indicate the length of a hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns the mapping if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extent based, ext4_ext_map_blocks() is called;
 * otherwise ext4_ind_map_blocks() handles indirect-mapped files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If @flags doesn't contain EXT4_GET_BLOCKS_CREATE and the blocks are
 * pre-allocated and unwritten, the resulting @map is marked as unwritten.
 * If @flags contains EXT4_GET_BLOCKS_CREATE, it will mark @map as mapped.
 *
 * It returns 0 if a plain lookup fails (the blocks have not been
 * allocated); in that case @map is returned as unmapped, but map->m_len
 * is still filled in to indicate the length of the hole starting at
 * map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
		  flags, map->m_len, (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Look up the extent status tree first */
	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
	    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			map->m_flags |= ext4_es_is_delayed(&es) ?
					EXT4_MAP_DELAYED : 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG();
		}

		if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
			return retval;
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}
	/*
	 * In no-wait cached-query mode there is nothing more we can do
	 * if the extent is not found in the cache.
	 */
	if (flags & EXT4_GET_BLOCKS_CACHED_NOWAIT)
		return 0;

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	retval = ext4_map_query_blocks(handle, inode, map);
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If this is only a block lookup, we are done */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_map_blocks() returns with the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks().
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Allocating new blocks and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);
	retval = ext4_map_create_blocks(handle, inode, map, flags);
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will
		 * be visible after transaction commit must be on the
		 * transaction's ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			loff_t start_byte =
				(loff_t)map->m_lblk << inode->i_blkbits;
			loff_t length = (loff_t)map->m_len << inode->i_blkbits;

			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode,
						start_byte, length);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode,
						start_byte, length);
			if (ret)
				return ret;
		}
	}
	if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
				map->m_flags & EXT4_MAP_MAPPED))
		ext4_fc_track_range(handle, inode, map->m_lblk,
					map->m_lblk + map->m_len - 1);

	if (retval < 0)
		ext_debug(inode, "failed with err %d\n", retval);
	return retval;
}
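/*
 * Editorial sketch (not ext4 code): a minimal read-side caller of
 * ext4_map_blocks(). With flags == 0 the call never allocates, and a NULL
 * handle is valid for pure lookups (ext4_bread_batch() below does the
 * same). A return of 0 means a hole, and map.m_len reports its length.
 * The example_* name is hypothetical.
 */
static int __maybe_unused example_lookup_block(struct inode *inode,
					       ext4_lblk_t lblk,
					       ext4_fsblk_t *pblk)
{
	struct ext4_map_blocks map;
	int ret;

	map.m_lblk = lblk;
	map.m_len = 1;
	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret < 0)
		return ret;			/* lookup failed */
	if (ret == 0 || !(map.m_flags & EXT4_MAP_MAPPED))
		return -ENODATA;		/* hole or unwritten extent */
	*pblk = map.m_pblk;
	return 0;
}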
/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_folio) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	old_state = READ_ONCE(bh->b_state);
	do {
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
}

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for a buffered write; we require
 * the creation of an unwritten extent if blocks haven't been allocated.
 * The extent will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	int ret = 0;

	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	ret = _ext4_get_block(inode, iblock, bh_result,
			      EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);

	/*
	 * If the buffer is marked unwritten, mark it as new to make sure it is
	 * zeroed out correctly in case of partial writes. Otherwise, there is
	 * a chance of stale data getting exposed.
	 */
	if (ret == 0 && buffer_unwritten(bh_result))
		set_buffer_new(bh_result);

	return ret;
}
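/*
 * Editorial sketch (not ext4 code): the get_block_t interface uses a
 * buffer_head as a plain container - b_size carries the requested length
 * in and the mapped length out, and an on-stack bh with no b_folio takes
 * the non-atomic path in ext4_update_bh_state() above. A hedged one-block,
 * no-create query through ext4_get_block(); example_* is hypothetical.
 */
static int __maybe_unused example_query_one_block(struct inode *inode,
						  sector_t iblock)
{
	struct buffer_head bh = {};	/* dummy bh, b_folio stays NULL */
	int ret;

	bh.b_size = i_blocksize(inode);
	ret = ext4_get_block(inode, iblock, &bh, 0 /* !create */);
	if (ret)
		return ret;
	/* Unmapped after a successful call means a hole at iblock. */
	return buffer_mapped(&bh) ? 0 : -ENODATA;
}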
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	bool nowait = map_flags & EXT4_GET_BLOCKS_CACHED_NOWAIT;
	int err;

	ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		    || handle != NULL || create == 0);
	ASSERT(create == 0 || !nowait);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	if (nowait)
		return sb_find_get_block(inode->i_sb, map.m_pblk);

	/*
	 * Since a bh can hold extra references (e.g. from an attached
	 * journal_head), try to avoid __GFP_MOVABLE here: migration may
	 * fail while the journal_head remains attached.
	 */
	bh = getblk_unmovable(inode->i_sb->s_bdev, map.m_pblk,
			      inode->i_sb->s_blocksize);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		ASSERT(create != 0);
		ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
			    || (handle != NULL));

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata. For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
						     EXT4_JTR_NONE);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;
	int ret;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}
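/*
 * Editorial sketch (not ext4 code): reading one metadata block with
 * ext4_bread() above. Callers must handle three outcomes - ERR_PTR() on
 * error, NULL for a hole (when not creating), and a valid, uptodate bh
 * otherwise. example_* is hypothetical.
 */
static int __maybe_unused example_read_meta_block(struct inode *inode,
						  ext4_lblk_t block)
{
	struct buffer_head *bh;

	bh = ext4_bread(NULL, inode, block, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);
	if (!bh)
		return -ENOENT;		/* hole: nothing allocated here */
	/* ... use bh->b_data ... */
	brelse(bh);
	return 0;
}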
/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
			ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}

int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle, struct inode *inode,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, inode, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * Helper for handling dirtying of journalled data. We also mark the folio as
 * dirty so that the writeback code knows that this folio (and inode) contains
 * dirty data. ext4_writepages() then commits the appropriate transaction to
 * make the data stable.
 */
static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
{
	folio_mark_dirty(bh->b_folio);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

int do_journal_get_write_access(handle_t *handle, struct inode *inode,
				struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	BUFFER_TRACE(bh, "get write access");
	return ext4_journal_get_write_access(handle, inode->i_sb, bh,
					     EXT4_JTR_NONE);
}
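/*
 * Editorial sketch (not ext4 code): ext4_walk_page_buffers() above drives a
 * callback over the buffers covering [from, to) in a folio;
 * do_journal_get_write_access() and write_end_fn() are its two real users
 * in this path. A hedged wrapper showing the same call that
 * ext4_write_begin() makes in data=journal mode; example_* is hypothetical.
 */
static int __maybe_unused example_journal_folio_range(handle_t *handle,
						      struct inode *inode,
						      struct folio *folio,
						      unsigned from, unsigned to)
{
	/* Reserve journal write access for every buffer in [from, to). */
	return ext4_walk_page_buffers(handle, inode, folio_buffers(folio),
				      from, to, NULL,
				      do_journal_get_write_access);
}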
int ext4_block_write_begin(handle_t *handle, struct folio *folio,
			   loff_t pos, unsigned len,
			   get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = folio->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2];
	int nr_wait = 0;
	int i;
	bool should_journal_data = ext4_should_journal_data(inode);

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	head = folio_buffers(folio);
	if (!head)
		head = create_empty_buffers(folio, blocksize, 0);
	bbits = ilog2(blocksize);
	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (folio_test_uptodate(folio)) {
				set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				/*
				 * We may be zeroing partial buffers or all new
				 * buffers in case of failure. Prepare JBD2 for
				 * that.
				 */
				if (should_journal_data)
					do_journal_get_write_access(handle,
								    inode, bh);
				if (folio_test_uptodate(folio)) {
					/*
					 * Unlike __block_write_begin() we leave
					 * dirtying of new uptodate buffers to
					 * ->write_end() time or
					 * folio_zero_new_buffers().
					 */
					set_buffer_uptodate(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					folio_zero_segments(folio, to,
							    block_end,
							    block_start, from);
				continue;
			}
		}
		if (folio_test_uptodate(folio)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ext4_read_bh_lock(bh, 0, false);
			wait[nr_wait++] = bh;
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	for (i = 0; i < nr_wait; i++) {
		wait_on_buffer(wait[i]);
		if (!buffer_uptodate(wait[i]))
			err = -EIO;
	}
	if (unlikely(err)) {
		if (should_journal_data)
			ext4_journalled_zero_new_buffers(handle, inode, folio,
							 from, to);
		else
			folio_zero_new_buffers(folio, from, to);
	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		for (i = 0; i < nr_wait; i++) {
			int err2;

			err2 = fscrypt_decrypt_pagecache_blocks(folio,
						blocksize, bh_offset(wait[i]));
			if (err2) {
				clear_buffer_uptodate(wait[i]);
				err = err2;
			}
		}
	}

	return err;
}
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction. We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the ext4_write_end(). So doing the jbd2_journal_start() at the start
 * of ext4_write_begin() is the right place.
 */
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct folio **foliop, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct folio *folio;
	pgoff_t index;
	unsigned from, to;

	ret = ext4_emergency_state(inode->i_sb);
	if (unlikely(ret))
		return ret;

	trace_ext4_write_begin(inode, pos, len);
	/*
	 * Reserve one more block for addition to the orphan list in case
	 * we allocate blocks but the write fails for some reason.
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    foliop);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * __filemap_get_folio() can take a long time if the
	 * system is thrashing due to memory pressure, or if the folio
	 * is being written back. So grab it first before we start
	 * the transaction handle. This also allows us to allocate
	 * the folio (if needed) without using GFP_NOFS.
	 */
retry_grab:
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
					mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * As with the folio allocation, we preallocate buffer heads
	 * before starting the handle.
	 */
	if (!folio_buffers(folio))
		create_empty_buffers(folio, inode->i_sb->s_blocksize, 0);

	folio_unlock(folio);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		folio_put(folio);
		return PTR_ERR(handle);
	}

	folio_lock(folio);
	if (folio->mapping != mapping) {
		/* The folio got truncated from under us */
		folio_unlock(folio);
		folio_put(folio);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the folio was unlocked */
	folio_wait_stable(folio);

	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(handle, folio, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(handle, folio, pos, len,
					     ext4_get_block);
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio), from, to,
					     NULL, do_journal_get_write_access);
	}

	if (ret) {
		bool extended = (pos + len > inode->i_size) &&
				!ext4_verity_in_progress(inode);

		folio_unlock(folio);
		/*
		 * ext4_block_write_begin may have instantiated a few blocks
		 * outside i_size. Trim these off again. We don't need
		 * i_size_read() because we hold i_rwsem.
		 *
		 * Add the inode to the orphan list in case we crash before
		 * the truncate finishes.
		 */
		if (extended && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (extended) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		folio_put(folio);
		return ret;
	}
	*foliop = folio;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct inode *inode,
			struct buffer_head *bh)
{
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_dirty_journalled_data(handle, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}
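/*
 * Editorial sketch (not ext4 code): the retry_journal loop in
 * ext4_write_begin() above bounds ENOSPC retries with
 * ext4_should_retry_alloc(), which commits the journal to free reserved
 * space and gives up after a few attempts. The bare pattern, applied to
 * ext4_getblk(); note that real callers restart the transaction handle
 * between attempts, as ext4_write_begin() does. example_* is hypothetical.
 */
static struct buffer_head * __maybe_unused
example_getblk_retry(handle_t *handle, struct inode *inode, ext4_lblk_t block)
{
	struct buffer_head *bh;
	int retries = 0;

	do {
		bh = ext4_getblk(handle, inode, block, EXT4_GET_BLOCKS_CREATE);
	} while (IS_ERR(bh) && PTR_ERR(bh) == -ENOSPC &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));
	return bh;
}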
/*
 * We need to pick up the new inode size that generic_commit_write gave us.
 * `file' can be NULL - e.g., when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->i_private_list. Metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct folio *folio, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_write_end(inode, pos, len, copied);

	if (ext4_has_inline_data(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
		return ext4_write_inline_data_end(inode, pos, len, copied,
						  folio);

	copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
	/*
	 * It's important to update i_size while still holding the folio lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 *
	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
	 * blocks are being written past EOF, so skip the i_size update.
	 */
	if (!verity)
		i_size_changed = ext4_update_inode_size(inode, pos + copied);
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity) {
		pagecache_isize_extended(inode, old_size, pos);
		ext4_zero_partial_blocks(handle, inode, old_size,
					 pos - old_size);
	}
	/*
	 * Don't mark the inode dirty under the folio lock. First, it
	 * unnecessarily lengthens the time the folio lock is held. Second,
	 * it forces a lock ordering of folio lock before transaction start
	 * for journaling filesystems.
	 */
	if (i_size_changed)
		ret = ext4_mark_inode_dirty(handle, inode);

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than we have copied, we
		 * will have blocks allocated outside inode->i_size, so
		 * truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * This is a private version of folio_zero_new_buffers() which doesn't
 * set the buffers dirty, since in data=journalled mode we need to call
 * ext4_dirty_journalled_data() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					     struct inode *inode,
					     struct folio *folio,
					     unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = folio_buffers(folio);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!folio_test_uptodate(folio)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					folio_zero_range(folio, start, size);
				}
				clear_buffer_new(bh);
				write_end_fn(handle, inode, bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}
static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct folio *folio, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (ext4_has_inline_data(inode))
		return ext4_write_inline_data_end(inode, pos, len, copied,
						  folio);

	if (unlikely(copied < len) && !folio_test_uptodate(folio)) {
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, inode, folio,
						 from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, inode, folio,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, inode,
					     folio_buffers(folio),
					     from, from + copied, &partial,
					     write_end_fn);
		if (!partial)
			folio_mark_uptodate(folio);
	}
	if (!verity)
		size_changed = ext4_update_inode_size(inode, pos + copied);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	folio_unlock(folio);
	folio_put(folio);

	if (old_size < pos && !verity) {
		pagecache_isize_extended(inode, old_size, pos);
		ext4_zero_partial_blocks(handle, inode, old_size,
					 pos - old_size);
	}

	if (size_changed) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than we have copied, we
		 * will have blocks allocated outside inode->i_size, so
		 * truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve space for 'nr_resv' clusters
 */
static int ext4_da_reserve_space(struct inode *inode, int nr_resv)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end. Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, nr_resv));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, nr_resv, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, nr_resv));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks += nr_resv;
	trace_ext4_da_reserve_space(inode, nr_resv);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}

void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * If there aren't enough reserved blocks, then the
		 * counter is messed up somewhere. Since this
		 * function is called from the invalidate page path,
		 * it's harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	/* These are input fields for ext4_do_writepages() */
	struct inode *inode;
	struct writeback_control *wbc;
	unsigned int can_map:1;	/* Can writepages call map blocks? */

	/* These are internal state of ext4_do_writepages() */
	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped. We somewhat abuse m_flags to store whether the extent
	 * is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
	unsigned int do_map:1;
	unsigned int scanned_until_end:1;
	unsigned int journalled_more_data:1;
};
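/*
 * Editorial sketch (not ext4 code): ext4_da_reserve_space() and
 * ext4_da_release_space() above must stay balanced in units of clusters,
 * or s_dirtyclusters_counter and the quota reservation drift. A hedged
 * demonstration of the pairing on a failure path; the example_* name is
 * hypothetical, and the immediate release stands in for whatever step
 * records the delayed extent.
 */
static int __maybe_unused example_da_reserve(struct inode *inode, int nclu)
{
	int ret = ext4_da_reserve_space(inode, nclu);

	if (ret)
		return ret;	/* typically -ENOSPC or a quota error */
	/*
	 * ... record the delayed extent here; if that step fails, the
	 * claim must be handed back, which is all this sketch does:
	 */
	ext4_da_release_space(inode, nclu);
	return 0;
}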
static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	unsigned nr, i;
	pgoff_t index, end;
	struct folio_batch fbatch;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	mpd->scanned_until_end = 0;
	index = mpd->first_page;
	end   = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;

		start = index << (PAGE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_SHIFT - inode->i_blkbits);

		/*
		 * avoid racing with extent status tree scans made by
		 * ext4_insert_delayed_blocks()
		 */
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_es_remove_extent(inode, start, last - start + 1);
		up_write(&EXT4_I(inode)->i_data_sem);
	}

	folio_batch_init(&fbatch);
	while (index <= end) {
		nr = filemap_get_folios(mapping, &index, end, &fbatch);
		if (nr == 0)
			break;
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			if (folio->index < mpd->first_page)
				continue;
			if (folio_next_index(folio) - 1 > end)
				continue;
			BUG_ON(!folio_test_locked(folio));
			BUG_ON(folio_test_writeback(folio));
			if (invalidate) {
				if (folio_mapped(folio))
					folio_clear_dirty_for_io(folio);
				block_invalidate_folio(folio, 0,
						folio_size(folio));
				folio_clear_uptodate(folio);
			}
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
	}
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
		  percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
		  percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
}

/*
 * Check whether the cluster containing lblk has been allocated or has
 * delalloc reservation.
 *
 * Returns 0 if the cluster doesn't have either, 1 if it has delalloc
 * reservation, 2 if it's already been allocated, negative error code on
 * failure.
 */
static int ext4_clu_alloc_state(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int ret;

	/* Has delalloc reservation? */
	if (ext4_es_scan_clu(inode, &ext4_es_is_delayed, lblk))
		return 1;

	/* Already been allocated? */
	if (ext4_es_scan_clu(inode, &ext4_es_is_mapped, lblk))
		return 2;

	ret = ext4_clu_mapped(inode, EXT4_B2C(sbi, lblk));
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 2;

	return 0;
}
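/*
 * Editorial sketch (not ext4 code): decoding the tri-state result of
 * ext4_clu_alloc_state() the same way ext4_insert_delayed_blocks() below
 * does: 0 - the cluster needs a reservation, 1 - a delalloc reservation
 * already covers it, 2 - it is already allocated, < 0 - error.
 * example_* is hypothetical.
 */
static int __maybe_unused
example_cluster_needs_reservation(struct inode *inode, ext4_lblk_t lblk)
{
	int ret = ext4_clu_alloc_state(inode, lblk);

	if (ret < 0)
		return ret;
	return ret == 0;	/* 1 and 2 are both already accounted for */
}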
/*
 * ext4_insert_delayed_blocks - adds multiple delayed blocks to the extents
 *                              status tree, incrementing the reserved
 *                              cluster/block count or making pending
 *                              reservations where needed
 *
 * @inode - file containing the newly added block
 * @lblk - start logical block to be added
 * @len - length of blocks to be added
 *
 * Returns 0 on success, negative error code on failure.
 */
static int ext4_insert_delayed_blocks(struct inode *inode, ext4_lblk_t lblk,
				      ext4_lblk_t len)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int ret;
	bool lclu_allocated = false;
	bool end_allocated = false;
	ext4_lblk_t resv_clu;
	ext4_lblk_t end = lblk + len - 1;

	/*
	 * If the cluster containing lblk or end is shared with a delayed,
	 * written, or unwritten extent in a bigalloc file system, it's
	 * already been accounted for and does not need to be reserved.
	 * A pending reservation must be made for the cluster if it's
	 * shared with a written or unwritten extent and doesn't already
	 * have one. Written and unwritten extents can be purged from the
	 * extents status tree if the system is under memory pressure, so
	 * it's necessary to examine the extent tree if a search of the
	 * extents status tree doesn't get a match.
	 */
	if (sbi->s_cluster_ratio == 1) {
		ret = ext4_da_reserve_space(inode, len);
		if (ret != 0)   /* ENOSPC */
			return ret;
	} else {   /* bigalloc */
		resv_clu = EXT4_B2C(sbi, end) - EXT4_B2C(sbi, lblk) + 1;

		ret = ext4_clu_alloc_state(inode, lblk);
		if (ret < 0)
			return ret;
		if (ret > 0) {
			resv_clu--;
			lclu_allocated = (ret == 2);
		}

		if (EXT4_B2C(sbi, lblk) != EXT4_B2C(sbi, end)) {
			ret = ext4_clu_alloc_state(inode, end);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				resv_clu--;
				end_allocated = (ret == 2);
			}
		}

		if (resv_clu) {
			ret = ext4_da_reserve_space(inode, resv_clu);
			if (ret != 0)   /* ENOSPC */
				return ret;
		}
	}

	ext4_es_insert_delayed_extent(inode, lblk, len, lclu_allocated,
				      end_allocated);
	return 0;
}
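/*
 * Editorial worked example for the bigalloc arithmetic above, assuming a
 * hypothetical cluster ratio of 16 blocks per cluster: a delayed range of
 * blocks [20, 40] spans clusters EXT4_B2C(20) = 1 through EXT4_B2C(40) = 2,
 * so resv_clu starts at 2 - 1 + 1 = 2. If the first cluster already holds
 * a written extent, resv_clu drops to 1 and only the tail cluster is newly
 * reserved, while the boundary cluster is marked allocated in the extent
 * status tree via lclu_allocated.
 */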
/*
 * Looks up the requested blocks and sets the delalloc extent map.
 * First try to look up the extent entry that contains the requested
 * blocks in the extent status tree without i_data_sem, then try to look
 * up the on-disk extent mapping with i_data_sem in read mode, and
 * finally take i_data_sem in write mode, look up again, and add a
 * delalloc extent entry if there is still no extent found. Pass out
 * the mapped extent through @map and return 0 on success.
 */
static int ext4_da_map_blocks(struct inode *inode, struct ext4_map_blocks *map)
{
	struct extent_status es;
	int retval;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
		  (unsigned long) map->m_lblk);

	/* Look up the extent status tree first */
	if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		map->m_len = min_t(unsigned int, map->m_len,
				   es.es_len - (map->m_lblk - es.es_lblk));

		if (ext4_es_is_hole(&es))
			goto add_delayed;

found:
		/*
		 * A delayed extent could have been allocated by fallocate
		 * in the meantime, so we need to check it.
		 */
		if (ext4_es_is_delayed(&es)) {
			map->m_flags |= EXT4_MAP_DELAYED;
			return 0;
		}

		map->m_pblk = ext4_es_pblock(&es) + map->m_lblk - es.es_lblk;
		if (ext4_es_is_written(&es))
			map->m_flags |= EXT4_MAP_MAPPED;
		else if (ext4_es_is_unwritten(&es))
			map->m_flags |= EXT4_MAP_UNWRITTEN;
		else
			BUG();

#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
		return 0;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_has_inline_data(inode))
		retval = 0;
	else
		retval = ext4_map_query_blocks(NULL, inode, map);
	up_read(&EXT4_I(inode)->i_data_sem);
	if (retval)
		return retval < 0 ? retval : 0;

add_delayed:
	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * The page fault path (ext4_page_mkwrite does not take i_rwsem)
	 * and the fallocate path (no folio lock) can race. Make sure we
	 * look up the extent status tree here again while i_data_sem
	 * is held in write mode, before inserting a new da entry in
	 * the extent status tree.
	 */
	if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		map->m_len = min_t(unsigned int, map->m_len,
				   es.es_len - (map->m_lblk - es.es_lblk));

		if (!ext4_es_is_hole(&es)) {
			up_write(&EXT4_I(inode)->i_data_sem);
			goto found;
		}
	} else if (!ext4_has_inline_data(inode)) {
		retval = ext4_map_query_blocks(NULL, inode, map);
		if (retval) {
			up_write(&EXT4_I(inode)->i_data_sem);
			return retval < 0 ? retval : 0;
		}
	}

	map->m_flags |= EXT4_MAP_DELAYED;
	retval = ext4_insert_delayed_blocks(inode, map->m_lblk, map->m_len);
	up_write(&EXT4_I(inode)->i_data_sem);

	return retval;
}

/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin(). It will either return a mapped block or
 * reserve space for a single block.
 *
 * For a delayed buffer_head we have BH_Mapped, BH_New, and BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly.
 *
 * For an unwritten buffer_head we have BH_Mapped, BH_New, and BH_Unwritten
 * set. We also have b_blocknr set to the physical block of the unwritten
 * extent and b_bdev initialized properly.
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	sector_t invalid_block = ~((sector_t) 0xffff);
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * First, we need to know whether the block is already allocated;
	 * preallocated blocks are unmapped but should be treated the same
	 * as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	if (map.m_flags & EXT4_MAP_DELAYED) {
		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
		return 0;
	}

	map_bh(bh, inode->i_sb, map.m_pblk);
	ext4_update_bh_state(bh, map.m_flags);

	if (buffer_unwritten(bh)) {
		/*
		 * A delayed write to an unwritten bh should be marked new
		 * and mapped. Mapped ensures that we don't do get_block
		 * multiple times when we write to the same offset, and new
		 * ensures that we do a proper zero out for partial writes.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}

static void mpage_folio_done(struct mpage_da_data *mpd, struct folio *folio)
{
	mpd->first_page += folio_nr_pages(folio);
	folio_unlock(folio);
}

static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
{
	size_t len;
	loff_t size;
	int err;

	BUG_ON(folio->index != mpd->first_page);
	folio_clear_dirty_for_io(folio);
	/*
	 * We have to be very careful here! Nothing protects the writeback
	 * path against i_size changes and the page can be writeably mapped
	 * into page tables. So an application can be growing i_size and
	 * writing data through mmap while writeback runs.
	 * folio_clear_dirty_for_io() write-protects our page in page tables
	 * and the page cannot get written to again until we release the
	 * folio lock. So only after folio_clear_dirty_for_io() are we safe
	 * to sample i_size for ext4_bio_write_folio() to zero out the tail
	 * of the written page. We rely on the barrier provided by
	 * folio_test_clear_dirty() in folio_clear_dirty_for_io() to make
	 * sure i_size is really sampled only after page tables are updated.
	 */
	size = i_size_read(mpd->inode);
	len = folio_size(folio);
	if (folio_pos(folio) + len > size &&
	    !ext4_verity_in_progress(mpd->inode))
		len = size & (len - 1);
	err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
	if (!err)
		mpd->wbc->nr_to_write--;

	return err;
}
#define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay))

/*
 * mballoc gives us at most this number of blocks...
 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
 * The rest of mballoc seems to handle chunks up to full group size.
 */
#define MAX_WRITEPAGES_EXTENT_LEN 2048

/*
 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
 *
 * @mpd - extent of blocks
 * @lblk - logical number of the block in the file
 * @bh - buffer head we want to add to the extent
 *
 * The function is used to collect contiguous blocks in the same state. If the
 * buffer doesn't require mapping for writeback and we haven't started the
 * extent of buffers to map yet, the function returns 'true' immediately - the
 * caller can write the buffer right away. Otherwise the function returns true
 * if the block has been added to the extent, false if the block couldn't be
 * added.
 */
static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
				   struct buffer_head *bh)
{
	struct ext4_map_blocks *map = &mpd->map;

	/* Buffer that doesn't need mapping for writeback? */
	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
		/* So far no extent to map => we write the buffer right away */
		if (map->m_len == 0)
			return true;
		return false;
	}

	/* First block in the extent? */
	if (map->m_len == 0) {
		/* We cannot map unless handle is started... */
		if (!mpd->do_map)
			return false;
		map->m_lblk = lblk;
		map->m_len = 1;
		map->m_flags = bh->b_state & BH_FLAGS;
		return true;
	}

	/* Don't go larger than mballoc is willing to allocate */
	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
		return false;

	/* Can we merge the block to our big extent? */
	if (lblk == map->m_lblk + map->m_len &&
	    (bh->b_state & BH_FLAGS) == map->m_flags) {
		map->m_len++;
		return true;
	}
	return false;
}

/*
 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
 *
 * @mpd - extent of blocks for mapping
 * @head - the first buffer in the page
 * @bh - buffer we should start processing from
 * @lblk - logical number of the block in the file corresponding to @bh
 *
 * Walk through page buffers from @bh up to @head (exclusive) and either submit
 * the page for IO if all buffers in this page were mapped and there's no
 * accumulated extent of buffers to map, or add buffers in the page to the
 * extent of buffers to map. The function returns 1 if the caller can continue
 * by processing the next page, 0 if it should stop adding buffers to the
 * extent to map because we cannot extend it anymore. It can also return a
 * value < 0 in case of error during IO submission.
 */
static int mpage_process_page_bufs(struct mpage_da_data *mpd,
				   struct buffer_head *head,
				   struct buffer_head *bh,
				   ext4_lblk_t lblk)
{
	struct inode *inode = mpd->inode;
	int err;
	ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
							>> inode->i_blkbits;

	if (ext4_verity_in_progress(inode))
		blocks = EXT_MAX_BLOCKS;

	do {
		BUG_ON(buffer_locked(bh));

		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
			/* Found extent to map? */
			if (mpd->map.m_len)
				return 0;
			/* Buffer needs mapping and handle is not started? */
			if (!mpd->do_map)
				return 0;
			/* Everything mapped so far and we hit EOF */
			break;
		}
	} while (lblk++, (bh = bh->b_this_page) != head);
	/* So far everything mapped? Submit the page for IO. */
	if (mpd->map.m_len == 0) {
		err = mpage_submit_folio(mpd, head->b_folio);
		if (err < 0)
			return err;
		mpage_folio_done(mpd, head->b_folio);
	}
	if (lblk >= blocks) {
		mpd->scanned_until_end = 1;
		return 0;
	}
	return 1;
}
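/*
 * Editorial sketch (not ext4 code): the merge test in
 * mpage_add_bh_to_extent() above only extends the accumulated extent when
 * the block is logically contiguous *and* in the same delalloc/unwritten
 * state, and the extent is still below what mballoc will allocate at once.
 * A hedged standalone restatement of that predicate; example_* is
 * hypothetical.
 */
static bool __maybe_unused
example_can_merge(const struct ext4_map_blocks *map, ext4_lblk_t lblk,
		  unsigned long bh_state)
{
	return map->m_len < MAX_WRITEPAGES_EXTENT_LEN &&
	       lblk == map->m_lblk + map->m_len &&
	       (bh_state & BH_FLAGS) == map->m_flags;
}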
/*
 * mpage_process_folio - update folio buffers corresponding to changed extent
 *			 and may submit the fully mapped page for IO
 * @mpd: description of extent to map, on return next extent to map
 * @folio: Contains these buffers.
 * @m_lblk: logical block mapping.
 * @m_pblk: corresponding physical mapping.
 * @map_bh: determines on return whether this page requires any further
 *	    mapping or not.
 *
 * Scan given folio buffers corresponding to changed extent and update buffer
 * state according to new extent state.
 * We map delalloc buffers to their physical location, clear unwritten bits.
 * If the given folio is not fully mapped, we update @mpd to the next extent
 * in the given folio that needs mapping and return @map_bh as true.
 */
static int mpage_process_folio(struct mpage_da_data *mpd, struct folio *folio,
			       ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
			       bool *map_bh)
{
	struct buffer_head *head, *bh;
	ext4_io_end_t *io_end = mpd->io_submit.io_end;
	ext4_lblk_t lblk = *m_lblk;
	ext4_fsblk_t pblock = *m_pblk;
	int err = 0;
	int blkbits = mpd->inode->i_blkbits;
	ssize_t io_end_size = 0;
	struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);

	bh = head = folio_buffers(folio);
	do {
		if (lblk < mpd->map.m_lblk)
			continue;
		if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
			/*
			 * Buffer after end of mapped extent.
			 * Find next buffer in the folio to map.
			 */
			mpd->map.m_len = 0;
			mpd->map.m_flags = 0;
			io_end_vec->size += io_end_size;

			err = mpage_process_page_bufs(mpd, head, bh, lblk);
			if (err > 0)
				err = 0;
			if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
				io_end_vec = ext4_alloc_io_end_vec(io_end);
				if (IS_ERR(io_end_vec)) {
					err = PTR_ERR(io_end_vec);
					goto out;
				}
				io_end_vec->offset = (loff_t)mpd->map.m_lblk
								<< blkbits;
			}
			*map_bh = true;
			goto out;
		}
		if (buffer_delay(bh)) {
			clear_buffer_delay(bh);
			bh->b_blocknr = pblock++;
		}
		clear_buffer_unwritten(bh);
		io_end_size += (1 << blkbits);
	} while (lblk++, (bh = bh->b_this_page) != head);

	io_end_vec->size += io_end_size;
	*map_bh = false;
out:
	*m_lblk = lblk;
	*m_pblk = pblock;
	return err;
}
/*
 * mpage_map_and_submit_buffers - update buffers corresponding to changed
 *				  extent and submit fully mapped pages for IO
 *
 * @mpd - description of extent to map, on return next extent to map
 *
 * Scan buffers corresponding to changed extent (we expect the corresponding
 * pages to be already locked) and update buffer state according to the new
 * extent state.
 * We map delalloc buffers to their physical location, clear unwritten bits,
 * and mark buffers as uninit when we perform writes to unwritten extents
 * and do extent conversion after IO is finished. If the last page is not
 * fully mapped, we update @map to the next extent in the last page that
 * needs mapping. Otherwise we submit the page for IO.
 */
static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
{
	struct folio_batch fbatch;
	unsigned nr, i;
	struct inode *inode = mpd->inode;
	int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
	pgoff_t start, end;
	ext4_lblk_t lblk;
	ext4_fsblk_t pblock;
	int err;
	bool map_bh = false;

	start = mpd->map.m_lblk >> bpp_bits;
	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
	lblk = start << bpp_bits;
	pblock = mpd->map.m_pblk;

	folio_batch_init(&fbatch);
	while (start <= end) {
		nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
		if (nr == 0)
			break;
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			err = mpage_process_folio(mpd, folio, &lblk, &pblock,
						  &map_bh);
			/*
			 * If map_bh is true, the page may require further bh
			 * mapping, or it may have been submitted for IO, so
			 * we return to allow further extent mapping.
			 */
			if (err < 0 || map_bh)
				goto out;
			/* Page fully mapped - let IO run! */
			err = mpage_submit_folio(mpd, folio);
			if (err < 0)
				goto out;
			mpage_folio_done(mpd, folio);
		}
		folio_batch_release(&fbatch);
	}
	/* Extent fully mapped and matches with page boundary. We are done. */
	mpd->map.m_len = 0;
	mpd->map.m_flags = 0;
	return 0;
out:
	folio_batch_release(&fbatch);
	return err;
}

static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
{
	struct inode *inode = mpd->inode;
	struct ext4_map_blocks *map = &mpd->map;
	int get_blocks_flags;
	int err, dioread_nolock;

	trace_ext4_da_write_pages_extent(inode, map);
	/*
	 * Call ext4_map_blocks() to allocate any delayed allocation blocks,
	 * or to convert an unwritten extent to be initialized (in the case
	 * where we have written into one or more preallocated blocks). It is
	 * possible that we're going to need more metadata blocks than
	 * previously reserved. However we must not fail because we're in
	 * writeback and there is nothing we can do about it so it might
	 * result in data loss. So use reserved blocks to allocate metadata
	 * if possible.
	 */
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
			   EXT4_GET_BLOCKS_METADATA_NOFAIL |
			   EXT4_GET_BLOCKS_IO_SUBMIT;
	dioread_nolock = ext4_should_dioread_nolock(inode);
	if (dioread_nolock)
		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;

	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
	if (err < 0)
		return err;
	if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
		if (!mpd->io_submit.io_end->handle &&
		    ext4_handle_valid(handle)) {
			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
			handle->h_rsv_handle = NULL;
		}
		ext4_set_io_unwritten_flag(mpd->io_submit.io_end);
	}

	BUG_ON(map->m_len == 0);
	return 0;
}

/*
 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
 *