Total coverage: 55455 (5%) of 1171465
// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 sub-device
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	    Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <linux/export.h>
#include <linux/ioctl.h>
#include <linux/leds.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/videodev2.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
 * The Streams API is an experimental feature. To use the Streams API, set
 * 'v4l2_subdev_enable_streams_api' to 1 below.
 */
static bool v4l2_subdev_enable_streams_api;
#endif

/*
 * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set
 * of streams.
 *
 * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX
 * restricts the total number of streams in a pad, although the stream ID is
 * not restricted.
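 *
 * The u64 masks are indexed with BIT_ULL(stream) throughout this file, which
 * is what caps the stream ID at 63.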
 */
#define V4L2_SUBDEV_MAX_STREAM_ID 63

#include "v4l2-subdev-priv.h"

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
	struct v4l2_subdev_state *state;
	static struct lock_class_key key;

	state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	fh->state = state;

	return 0;
}

static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
	__v4l2_subdev_state_free(fh->state);
	fh->state = NULL;
}

static int subdev_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_subdev_fh *subdev_fh;
	int ret;

	subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
	if (subdev_fh == NULL)
		return -ENOMEM;

	ret = subdev_fh_init(subdev_fh, sd);
	if (ret) {
		kfree(subdev_fh);
		return ret;
	}

	v4l2_fh_init(&subdev_fh->vfh, vdev);
	v4l2_fh_add(&subdev_fh->vfh);
	file->private_data = &subdev_fh->vfh;

	if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
		struct module *owner;

		owner = sd->entity.graph_obj.mdev->dev->driver->owner;
		if (!try_module_get(owner)) {
			ret = -EBUSY;
			goto err;
		}
		subdev_fh->owner = owner;
	}

	if (sd->internal_ops && sd->internal_ops->open) {
		ret = sd->internal_ops->open(sd, subdev_fh);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
	module_put(subdev_fh->owner);
	v4l2_fh_del(&subdev_fh->vfh);
	v4l2_fh_exit(&subdev_fh->vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);

	return ret;
}

static int subdev_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);

	if (sd->internal_ops && sd->internal_ops->close)
		sd->internal_ops->close(sd, subdev_fh);
	module_put(subdev_fh->owner);
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);
	file->private_data = NULL;

	return 0;
}
#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static int subdev_open(struct file *file)
{
	return -ENODEV;
}

static int subdev_close(struct file *file)
{
	return -ENODEV;
}
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static void v4l2_subdev_enable_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led))
		led_set_brightness(sd->privacy_led,
				   sd->privacy_led->max_brightness);
#endif
}

static void v4l2_subdev_disable_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led))
		led_set_brightness(sd->privacy_led, 0);
#endif
}

static inline int check_which(u32 which)
{
	if (which != V4L2_SUBDEV_FORMAT_TRY &&
	    which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	return 0;
}

static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (sd->entity.num_pads) {
		if (pad >= sd->entity.num_pads)
			return -EINVAL;
		return 0;
	}
#endif
	/* allow pad 0 on subdevices not registered as media entities */
	if (pad > 0)
		return -EINVAL;
	return 0;
}

static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
		       u32 which, u32 pad, u32 stream)
{
	if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
		if (!v4l2_subdev_state_get_format(state, pad, stream))
			return -EINVAL;
		return 0;
#else
		return -EINVAL;
#endif
	}

	if (stream != 0)
		return -EINVAL;

	if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
		return -EINVAL;

	return 0;
}

static inline int check_format(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state
			       *state,
			       struct v4l2_subdev_format *format)
{
	if (!format)
		return -EINVAL;

	return check_which(format->which) ? : check_pad(sd, format->pad) ? :
	       check_state(sd, state, format->which, format->pad,
			   format->stream);
}

static int call_get_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->get_fmt(sd, state, format);
}

static int call_set_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->set_fmt(sd, state, format);
}

static int call_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	if (!code)
		return -EINVAL;

	return check_which(code->which) ? : check_pad(sd, code->pad) ? :
	       check_state(sd, state, code->which, code->pad, code->stream) ? :
	       sd->ops->pad->enum_mbus_code(sd, state, code);
}

static int call_enum_frame_size(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *state,
				struct v4l2_subdev_frame_size_enum *fse)
{
	if (!fse)
		return -EINVAL;

	return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
	       check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
	       sd->ops->pad->enum_frame_size(sd, state, fse);
}

static int call_enum_frame_interval(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *state,
				    struct v4l2_subdev_frame_interval_enum *fie)
{
	if (!fie)
		return -EINVAL;

	return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
	       check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
	       sd->ops->pad->enum_frame_interval(sd, state, fie);
}

static inline int check_selection(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *state,
				  struct v4l2_subdev_selection *sel)
{
	if (!sel)
		return -EINVAL;

	return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
	       check_state(sd, state, sel->which, sel->pad, sel->stream);
}

static int call_get_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->get_selection(sd, state, sel);
}

static int call_set_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->set_selection(sd, state, sel);
}

static inline int check_frame_interval(struct v4l2_subdev *sd,
				       struct v4l2_subdev_state *state,
				       struct v4l2_subdev_frame_interval *fi)
{
	if (!fi)
		return -EINVAL;

	return check_which(fi->which) ? : check_pad(sd, fi->pad) ? :
	       check_state(sd, state, fi->which, fi->pad, fi->stream);
}

static int call_get_frame_interval(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, state, fi) ? :
	       sd->ops->pad->get_frame_interval(sd, state, fi);
}

static int call_set_frame_interval(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, state, fi) ? :
	       sd->ops->pad->set_frame_interval(sd, state, fi);
}

static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
			       struct v4l2_mbus_frame_desc *fd)
{
	unsigned int i;
	int ret;

#if defined(CONFIG_MEDIA_CONTROLLER)
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;
#endif

	memset(fd, 0, sizeof(*fd));

	ret = sd->ops->pad->get_frame_desc(sd, pad, fd);
	if (ret)
		return ret;

	dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad,
		fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ?
"parallel" : fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? "CSI-2" : "unknown"); for (i = 0; i < fd->num_entries; i++) { struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i]; char buf[20] = ""; if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2) WARN_ON(snprintf(buf, sizeof(buf), ", vc %u, dt 0x%02x", entry->bus.csi2.vc, entry->bus.csi2.dt) >= sizeof(buf)); dev_dbg(sd->dev, "\tstream %u, code 0x%04x, length %u, flags 0x%04x%s\n", entry->stream, entry->pixelcode, entry->length, entry->flags, buf); } return 0; } static inline int check_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid) { if (!edid) return -EINVAL; if (edid->blocks && edid->edid == NULL) return -EINVAL; return check_pad(sd, edid->pad); } static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid) { return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid); } static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid) { return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid); } static int call_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad, struct v4l2_dv_timings *timings) { if (!timings) return -EINVAL; return check_pad(sd, pad) ? : sd->ops->pad->s_dv_timings(sd, pad, timings); } static int call_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad, struct v4l2_dv_timings *timings) { if (!timings) return -EINVAL; return check_pad(sd, pad) ? : sd->ops->pad->g_dv_timings(sd, pad, timings); } static int call_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad, struct v4l2_dv_timings *timings) { if (!timings) return -EINVAL; return check_pad(sd, pad) ? : sd->ops->pad->query_dv_timings(sd, pad, timings); } static int call_dv_timings_cap(struct v4l2_subdev *sd, struct v4l2_dv_timings_cap *cap) { if (!cap) return -EINVAL; return check_pad(sd, cap->pad) ? : sd->ops->pad->dv_timings_cap(sd, cap); } static int call_enum_dv_timings(struct v4l2_subdev *sd, struct v4l2_enum_dv_timings *dvt) { if (!dvt) return -EINVAL; return check_pad(sd, dvt->pad) ? : sd->ops->pad->enum_dv_timings(sd, dvt); } static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad, struct v4l2_mbus_config *config) { memset(config, 0, sizeof(*config)); return check_pad(sd, pad) ? : sd->ops->pad->get_mbus_config(sd, pad, config); } static int call_s_stream(struct v4l2_subdev *sd, int enable) { int ret; /* * The .s_stream() operation must never be called to start or stop an * already started or stopped subdev. Catch offenders but don't return * an error yet to avoid regressions. */ if (WARN_ON(sd->s_stream_enabled == !!enable)) return 0; ret = sd->ops->video->s_stream(sd, enable); if (!enable && ret < 0) { dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret); ret = 0; } if (!ret) { sd->s_stream_enabled = enable; if (enable) v4l2_subdev_enable_privacy_led(sd); else v4l2_subdev_disable_privacy_led(sd); } return ret; } #ifdef CONFIG_MEDIA_CONTROLLER /* * Create state-management wrapper for pad ops dealing with subdev state. The * wrapper handles the case where the caller does not provide the called * subdev's state. This should be removed when all the callers are fixed. 
 */
#define DEFINE_STATE_WRAPPER(f, arg_type)                                  \
	static int call_##f##_state(struct v4l2_subdev *sd,                \
				    struct v4l2_subdev_state *_state,      \
				    arg_type *arg)                         \
	{                                                                  \
		struct v4l2_subdev_state *state = _state;                  \
		int ret;                                                   \
		if (!_state)                                               \
			state = v4l2_subdev_lock_and_get_active_state(sd); \
		ret = call_##f(sd, state, arg);                            \
		if (!_state && state)                                      \
			v4l2_subdev_unlock_state(state);                   \
		return ret;                                                \
	}

#else /* CONFIG_MEDIA_CONTROLLER */

#define DEFINE_STATE_WRAPPER(f, arg_type)                            \
	static int call_##f##_state(struct v4l2_subdev *sd,          \
				    struct v4l2_subdev_state *state, \
				    arg_type *arg)                   \
	{                                                            \
		return call_##f(sd, state, arg);                     \
	}

#endif /* CONFIG_MEDIA_CONTROLLER */

DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);

static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
	.get_fmt		= call_get_fmt_state,
	.set_fmt		= call_set_fmt_state,
	.enum_mbus_code		= call_enum_mbus_code_state,
	.enum_frame_size	= call_enum_frame_size_state,
	.enum_frame_interval	= call_enum_frame_interval_state,
	.get_selection		= call_get_selection_state,
	.set_selection		= call_set_selection_state,
	.get_frame_interval	= call_get_frame_interval,
	.set_frame_interval	= call_set_frame_interval,
	.get_edid		= call_get_edid,
	.set_edid		= call_set_edid,
	.s_dv_timings		= call_s_dv_timings,
	.g_dv_timings		= call_g_dv_timings,
	.query_dv_timings	= call_query_dv_timings,
	.dv_timings_cap		= call_dv_timings_cap,
	.enum_dv_timings	= call_enum_dv_timings,
	.get_frame_desc		= call_get_frame_desc,
	.get_mbus_config	= call_get_mbus_config,
};

static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
	.s_stream		= call_s_stream,
};

const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
	.pad	= &v4l2_subdev_call_pad_wrappers,
	.video	= &v4l2_subdev_call_video_wrappers,
};
EXPORT_SYMBOL(v4l2_subdev_call_wrappers);

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static struct v4l2_subdev_state *
subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
		       unsigned int cmd, void *arg)
{
	u32 which;

	switch (cmd) {
	default:
		return NULL;
	case VIDIOC_SUBDEV_G_FMT:
	case VIDIOC_SUBDEV_S_FMT:
		which = ((struct v4l2_subdev_format *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_CROP:
	case VIDIOC_SUBDEV_S_CROP:
		which = ((struct v4l2_subdev_crop *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
		which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
		which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
		which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_SELECTION:
	case VIDIOC_SUBDEV_S_SELECTION:
		which = ((struct v4l2_subdev_selection *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_FRAME_INTERVAL:
	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!(subdev_fh->client_caps &
		      V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH))
			fi->which = V4L2_SUBDEV_FORMAT_ACTIVE;

		which = fi->which;
		break;
	}
	case VIDIOC_SUBDEV_G_ROUTING:
	case VIDIOC_SUBDEV_S_ROUTING:
		which = ((struct v4l2_subdev_routing *)arg)->which;
		break;
	}

	return which == V4L2_SUBDEV_FORMAT_TRY ?
	       subdev_fh->state :
	       v4l2_subdev_get_unlocked_active_state(sd);
}

static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
			    struct v4l2_subdev_state *state)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
	bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
	bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
	bool client_supports_streams = subdev_fh->client_caps &
				       V4L2_SUBDEV_CLIENT_CAP_STREAMS;
	int rval;

	/*
	 * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS.
	 * Remove this when the API is no longer experimental.
	 */
	if (!v4l2_subdev_enable_streams_api)
		streams_subdev = false;

	switch (cmd) {
	case VIDIOC_SUBDEV_QUERYCAP: {
		struct v4l2_subdev_capability *cap = arg;

		memset(cap->reserved, 0, sizeof(cap->reserved));
		cap->version = LINUX_VERSION_CODE;
		cap->capabilities =
			(ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
			(streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);

		return 0;
	}

	case VIDIOC_QUERYCTRL:
		/*
		 * TODO: this really should be folded into v4l2_queryctrl (this
		 * currently returns -EINVAL for NULL control handlers).
		 * However, v4l2_queryctrl() is still called directly by
		 * drivers as well and until that has been addressed I believe
		 * it is safer to do the check here. The same is true for the
		 * other control ioctls below.
		 */
		if (!vfh->ctrl_handler)
			return -ENOTTY;

		return v4l2_queryctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERY_EXT_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;

		return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERYMENU:
		if (!vfh->ctrl_handler)
			return -ENOTTY;

		return v4l2_querymenu(vfh->ctrl_handler, arg);

	case VIDIOC_G_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;

		return v4l2_g_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_S_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;

		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_G_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;

		return v4l2_g_ext_ctrls(vfh->ctrl_handler, vdev,
					sd->v4l2_dev->mdev, arg);

	case VIDIOC_S_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;

		return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, vdev,
					sd->v4l2_dev->mdev, arg);

	case VIDIOC_TRY_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;

		return v4l2_try_ext_ctrls(vfh->ctrl_handler, vdev,
					  sd->v4l2_dev->mdev, arg);

	case VIDIOC_DQEVENT:
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;

		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);

	case VIDIOC_SUBSCRIBE_EVENT:
		if (v4l2_subdev_has_op(sd, core, subscribe_event))
			return v4l2_subdev_call(sd, core, subscribe_event,
						vfh, arg);

		if ((sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) &&
		    vfh->ctrl_handler)
			return v4l2_ctrl_subdev_subscribe_event(sd, vfh, arg);

		return -ENOIOCTLCMD;

	case VIDIOC_UNSUBSCRIBE_EVENT:
		if (v4l2_subdev_has_op(sd, core, unsubscribe_event))
			return v4l2_subdev_call(sd, core, unsubscribe_event,
						vfh, arg);

		if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)
			return v4l2_event_subdev_unsubscribe(sd, vfh, arg);

		return -ENOIOCTLCMD;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	case VIDIOC_DBG_G_REGISTER: {
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return v4l2_subdev_call(sd, core, g_register, p);
	}
	case VIDIOC_DBG_S_REGISTER: {
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return v4l2_subdev_call(sd, core, s_register, p);
	}
	case VIDIOC_DBG_G_CHIP_INFO: {
		struct v4l2_dbg_chip_info *p = arg;

		if (p->match.type !=
		    V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
			return -EINVAL;
		if (sd->ops->core && sd->ops->core->s_register)
			p->flags |= V4L2_CHIP_FL_WRITABLE;
		if (sd->ops->core && sd->ops->core->g_register)
			p->flags |= V4L2_CHIP_FL_READABLE;
		strscpy(p->name, sd->name, sizeof(p->name));
		return 0;
	}
#endif

	case VIDIOC_LOG_STATUS: {
		int ret;

		pr_info("%s: ================= START STATUS =================\n",
			sd->name);
		ret = v4l2_subdev_call(sd, core, log_status);
		pr_info("%s: ================== END STATUS ==================\n",
			sd->name);
		return ret;
	}

	case VIDIOC_SUBDEV_G_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (!client_supports_streams)
			format->stream = 0;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0,
		       sizeof(format->format.reserved));

		return v4l2_subdev_call(sd, pad, get_fmt, state, format);
	}

	case VIDIOC_SUBDEV_S_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			format->stream = 0;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0,
		       sizeof(format->format.reserved));

		return v4l2_subdev_call(sd, pad, set_fmt, state, format);
	}

	case VIDIOC_SUBDEV_G_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (!client_supports_streams)
			crop->stream = 0;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.stream = crop->stream;
		sel.target = V4L2_SEL_TGT_CROP;

		rval = v4l2_subdev_call(sd, pad, get_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_S_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			crop->stream = 0;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.stream = crop->stream;
		sel.target = V4L2_SEL_TGT_CROP;
		sel.r = crop->rect;

		rval = v4l2_subdev_call(sd, pad, set_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
		struct v4l2_subdev_mbus_code_enum *code = arg;

		if (!client_supports_streams)
			code->stream = 0;

		memset(code->reserved, 0, sizeof(code->reserved));

		return v4l2_subdev_call(sd, pad, enum_mbus_code, state, code);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
		struct v4l2_subdev_frame_size_enum *fse = arg;

		if (!client_supports_streams)
			fse->stream = 0;

		memset(fse->reserved, 0, sizeof(fse->reserved));

		return v4l2_subdev_call(sd, pad, enum_frame_size, state, fse);
	}

	case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!client_supports_streams)
			fi->stream = 0;

		memset(fi->reserved, 0, sizeof(fi->reserved));

		return v4l2_subdev_call(sd, pad, get_frame_interval, state, fi);
	}

	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!client_supports_streams)
			fi->stream = 0;

		if (fi->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(fi->reserved, 0, sizeof(fi->reserved));

		return v4l2_subdev_call(sd, pad, set_frame_interval, state, fi);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval_enum *fie = arg;

		if (!client_supports_streams)
			fie->stream = 0;

		memset(fie->reserved, 0, sizeof(fie->reserved));

		return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
					fie);
	}

	case VIDIOC_SUBDEV_G_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;
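
		/*
		 * As with the other ioctls above, the stream field is only
		 * honoured if the client opted in to
		 * V4L2_SUBDEV_CLIENT_CAP_STREAMS; otherwise it is forced to 0.
		 */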
		if (!client_supports_streams)
			sel->stream = 0;

		memset(sel->reserved, 0, sizeof(sel->reserved));

		return v4l2_subdev_call(sd, pad, get_selection, state, sel);
	}

	case VIDIOC_SUBDEV_S_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			sel->stream = 0;

		memset(sel->reserved, 0, sizeof(sel->reserved));

		return v4l2_subdev_call(sd, pad, set_selection, state, sel);
	}

	case VIDIOC_G_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, get_edid, edid);
	}

	case VIDIOC_S_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, set_edid, edid);
	}

	case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
		struct v4l2_dv_timings_cap *cap = arg;

		return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
	}

	case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
		struct v4l2_enum_dv_timings *dvt = arg;

		return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
	}

	case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
		return v4l2_subdev_call(sd, pad, query_dv_timings, 0, arg);

	case VIDIOC_SUBDEV_G_DV_TIMINGS:
		return v4l2_subdev_call(sd, pad, g_dv_timings, 0, arg);

	case VIDIOC_SUBDEV_S_DV_TIMINGS:
		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, pad, s_dv_timings, 0, arg);

	case VIDIOC_SUBDEV_G_STD:
		return v4l2_subdev_call(sd, video, g_std, arg);

	case VIDIOC_SUBDEV_S_STD: {
		v4l2_std_id *std = arg;

		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_std, *std);
	}

	case VIDIOC_SUBDEV_ENUMSTD: {
		struct v4l2_standard *p = arg;
		v4l2_std_id id;

		if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
			return -EINVAL;

		return v4l_video_std_enumstd(p, id);
	}

	case VIDIOC_SUBDEV_QUERYSTD:
		return v4l2_subdev_call(sd, video, querystd, arg);

	case VIDIOC_SUBDEV_G_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_krouting *krouting;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		krouting = &state->routing;

		memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
		       krouting->routes,
		       min(krouting->num_routes, routing->len_routes) *
		       sizeof(*krouting->routes));
		routing->num_routes = krouting->num_routes;

		return 0;
	}

	case VIDIOC_SUBDEV_S_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_route *routes =
			(struct v4l2_subdev_route *)(uintptr_t)routing->routes;
		struct v4l2_subdev_krouting krouting = {};
		unsigned int i;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (routing->num_routes > routing->len_routes)
			return -EINVAL;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		for (i = 0; i < routing->num_routes; ++i) {
			const struct v4l2_subdev_route *route = &routes[i];
			const struct media_pad *pads = sd->entity.pads;

			if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
			    route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
				return -EINVAL;

			if (route->sink_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->sink_pad].flags &
			      MEDIA_PAD_FL_SINK))
				return -EINVAL;

			if (route->source_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->source_pad].flags &
			      MEDIA_PAD_FL_SOURCE))
				return -EINVAL;
		}

		/*
		 * If the driver doesn't support setting routing, just return
		 * the routing table.
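		 * (This mirrors what VIDIOC_SUBDEV_G_ROUTING returns, so a
		 * client can still discover the fixed routing.)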
		 */
		if (!v4l2_subdev_has_op(sd, pad, set_routing)) {
			memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
			       state->routing.routes,
			       min(state->routing.num_routes,
				   routing->len_routes) *
			       sizeof(*state->routing.routes));
			routing->num_routes = state->routing.num_routes;

			return 0;
		}

		krouting.num_routes = routing->num_routes;
		krouting.len_routes = routing->len_routes;
		krouting.routes = routes;

		rval = v4l2_subdev_call(sd, pad, set_routing, state,
					routing->which, &krouting);
		if (rval < 0)
			return rval;

		memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
		       state->routing.routes,
		       min(state->routing.num_routes, routing->len_routes) *
		       sizeof(*state->routing.routes));
		routing->num_routes = state->routing.num_routes;

		return 0;
	}

	case VIDIOC_SUBDEV_G_CLIENT_CAP: {
		struct v4l2_subdev_client_capability *client_cap = arg;

		client_cap->capabilities = subdev_fh->client_caps;

		return 0;
	}

	case VIDIOC_SUBDEV_S_CLIENT_CAP: {
		struct v4l2_subdev_client_capability *client_cap = arg;

		/*
		 * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not
		 * enabled. Remove this when streams API is no longer
		 * experimental.
		 */
		if (!v4l2_subdev_enable_streams_api)
			client_cap->capabilities &=
				~V4L2_SUBDEV_CLIENT_CAP_STREAMS;

		/* Filter out unsupported capabilities */
		client_cap->capabilities &=
			(V4L2_SUBDEV_CLIENT_CAP_STREAMS |
			 V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH);

		subdev_fh->client_caps = client_cap->capabilities;

		return 0;
	}

	default:
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}

	return 0;
}

static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->lock;
	long ret = -ENODEV;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;

	if (video_is_registered(vdev)) {
		struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
		struct v4l2_fh *vfh = file->private_data;
		struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
		struct v4l2_subdev_state *state;

		state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);

		if (state)
			v4l2_subdev_lock_state(state);

		ret = subdev_do_ioctl(file, cmd, arg, state);

		if (state)
			v4l2_subdev_unlock_state(state);
	}

	if (lock)
		mutex_unlock(lock);
	return ret;
}

static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
}

#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);

	return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
}
#endif

#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return -ENODEV;
}

#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	return -ENODEV;
}
#endif
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *fh = file->private_data;

	if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
		return EPOLLERR;

	poll_wait(file, &fh->wait, wait);

	if (v4l2_event_pending(fh))
		return EPOLLPRI;

	return 0;
}

const struct v4l2_file_operations v4l2_subdev_fops = {
	.owner = THIS_MODULE,
	.open = subdev_open,
	.unlocked_ioctl = subdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = subdev_compat_ioctl32,
#endif
	.release = subdev_close,
	.poll
	      = subdev_poll,
};

#ifdef CONFIG_MEDIA_CONTROLLER

int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
				      struct fwnode_endpoint *endpoint)
{
	struct fwnode_handle *fwnode;
	struct v4l2_subdev *sd;

	if (!is_media_entity_v4l2_subdev(entity))
		return -EINVAL;

	sd = media_entity_to_v4l2_subdev(entity);

	fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
	fwnode_handle_put(fwnode);

	if (device_match_fwnode(sd->dev, fwnode))
		return endpoint->port;

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);

int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
				      struct media_link *link,
				      struct v4l2_subdev_format *source_fmt,
				      struct v4l2_subdev_format *sink_fmt)
{
	bool pass = true;

	/* The width, height and code must match. */
	if (source_fmt->format.width != sink_fmt->format.width) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: width does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.width, sink_fmt->format.width);
		pass = false;
	}

	if (source_fmt->format.height != sink_fmt->format.height) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: height does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.height, sink_fmt->format.height);
		pass = false;
	}

	if (source_fmt->format.code != sink_fmt->format.code) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
			__func__,
			source_fmt->format.code, sink_fmt->format.code);
		pass = false;
	}

	/*
	 * The field order must match, or the sink field order must be NONE
	 * to support interlaced hardware connected to bridges that support
	 * progressive formats only.
	 */
	if (source_fmt->format.field != sink_fmt->format.field &&
	    sink_fmt->format.field != V4L2_FIELD_NONE) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: field does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.field, sink_fmt->format.field);
		pass = false;
	}

	if (pass)
		return 0;

	dev_dbg(sd->entity.graph_obj.mdev->dev,
		"%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	return -EPIPE;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);

static int v4l2_subdev_link_validate_get_format(struct media_pad *pad,
						u32 stream,
						struct v4l2_subdev_format *fmt,
						bool states_locked)
{
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *sd;
	int ret;

	sd = media_entity_to_v4l2_subdev(pad->entity);

	fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt->pad = pad->index;
	fmt->stream = stream;

	if (states_locked)
		state = v4l2_subdev_get_locked_active_state(sd);
	else
		state = v4l2_subdev_lock_and_get_active_state(sd);

	ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt);

	if (!states_locked && state)
		v4l2_subdev_unlock_state(state);

	return ret;
}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static void __v4l2_link_validate_get_streams(struct media_pad *pad,
					     u64 *streams_mask,
					     bool states_locked)
{
	struct v4l2_subdev_route *route;
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *subdev;

	subdev = media_entity_to_v4l2_subdev(pad->entity);

	*streams_mask = 0;

	if (states_locked)
		state = v4l2_subdev_get_locked_active_state(subdev);
	else
		state = v4l2_subdev_lock_and_get_active_state(subdev);

	if (WARN_ON(!state))
		return;

	for_each_active_route(&state->routing, route) {
		u32 route_pad;
		u32 route_stream;

		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			route_pad = route->source_pad;
			route_stream = route->source_stream;
		} else {
			route_pad = route->sink_pad;
			route_stream = route->sink_stream;
		}

		if (route_pad != pad->index)
			continue;

		*streams_mask
			|= BIT_ULL(route_stream);
	}

	if (!states_locked)
		v4l2_subdev_unlock_state(state);
}
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static void v4l2_link_validate_get_streams(struct media_pad *pad,
					   u64 *streams_mask,
					   bool states_locked)
{
	struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity);

	if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) {
		/* Non-streams subdevs have an implicit stream 0 */
		*streams_mask = BIT_ULL(0);
		return;
	}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	__v4l2_link_validate_get_streams(pad, streams_mask, states_locked);
#else
	/* This shouldn't happen */
	*streams_mask = 0;
#endif
}

static int v4l2_subdev_link_validate_locked(struct media_link *link,
					    bool states_locked)
{
	struct v4l2_subdev *sink_subdev =
		media_entity_to_v4l2_subdev(link->sink->entity);
	struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
	u64 source_streams_mask;
	u64 sink_streams_mask;
	u64 dangling_sink_streams;
	u32 stream;
	int ret;

	dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	v4l2_link_validate_get_streams(link->source, &source_streams_mask,
				       states_locked);
	v4l2_link_validate_get_streams(link->sink, &sink_streams_mask,
				       states_locked);

	/*
	 * It is ok to have more source streams than sink streams as extra
	 * source streams can just be ignored by the receiver, but having extra
	 * sink streams is an error as streams must have a source.
	 */
	dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
				sink_streams_mask;
	if (dangling_sink_streams) {
		dev_err(dev, "Dangling sink streams: mask %#llx\n",
			dangling_sink_streams);
		return -EINVAL;
	}

	/* Validate source and sink stream formats */

	for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) {
		struct v4l2_subdev_format sink_fmt, source_fmt;

		if (!(sink_streams_mask & BIT_ULL(stream)))
			continue;

		dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
			link->source->entity->name, link->source->index, stream,
			link->sink->entity->name, link->sink->index, stream);

		ret = v4l2_subdev_link_validate_get_format(link->source, stream,
							   &source_fmt,
							   states_locked);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->source->entity->name,
				link->source->index, stream);
			continue;
		}

		ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
							   &sink_fmt,
							   states_locked);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->sink->entity->name,
				link->sink->index, stream);
			continue;
		}

		/* TODO: add stream number to link_validate() */
		ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
				       &source_fmt, &sink_fmt);
		if (!ret)
			continue;

		if (ret != -ENOIOCTLCMD)
			return ret;

		ret = v4l2_subdev_link_validate_default(sink_subdev, link,
							&source_fmt,
							&sink_fmt);
		if (ret)
			return ret;
	}

	return 0;
}

int v4l2_subdev_link_validate(struct media_link *link)
{
	struct v4l2_subdev *source_sd, *sink_sd;
	struct v4l2_subdev_state *source_state, *sink_state;
	bool states_locked;
	int ret;

	/*
	 * Links are validated in the context of the sink entity. Usage of this
	 * helper on a sink that is not a subdev is a clear driver bug.
	 */
	if (WARN_ON_ONCE(!is_media_entity_v4l2_subdev(link->sink->entity)))
		return -EINVAL;

	/*
	 * If the source is a video device, delegate link validation to it.
	 * This allows usage of this helper for a subdev connected to a video
	 * output device, provided that the driver implements the video output
	 * device's .link_validate() operation.
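	 *
	 * (This situation arises in video output pipelines, where the video
	 * device is the source end of the link into the first subdev.)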
	 */
	if (is_media_entity_v4l2_video_device(link->source->entity)) {
		struct media_entity *source = link->source->entity;

		if (!source->ops || !source->ops->link_validate) {
			/*
			 * Many existing drivers do not implement the required
			 * .link_validate() operation for their video devices.
			 * Print a warning to get the drivers fixed, and return
			 * 0 to avoid breaking userspace. This should
			 * eventually be turned into a WARN_ON() once all
			 * drivers have been fixed.
			 */
			pr_warn_once("video device '%s' does not implement .link_validate(), driver bug!\n",
				     source->name);
			return 0;
		}

		/*
		 * Avoid infinite loops in case a video device incorrectly uses
		 * this helper function as its .link_validate() handler.
		 */
		if (WARN_ON(source->ops->link_validate == v4l2_subdev_link_validate))
			return -EINVAL;

		return source->ops->link_validate(link);
	}

	/*
	 * If the source is still not a subdev, usage of this helper is a clear
	 * driver bug.
	 */
	if (WARN_ON(!is_media_entity_v4l2_subdev(link->source->entity)))
		return -EINVAL;

	sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
	source_sd = media_entity_to_v4l2_subdev(link->source->entity);

	sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd);
	source_state = v4l2_subdev_get_unlocked_active_state(source_sd);

	states_locked = sink_state && source_state;

	if (states_locked)
		v4l2_subdev_lock_states(sink_state, source_state);

	ret = v4l2_subdev_link_validate_locked(link, states_locked);

	if (states_locked)
		v4l2_subdev_unlock_states(sink_state, source_state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);

bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
				  unsigned int pad0, unsigned int pad1)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct v4l2_subdev_krouting *routing;
	struct v4l2_subdev_state *state;
	unsigned int i;

	state = v4l2_subdev_lock_and_get_active_state(sd);

	routing = &state->routing;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
		    (route->source_pad == pad0 && route->sink_pad == pad1)) {
			v4l2_subdev_unlock_state(state);
			return true;
		}
	}

	v4l2_subdev_unlock_state(state);

	return false;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep);

struct v4l2_subdev_state *
__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
			  struct lock_class_key *lock_key)
{
	struct v4l2_subdev_state *state;
	int ret;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return ERR_PTR(-ENOMEM);

	__mutex_init(&state->_lock, lock_name, lock_key);
	if (sd->state_lock)
		state->lock = sd->state_lock;
	else
		state->lock = &state->_lock;

	state->sd = sd;

	/* Drivers that support streams do not need the legacy pad config */
	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
		state->pads = kvcalloc(sd->entity.num_pads,
				       sizeof(*state->pads), GFP_KERNEL);
		if (!state->pads) {
			ret = -ENOMEM;
			goto err;
		}
	}

	if (sd->internal_ops && sd->internal_ops->init_state) {
		/*
		 * There can be no race at this point, but we lock the state
		 * anyway to satisfy lockdep checks.
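		 * (The state accessors use lockdep_assert_held(), which would
		 * otherwise trigger when .init_state() calls them.)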
		 */
		v4l2_subdev_lock_state(state);
		ret = sd->internal_ops->init_state(sd, state);
		v4l2_subdev_unlock_state(state);

		if (ret)
			goto err;
	}

	return state;

err:
	if (state && state->pads)
		kvfree(state->pads);

	kfree(state);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);

void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
{
	if (!state)
		return;

	mutex_destroy(&state->_lock);

	kfree(state->routing.routes);
	kvfree(state->stream_configs.configs);
	kvfree(state->pads);
	kfree(state);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);

int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
				struct lock_class_key *key)
{
	struct v4l2_subdev_state *state;
	struct device *dev = sd->dev;
	bool has_disable_streams;
	bool has_enable_streams;
	bool has_s_stream;

	/* Check that the subdevice implements the required features */

	has_s_stream = v4l2_subdev_has_op(sd, video, s_stream);
	has_enable_streams = v4l2_subdev_has_op(sd, pad, enable_streams);
	has_disable_streams = v4l2_subdev_has_op(sd, pad, disable_streams);

	if (has_enable_streams != has_disable_streams) {
		dev_err(dev,
			"subdev '%s' must implement both or neither of .enable_streams() and .disable_streams()\n",
			sd->name);
		return -EINVAL;
	}

	if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
		if (has_s_stream && !has_enable_streams) {
			dev_err(dev,
				"subdev '%s' must implement .enable/disable_streams()\n",
				sd->name);

			return -EINVAL;
		}
	}

	if (sd->ctrl_handler)
		sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS;

	state = __v4l2_subdev_state_alloc(sd, name, key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	sd->active_state = state;

	return 0;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);

void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
{
	struct v4l2_async_subdev_endpoint *ase, *ase_tmp;

	__v4l2_subdev_state_free(sd->active_state);
	sd->active_state = NULL;

	/*
	 * Uninitialised sub-device, bail out here.
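	 * (A NULL list head .next means the async endpoint list was never
	 * initialised for this sub-device.)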
	 */
	if (!sd->async_subdev_endpoint_list.next)
		return;

	list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list,
				 async_subdev_endpoint_entry) {
		list_del(&ase->async_subdev_endpoint_entry);

		kfree(ase);
	}
}
EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);

struct v4l2_mbus_framefmt *
__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state,
			       unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON_ONCE(!state))
		return NULL;

	if (state->pads) {
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].format;
	}

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].fmt;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_format);

struct v4l2_rect *
__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad,
			     u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON_ONCE(!state))
		return NULL;

	if (state->pads) {
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].crop;
	}

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].crop;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_crop);

struct v4l2_rect *
__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state,
				unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON_ONCE(!state))
		return NULL;

	if (state->pads) {
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].compose;
	}

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].compose;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_compose);

struct v4l2_fract *
__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state,
				 unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON(!state))
		return NULL;

	lockdep_assert_held(state->lock);

	if (state->pads) {
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].interval;
	}

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].interval;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_interval);

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static int
v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
				const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_stream_configs new_configs = { 0 };
	struct v4l2_subdev_route *route;
	u32 idx;

	/* Count number of formats needed */
	for_each_active_route(routing, route) {
		/*
		 * Each route needs a format on both ends of the route.
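		 * A table with N active routes therefore produces 2 * N
		 * stream configs.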
		 */
		new_configs.num_configs += 2;
	}

	if (new_configs.num_configs) {
		new_configs.configs = kvcalloc(new_configs.num_configs,
					       sizeof(*new_configs.configs),
					       GFP_KERNEL);

		if (!new_configs.configs)
			return -ENOMEM;
	}

	/*
	 * Fill in the 'pad' and 'stream' values for each item in the array
	 * from the routing table.
	 */
	idx = 0;

	for_each_active_route(routing, route) {
		new_configs.configs[idx].pad = route->sink_pad;
		new_configs.configs[idx].stream = route->sink_stream;
		idx++;

		new_configs.configs[idx].pad = route->source_pad;
		new_configs.configs[idx].stream = route->source_stream;
		idx++;
	}

	kvfree(stream_configs->configs);
	*stream_configs = new_configs;

	return 0;
}

int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt;

	fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
	if (!fmt)
		return -EINVAL;

	format->format = *fmt;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);

int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_frame_interval *fi)
{
	struct v4l2_fract *interval;

	interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream);
	if (!interval)
		return -EINVAL;

	fi->interval = *interval;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_frame_interval);

int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *state,
			    const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_krouting *dst = &state->routing;
	const struct v4l2_subdev_krouting *src = routing;
	struct v4l2_subdev_krouting new_routing = { 0 };
	size_t bytes;
	int r;

	if (unlikely(check_mul_overflow((size_t)src->num_routes,
					sizeof(*src->routes), &bytes)))
		return -EOVERFLOW;

	lockdep_assert_held(state->lock);

	if (src->num_routes > 0) {
		new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL);
		if (!new_routing.routes)
			return -ENOMEM;
	}

	new_routing.num_routes = src->num_routes;

	r = v4l2_subdev_init_stream_configs(&state->stream_configs,
					    &new_routing);
	if (r) {
		kfree(new_routing.routes);
		return r;
	}

	kfree(dst->routes);
	*dst = new_routing;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);

struct v4l2_subdev_route *
__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
				struct v4l2_subdev_route *route)
{
	if (route)
		++route;
	else
		route = &routing->routes[0];

	for (; route < routing->routes + routing->num_routes; ++route) {
		if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		return route;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);

int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
				     struct v4l2_subdev_state *state,
				     const struct v4l2_subdev_krouting *routing,
				     const struct v4l2_mbus_framefmt *fmt)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;
	int ret;

	ret = v4l2_subdev_set_routing(sd, state, routing);
	if (ret)
		return ret;

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i)
		stream_configs->configs[i].fmt = *fmt;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);

int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
					  u32 pad, u32 stream, u32 *other_pad,
					  u32 *other_stream)
{
	unsigned int i;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (route->source_pad == pad &&
		    route->source_stream == stream) {
			if (other_pad)
				*other_pad = route->sink_pad;
			if (other_stream)
				*other_stream = route->sink_stream;
			return 0;
		}

		if (route->sink_pad == pad
&& route->sink_stream == stream) { if (other_pad) *other_pad = route->source_pad; if (other_stream) *other_stream = route->source_stream; return 0; } } return -EINVAL; } EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end); struct v4l2_mbus_framefmt * v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state, u32 pad, u32 stream) { u32 other_pad, other_stream; int ret; ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, stream, &other_pad, &other_stream); if (ret) return NULL; return v4l2_subdev_state_get_format(state, other_pad, other_stream); } EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format); u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state, u32 pad0, u32 pad1, u64 *streams) { const struct v4l2_subdev_krouting *routing = &state->routing; struct v4l2_subdev_route *route; u64 streams0 = 0; u64 streams1 = 0; for_each_active_route(routing, route) { if (route->sink_pad == pad0 && route->source_pad == pad1 && (*streams & BIT_ULL(route->sink_stream))) { streams0 |= BIT_ULL(route->sink_stream); streams1 |= BIT_ULL(route->source_stream); } if (route->source_pad == pad0 && route->sink_pad == pad1 && (*streams & BIT_ULL(route->source_stream))) { streams0 |= BIT_ULL(route->source_stream); streams1 |= BIT_ULL(route->sink_stream); } } *streams = streams0; return streams1; } EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams); int v4l2_subdev_routing_validate(struct v4l2_subdev *sd, const struct v4l2_subdev_krouting *routing, enum v4l2_subdev_routing_restriction disallow) { u32 *remote_pads = NULL; unsigned int i, j; int ret = -EINVAL; if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX | V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) { remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads), GFP_KERNEL); if (!remote_pads) return -ENOMEM; for (i = 0; i < sd->entity.num_pads; ++i) remote_pads[i] = U32_MAX; } for (i = 0; i < routing->num_routes; ++i) { const struct v4l2_subdev_route *route = &routing->routes[i]; /* Validate the sink and source pad numbers. */ if (route->sink_pad >= sd->entity.num_pads || !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) { dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n", i, route->sink_pad); goto out; } if (route->source_pad >= sd->entity.num_pads || !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) { dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n", i, route->source_pad); goto out; } /* * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a * sink pad must be routed to a single source pad. */ if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) { if (remote_pads[route->sink_pad] != U32_MAX && remote_pads[route->sink_pad] != route->source_pad) { dev_dbg(sd->dev, "route %u attempts to mix %s streams\n", i, "sink"); goto out; } } /* * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a * source pad must originate from a single sink pad. */ if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) { if (remote_pads[route->source_pad] != U32_MAX && remote_pads[route->source_pad] != route->sink_pad) { dev_dbg(sd->dev, "route %u attempts to mix %s streams\n", i, "source"); goto out; } } /* * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink * side can not do stream multiplexing, i.e. there can be only * a single stream in a sink pad. 
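* For example, with this restriction the route set { 0/0 -> 1/0, * 0/1 -> 1/1 } is rejected, as sink pad 0 would carry two streams.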
*/ if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) { if (remote_pads[route->sink_pad] != U32_MAX) { dev_dbg(sd->dev, "route %u attempts to multiplex on %s pad %u\n", i, "sink", route->sink_pad); goto out; } } /* * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the * source side can not do stream multiplexing, i.e. there can * be only a single stream in a source pad. */ if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) { if (remote_pads[route->source_pad] != U32_MAX) { dev_dbg(sd->dev, "route %u attempts to multiplex on %s pad %u\n", i, "source", route->source_pad); goto out; } } if (remote_pads) { remote_pads[route->sink_pad] = route->source_pad; remote_pads[route->source_pad] = route->sink_pad; } for (j = i + 1; j < routing->num_routes; ++j) { const struct v4l2_subdev_route *r = &routing->routes[j]; /* * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can * originate from the same (sink) stream. */ if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) && route->sink_pad == r->sink_pad && route->sink_stream == r->sink_stream) { dev_dbg(sd->dev, "routes %u and %u originate from same sink (%u/%u)\n", i, j, route->sink_pad, route->sink_stream); goto out; } /* * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end * at the same (source) stream. */ if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) && route->source_pad == r->source_pad && route->source_stream == r->source_stream) { dev_dbg(sd->dev, "routes %u and %u end at same source (%u/%u)\n", i, j, route->source_pad, route->source_stream); goto out; } } } ret = 0; out: kfree(remote_pads); return ret; } EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate); static void v4l2_subdev_collect_streams(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, u32 pad, u64 streams_mask, u64 *found_streams, u64 *enabled_streams) { if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) { *found_streams = BIT_ULL(0); *enabled_streams = (sd->enabled_pads & BIT_ULL(pad)) ? BIT_ULL(0) : 0; return; } *found_streams = 0; *enabled_streams = 0; for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { const struct v4l2_subdev_stream_config *cfg = &state->stream_configs.configs[i]; if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream))) continue; *found_streams |= BIT_ULL(cfg->stream); if (cfg->enabled) *enabled_streams |= BIT_ULL(cfg->stream); } } static void v4l2_subdev_set_streams_enabled(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, u32 pad, u64 streams_mask, bool enabled) { if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) { if (enabled) sd->enabled_pads |= BIT_ULL(pad); else sd->enabled_pads &= ~BIT_ULL(pad); return; } for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { struct v4l2_subdev_stream_config *cfg = &state->stream_configs.configs[i]; if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream))) cfg->enabled = enabled; } } int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad, u64 streams_mask) { struct device *dev = sd->entity.graph_obj.mdev->dev; struct v4l2_subdev_state *state; bool already_streaming; u64 enabled_streams; u64 found_streams; bool use_s_stream; int ret; /* A few basic sanity checks first. */ if (pad >= sd->entity.num_pads) return -EINVAL; if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) return -EOPNOTSUPP; /* * We use a 64-bit bitmask for tracking enabled pads, so only subdevices * with 64 pads or less can be supported. 
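* The explicit range check below mirrors the BIT_ULL(pad) accesses on * sd->enabled_pads done throughout this function.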
*/ if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) return -EOPNOTSUPP; if (!streams_mask) return 0; /* Fallback on .s_stream() if .enable_streams() isn't available. */ use_s_stream = !v4l2_subdev_has_op(sd, pad, enable_streams); if (!use_s_stream) state = v4l2_subdev_lock_and_get_active_state(sd); else state = NULL; /* * Verify that the requested streams exist and that they are not * already enabled. */ v4l2_subdev_collect_streams(sd, state, pad, streams_mask, &found_streams, &enabled_streams); if (found_streams != streams_mask) { dev_dbg(dev, "streams 0x%llx not found on %s:%u\n", streams_mask & ~found_streams, sd->entity.name, pad); ret = -EINVAL; goto done; } if (enabled_streams) { dev_dbg(dev, "streams 0x%llx already enabled on %s:%u\n", enabled_streams, sd->entity.name, pad); ret = -EALREADY; goto done; } dev_dbg(dev, "enable streams %u:%#llx\n", pad, streams_mask); already_streaming = v4l2_subdev_is_streaming(sd); if (!use_s_stream) { /* Call the .enable_streams() operation. */ ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad, streams_mask); } else { /* Start streaming when the first pad is enabled. */ if (!already_streaming) ret = v4l2_subdev_call(sd, video, s_stream, 1); else ret = 0; } if (ret) { dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad, streams_mask, ret); goto done; } /* Mark the streams as enabled. */ v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, true); /* * TODO: When all the drivers have been changed to use * v4l2_subdev_enable_streams() and v4l2_subdev_disable_streams(), * instead of calling .s_stream() operation directly, we can remove * the privacy LED handling from call_s_stream() and do it here * for all cases. */ if (!use_s_stream && !already_streaming) v4l2_subdev_enable_privacy_led(sd); done: if (!use_s_stream) v4l2_subdev_unlock_state(state); return ret; } EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams); int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad, u64 streams_mask) { struct device *dev = sd->entity.graph_obj.mdev->dev; struct v4l2_subdev_state *state; u64 enabled_streams; u64 found_streams; bool use_s_stream; int ret; /* A few basic sanity checks first. */ if (pad >= sd->entity.num_pads) return -EINVAL; if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) return -EOPNOTSUPP; /* * We use a 64-bit bitmask for tracking enabled pads, so only subdevices * with 64 pads or less can be supported. */ if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE) return -EOPNOTSUPP; if (!streams_mask) return 0; /* Fallback on .s_stream() if .disable_streams() isn't available. */ use_s_stream = !v4l2_subdev_has_op(sd, pad, disable_streams); if (!use_s_stream) state = v4l2_subdev_lock_and_get_active_state(sd); else state = NULL; /* * Verify that the requested streams exist and that they are not * already disabled. */ v4l2_subdev_collect_streams(sd, state, pad, streams_mask, &found_streams, &enabled_streams); if (found_streams != streams_mask) { dev_dbg(dev, "streams 0x%llx not found on %s:%u\n", streams_mask & ~found_streams, sd->entity.name, pad); ret = -EINVAL; goto done; } if (enabled_streams != streams_mask) { dev_dbg(dev, "streams 0x%llx already disabled on %s:%u\n", streams_mask & ~enabled_streams, sd->entity.name, pad); ret = -EALREADY; goto done; } dev_dbg(dev, "disable streams %u:%#llx\n", pad, streams_mask); if (!use_s_stream) { /* Call the .disable_streams() operation. */ ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad, streams_mask); } else { /* Stop streaming when the last streams are disabled. 
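* In the .s_stream() fallback there is one implicit stream per pad, so * this checks whether any pad other than @pad is still enabled.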
*/ if (!(sd->enabled_pads & ~BIT_ULL(pad))) ret = v4l2_subdev_call(sd, video, s_stream, 0); else ret = 0; } if (ret) { dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad, streams_mask, ret); goto done; } v4l2_subdev_set_streams_enabled(sd, state, pad, streams_mask, false); done: if (!use_s_stream) { if (!v4l2_subdev_is_streaming(sd)) v4l2_subdev_disable_privacy_led(sd); v4l2_subdev_unlock_state(state); } return ret; } EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams); int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable) { struct v4l2_subdev_state *state; struct v4l2_subdev_route *route; struct media_pad *pad; u64 source_mask = 0; int pad_index = -1; /* * Find the source pad. This helper is meant for subdevs that have a * single source pad, so failures shouldn't happen, but catch them * loudly nonetheless as they indicate a driver bug. */ media_entity_for_each_pad(&sd->entity, pad) { if (pad->flags & MEDIA_PAD_FL_SOURCE) { pad_index = pad->index; break; } } if (WARN_ON(pad_index == -1)) return -EINVAL; if (sd->flags & V4L2_SUBDEV_FL_STREAMS) { /* * As there's a single source pad, just collect all the source * streams. */ state = v4l2_subdev_lock_and_get_active_state(sd); for_each_active_route(&state->routing, route) source_mask |= BIT_ULL(route->source_stream); v4l2_subdev_unlock_state(state); } else { /* * For non-streams subdevices, there's a single implicit stream * per pad. */ source_mask = BIT_ULL(0); } if (enable) return v4l2_subdev_enable_streams(sd, pad_index, source_mask); else return v4l2_subdev_disable_streams(sd, pad_index, source_mask); } EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper); #endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */ #endif /* CONFIG_MEDIA_CONTROLLER */ void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops) { INIT_LIST_HEAD(&sd->list); BUG_ON(!ops); sd->ops = ops; sd->v4l2_dev = NULL; sd->flags = 0; sd->name[0] = '\0'; sd->grp_id = 0; sd->dev_priv = NULL; sd->host_priv = NULL; sd->privacy_led = NULL; INIT_LIST_HEAD(&sd->async_subdev_endpoint_list); #if defined(CONFIG_MEDIA_CONTROLLER) sd->entity.name = sd->name; sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV; sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; #endif } EXPORT_SYMBOL(v4l2_subdev_init); void v4l2_subdev_notify_event(struct v4l2_subdev *sd, const struct v4l2_event *ev) { v4l2_event_queue(sd->devnode, ev); v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev); } EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event); bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd) { struct v4l2_subdev_state *state; if (!v4l2_subdev_has_op(sd, pad, enable_streams)) return sd->s_stream_enabled; if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS)) return !!sd->enabled_pads; state = v4l2_subdev_get_locked_active_state(sd); for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) { const struct v4l2_subdev_stream_config *cfg; cfg = &state->stream_configs.configs[i]; if (cfg->enabled) return true; } return false; } EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming); int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd) { #if IS_REACHABLE(CONFIG_LEDS_CLASS) sd->privacy_led = led_get(sd->dev, "privacy-led"); if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT) return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led), "getting privacy LED\n"); if (!IS_ERR_OR_NULL(sd->privacy_led)) { mutex_lock(&sd->privacy_led->led_access); led_sysfs_disable(sd->privacy_led); led_trigger_remove(sd->privacy_led); led_set_brightness(sd->privacy_led, 0); 
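/* * The LED is now off and under exclusive kernel control: sysfs access * and triggers stay disabled until v4l2_subdev_put_privacy_led(). */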
mutex_unlock(&sd->privacy_led->led_access); } #endif return 0; } EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led); void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd) { #if IS_REACHABLE(CONFIG_LEDS_CLASS) if (!IS_ERR_OR_NULL(sd->privacy_led)) { mutex_lock(&sd->privacy_led->led_access); led_sysfs_enable(sd->privacy_led); mutex_unlock(&sd->privacy_led->led_access); led_put(sd->privacy_led); } #endif } EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);
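/*
 * Editorial note: an illustrative sketch, not part of the file above. It
 * shows how a hypothetical streams-aware driver might implement the
 * .enable_streams() pad operation on top of the helpers above. The names
 * foo_enable_streams(), foo_hw_start(), FOO_PAD_SINK and FOO_PAD_SOURCE
 * are assumptions made for this example only.
 */
#define FOO_PAD_SINK	0
#define FOO_PAD_SOURCE	1

static int foo_enable_streams(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      u32 pad, u64 streams_mask)
{
	u64 source_streams = streams_mask;
	u64 sink_streams;

	/*
	 * Translate the source streams being enabled into the sink streams
	 * that feed them, according to the active routing in @state.
	 */
	sink_streams = v4l2_subdev_state_xlate_streams(state, FOO_PAD_SOURCE,
						       FOO_PAD_SINK,
						       &source_streams);

	/* Start the hardware channels backing those sink streams. */
	return foo_hw_start(sd, sink_streams);
}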
// SPDX-License-Identifier: GPL-2.0+
/*
 * 2002-10-15 Posix Clocks & timers
 * by George Anzinger george@mvista.com
 * Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
* Copyright (C) 2004 Boris Hu * * These are all the functions necessary to implement POSIX clocks & timers */ #include <linux/compat.h> #include <linux/compiler.h> #include <linux/init.h> #include <linux/jhash.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/memblock.h> #include <linux/nospec.h> #include <linux/posix-clock.h> #include <linux/posix-timers.h> #include <linux/prctl.h> #include <linux/sched/task.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/time.h> #include <linux/time_namespace.h> #include <linux/uaccess.h> #include "timekeeping.h" #include "posix-timers.h" static struct kmem_cache *posix_timers_cache; /* * Timers are managed in a hash table for lockless lookup. The hash key is * constructed from current::signal and the timer ID and the timer is * matched against current::signal and the timer ID when walking the hash * bucket list. * * This allows checkpoint/restore to reconstruct the exact timer IDs for * a process. */ struct timer_hash_bucket { spinlock_t lock; struct hlist_head head; }; static struct { struct timer_hash_bucket *buckets; unsigned long mask; } __timer_data __ro_after_init __aligned(2*sizeof(long)); #define timer_buckets (__timer_data.buckets) #define timer_hashmask (__timer_data.mask) static const struct k_clock * const posix_clocks[]; static const struct k_clock *clockid_to_kclock(const clockid_t id); static const struct k_clock clock_realtime, clock_monotonic; #define TIMER_ANY_ID INT_MIN /* SIGEV_THREAD_ID cannot share a bit with the other SIGEV values. */ #if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \ ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD)) #error "SIGEV_THREAD_ID must not share bit with other SIGEV values!" #endif static struct k_itimer *__lock_timer(timer_t timer_id); #define lock_timer(tid) \ ({ struct k_itimer *__timr; \ __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid)); \ __timr; \ }) static inline void unlock_timer(struct k_itimer *timr) { if (likely((timr))) spin_unlock_irq(&timr->it_lock); } #define scoped_timer_get_or_fail(_id) \ scoped_cond_guard(lock_timer, return -EINVAL, _id) #define scoped_timer (scope) DEFINE_CLASS(lock_timer, struct k_itimer *, unlock_timer(_T), __lock_timer(id), timer_t id); DEFINE_CLASS_IS_COND_GUARD(lock_timer); static struct timer_hash_bucket *hash_bucket(struct signal_struct *sig, unsigned int nr) { return &timer_buckets[jhash2((u32 *)&sig, sizeof(sig) / sizeof(u32), nr) & timer_hashmask]; } static struct k_itimer *posix_timer_by_id(timer_t id) { struct signal_struct *sig = current->signal; struct timer_hash_bucket *bucket = hash_bucket(sig, id); struct k_itimer *timer; hlist_for_each_entry_rcu(timer, &bucket->head, t_hash) { /* timer->it_signal can be set concurrently */ if ((READ_ONCE(timer->it_signal) == sig) && (timer->it_id == id)) return timer; } return NULL; } static inline struct signal_struct *posix_sig_owner(const struct k_itimer *timer) { unsigned long val = (unsigned long)timer->it_signal; /* * Mask out bit 0, which acts as invalid marker to prevent * posix_timer_by_id() detecting it as valid. 
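* A half-initialized timer thus still resolves to its owner for hash * removal, while the syscall lookup keeps failing to match it.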
*/ return (struct signal_struct *)(val & ~1UL); } static bool posix_timer_hashed(struct timer_hash_bucket *bucket, struct signal_struct *sig, timer_t id) { struct hlist_head *head = &bucket->head; struct k_itimer *timer; hlist_for_each_entry_rcu(timer, head, t_hash, lockdep_is_held(&bucket->lock)) { if ((posix_sig_owner(timer) == sig) && (timer->it_id == id)) return true; } return false; } static bool posix_timer_add_at(struct k_itimer *timer, struct signal_struct *sig, unsigned int id) { struct timer_hash_bucket *bucket = hash_bucket(sig, id); scoped_guard (spinlock, &bucket->lock) { /* * Validate under the lock as this could have raced against * another thread ending up with the same ID, which is * highly unlikely, but possible. */ if (!posix_timer_hashed(bucket, sig, id)) { /* * Set the timer ID and the signal pointer to make * it identifiable in the hash table. The signal * pointer has bit 0 set to indicate that it is not * yet fully initialized. posix_timer_hashed() * masks this bit out, but the syscall lookup fails * to match due to it being set. This guarantees * that there can't be duplicate timer IDs handed * out. */ timer->it_id = (timer_t)id; timer->it_signal = (struct signal_struct *)((unsigned long)sig | 1UL); hlist_add_head_rcu(&timer->t_hash, &bucket->head); return true; } } return false; } static int posix_timer_add(struct k_itimer *timer, int req_id) { struct signal_struct *sig = current->signal; if (unlikely(req_id != TIMER_ANY_ID)) { if (!posix_timer_add_at(timer, sig, req_id)) return -EBUSY; /* * Move the ID counter past the requested ID, so that after * switching back to normal mode the IDs are outside of the * exact allocated region. That avoids ID collisions on the * next regular timer_create() invocations. */ atomic_set(&sig->next_posix_timer_id, req_id + 1); return req_id; } for (unsigned int cnt = 0; cnt <= INT_MAX; cnt++) { /* Get the next timer ID and clamp it to positive space */ unsigned int id = atomic_fetch_inc(&sig->next_posix_timer_id) & INT_MAX; if (posix_timer_add_at(timer, sig, id)) return id; cond_resched(); } /* POSIX return code when no timer ID could be allocated */ return -EAGAIN; } static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp) { ktime_get_real_ts64(tp); return 0; } static ktime_t posix_get_realtime_ktime(clockid_t which_clock) { return ktime_get_real(); } static int posix_clock_realtime_set(const clockid_t which_clock, const struct timespec64 *tp) { return do_sys_settimeofday64(tp, NULL); } static int posix_clock_realtime_adj(const clockid_t which_clock, struct __kernel_timex *t) { return do_adjtimex(t); } static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp) { ktime_get_ts64(tp); timens_add_monotonic(tp); return 0; } static ktime_t posix_get_monotonic_ktime(clockid_t which_clock) { return ktime_get(); } static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp) { ktime_get_raw_ts64(tp); timens_add_monotonic(tp); return 0; } static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp) { ktime_get_coarse_real_ts64(tp); return 0; } static int posix_get_monotonic_coarse(clockid_t which_clock, struct timespec64 *tp) { ktime_get_coarse_ts64(tp); timens_add_monotonic(tp); return 0; } static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp) { *tp = ktime_to_timespec64(KTIME_LOW_RES); return 0; } static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp) { ktime_get_boottime_ts64(tp); 
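/* Shift the result into the caller's time namespace. */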
timens_add_boottime(tp); return 0; } static ktime_t posix_get_boottime_ktime(const clockid_t which_clock) { return ktime_get_boottime(); } static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp) { ktime_get_clocktai_ts64(tp); return 0; } static ktime_t posix_get_tai_ktime(clockid_t which_clock) { return ktime_get_clocktai(); } static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp) { tp->tv_sec = 0; tp->tv_nsec = hrtimer_resolution; return 0; } static __init int init_posix_timers(void) { posix_timers_cache = kmem_cache_create("posix_timers_cache", sizeof(struct k_itimer), __alignof__(struct k_itimer), SLAB_ACCOUNT, NULL); return 0; } __initcall(init_posix_timers); /* * The siginfo si_overrun field and the return value of timer_getoverrun(2) * are of type int. Clamp the overrun value to INT_MAX */ static inline int timer_overrun_to_int(struct k_itimer *timr) { if (timr->it_overrun_last > (s64)INT_MAX) return INT_MAX; return (int)timr->it_overrun_last; } static void common_hrtimer_rearm(struct k_itimer *timr) { struct hrtimer *timer = &timr->it.real.timer; timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(), timr->it_interval); hrtimer_restart(timer); } static bool __posixtimer_deliver_signal(struct kernel_siginfo *info, struct k_itimer *timr) { guard(spinlock)(&timr->it_lock); /* * Check if the timer is still alive or whether it got modified * since the signal was queued. In either case, don't rearm and * drop the signal. */ if (timr->it_signal_seq != timr->it_sigqueue_seq || WARN_ON_ONCE(!posixtimer_valid(timr))) return false; if (!timr->it_interval || WARN_ON_ONCE(timr->it_status != POSIX_TIMER_REQUEUE_PENDING)) return true; timr->kclock->timer_rearm(timr); timr->it_status = POSIX_TIMER_ARMED; timr->it_overrun_last = timr->it_overrun; timr->it_overrun = -1LL; ++timr->it_signal_seq; info->si_overrun = timer_overrun_to_int(timr); return true; } /* * This function is called from the signal delivery code. It decides * whether the signal should be dropped and rearms interval timers. The * timer can be unconditionally accessed as there is a reference held on * it. */ bool posixtimer_deliver_signal(struct kernel_siginfo *info, struct sigqueue *timer_sigq) { struct k_itimer *timr = container_of(timer_sigq, struct k_itimer, sigq); bool ret; /* * Release siglock to ensure proper locking order versus * timr::it_lock. Keep interrupts disabled. */ spin_unlock(&current->sighand->siglock); ret = __posixtimer_deliver_signal(info, timr); /* Drop the reference which was acquired when the signal was queued */ posixtimer_putref(timr); spin_lock(&current->sighand->siglock); return ret; } void posix_timer_queue_signal(struct k_itimer *timr) { lockdep_assert_held(&timr->it_lock); if (!posixtimer_valid(timr)) return; timr->it_status = timr->it_interval ? POSIX_TIMER_REQUEUE_PENDING : POSIX_TIMER_DISARMED; posixtimer_send_sigqueue(timr); } /* * This function gets called when a POSIX.1b interval timer expires from * the HRTIMER interrupt (soft interrupt on RT kernels). * * Handles CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME and CLOCK_TAI * based timers. 
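* CPU clocks and alarm clocks have their own expiry paths, in * posix-cpu-timers.c and alarmtimer.c respectively.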
*/ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) { struct k_itimer *timr = container_of(timer, struct k_itimer, it.real.timer); guard(spinlock_irqsave)(&timr->it_lock); posix_timer_queue_signal(timr); return HRTIMER_NORESTART; } long posixtimer_create_prctl(unsigned long ctrl) { switch (ctrl) { case PR_TIMER_CREATE_RESTORE_IDS_OFF: current->signal->timer_create_restore_ids = 0; return 0; case PR_TIMER_CREATE_RESTORE_IDS_ON: current->signal->timer_create_restore_ids = 1; return 0; case PR_TIMER_CREATE_RESTORE_IDS_GET: return current->signal->timer_create_restore_ids; } return -EINVAL; } static struct pid *good_sigevent(sigevent_t * event) { struct pid *pid = task_tgid(current); struct task_struct *rtn; switch (event->sigev_notify) { case SIGEV_SIGNAL | SIGEV_THREAD_ID: pid = find_vpid(event->sigev_notify_thread_id); rtn = pid_task(pid, PIDTYPE_PID); if (!rtn || !same_thread_group(rtn, current)) return NULL; fallthrough; case SIGEV_SIGNAL: case SIGEV_THREAD: if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX) return NULL; fallthrough; case SIGEV_NONE: return pid; default: return NULL; } } static struct k_itimer *alloc_posix_timer(void) { struct k_itimer *tmr; if (unlikely(!posix_timers_cache)) return NULL; tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL); if (!tmr) return tmr; if (unlikely(!posixtimer_init_sigqueue(&tmr->sigq))) { kmem_cache_free(posix_timers_cache, tmr); return NULL; } rcuref_init(&tmr->rcuref, 1); return tmr; } void posixtimer_free_timer(struct k_itimer *tmr) { put_pid(tmr->it_pid); if (tmr->sigq.ucounts) dec_rlimit_put_ucounts(tmr->sigq.ucounts, UCOUNT_RLIMIT_SIGPENDING); kfree_rcu(tmr, rcu); } static void posix_timer_unhash_and_free(struct k_itimer *tmr) { struct timer_hash_bucket *bucket = hash_bucket(posix_sig_owner(tmr), tmr->it_id); scoped_guard (spinlock, &bucket->lock) hlist_del_rcu(&tmr->t_hash); posixtimer_putref(tmr); } static int common_timer_create(struct k_itimer *new_timer) { hrtimer_setup(&new_timer->it.real.timer, posix_timer_fn, new_timer->it_clock, 0); return 0; } /* Create a POSIX.1b interval timer. */ static int do_timer_create(clockid_t which_clock, struct sigevent *event, timer_t __user *created_timer_id) { const struct k_clock *kc = clockid_to_kclock(which_clock); timer_t req_id = TIMER_ANY_ID; struct k_itimer *new_timer; int error, new_timer_id; if (!kc) return -EINVAL; if (!kc->timer_create) return -EOPNOTSUPP; new_timer = alloc_posix_timer(); if (unlikely(!new_timer)) return -EAGAIN; spin_lock_init(&new_timer->it_lock); /* Special case for CRIU to restore timers with a given timer ID. */ if (unlikely(current->signal->timer_create_restore_ids)) { if (copy_from_user(&req_id, created_timer_id, sizeof(req_id))) return -EFAULT; /* Valid IDs are 0..INT_MAX */ if ((unsigned int)req_id > INT_MAX) return -EINVAL; } /* * Add the timer to the hash table. The timer is not yet valid * after insertion, but has a unique ID allocated. 
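* ("Not yet valid" because bit 0 of new_timer::it_signal is still set, * see posix_timer_add_at().)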
*/ new_timer_id = posix_timer_add(new_timer, req_id); if (new_timer_id < 0) { posixtimer_free_timer(new_timer); return new_timer_id; } new_timer->it_clock = which_clock; new_timer->kclock = kc; new_timer->it_overrun = -1LL; if (event) { scoped_guard (rcu) new_timer->it_pid = get_pid(good_sigevent(event)); if (!new_timer->it_pid) { error = -EINVAL; goto out; } new_timer->it_sigev_notify = event->sigev_notify; new_timer->sigq.info.si_signo = event->sigev_signo; new_timer->sigq.info.si_value = event->sigev_value; } else { new_timer->it_sigev_notify = SIGEV_SIGNAL; new_timer->sigq.info.si_signo = SIGALRM; new_timer->sigq.info.si_value.sival_int = new_timer->it_id; new_timer->it_pid = get_pid(task_tgid(current)); } if (new_timer->it_sigev_notify & SIGEV_THREAD_ID) new_timer->it_pid_type = PIDTYPE_PID; else new_timer->it_pid_type = PIDTYPE_TGID; new_timer->sigq.info.si_tid = new_timer->it_id; new_timer->sigq.info.si_code = SI_TIMER; if (copy_to_user(created_timer_id, &new_timer_id, sizeof (new_timer_id))) { error = -EFAULT; goto out; } /* * After successful copy out, the timer ID is visible to user space * now but not yet valid because the new_timer::it_signal low order bit is 1. * * Complete the initialization with the clock specific create * callback. */ error = kc->timer_create(new_timer); if (error) goto out; /* * timer::it_lock ensures that __lock_timer() observes a fully * initialized timer when it observes a valid timer::it_signal. * * sighand::siglock is required to protect signal::posix_timers. */ scoped_guard (spinlock_irq, &new_timer->it_lock) { guard(spinlock)(&current->sighand->siglock); /* * new_timer::it_signal contains the signal pointer with * bit 0 set, which makes it invalid for syscall operations. * Store the unmodified signal pointer to make it valid. */ WRITE_ONCE(new_timer->it_signal, current->signal); hlist_add_head_rcu(&new_timer->list, &current->signal->posix_timers); } /* * After unlocking, @new_timer is subject to concurrent removal and * cannot be touched anymore. */ return 0; out: posix_timer_unhash_and_free(new_timer); return error; } SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, struct sigevent __user *, timer_event_spec, timer_t __user *, created_timer_id) { if (timer_event_spec) { sigevent_t event; if (copy_from_user(&event, timer_event_spec, sizeof (event))) return -EFAULT; return do_timer_create(which_clock, &event, created_timer_id); } return do_timer_create(which_clock, NULL, created_timer_id); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock, struct compat_sigevent __user *, timer_event_spec, timer_t __user *, created_timer_id) { if (timer_event_spec) { sigevent_t event; if (get_compat_sigevent(&event, timer_event_spec)) return -EFAULT; return do_timer_create(which_clock, &event, created_timer_id); } return do_timer_create(which_clock, NULL, created_timer_id); } #endif static struct k_itimer *__lock_timer(timer_t timer_id) { struct k_itimer *timr; /* * timer_t could be any type >= int and we want to make sure any * @timer_id outside positive int range fails lookup. */ if ((unsigned long long)timer_id > INT_MAX) return NULL; /* * The hash lookup and the timers are RCU protected. * * Timers are added to the hash in invalid state where * timr::it_signal is marked invalid. timer::it_signal is only set * after the rest of the initialization succeeded.
* * Timer destruction happens in steps: * 1) Set timr::it_signal marked invalid with timr::it_lock held * 2) Release timr::it_lock * 3) Remove from the hash under hash_lock * 4) Put the reference count. * * The reference count might not drop to zero if timr::sigq is * queued. In that case the signal delivery or flush will put the * last reference count. * * When the reference count reaches zero, the timer is scheduled * for RCU removal after the grace period. * * Holding rcu_read_lock() across the lookup ensures that * the timer cannot be freed. * * The lookup validates locklessly that timr::it_signal == * current::it_signal and timr::it_id == @timer_id. timr::it_id * can't change, but timr::it_signal can become invalid during * destruction, which makes the locked check fail. */ guard(rcu)(); timr = posix_timer_by_id(timer_id); if (timr) { spin_lock_irq(&timr->it_lock); /* * Validate under timr::it_lock that timr::it_signal is * still valid. Pairs with #1 above. */ if (timr->it_signal == current->signal) return timr; spin_unlock_irq(&timr->it_lock); } return NULL; } static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now) { struct hrtimer *timer = &timr->it.real.timer; return __hrtimer_expires_remaining_adjusted(timer, now); } static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now) { struct hrtimer *timer = &timr->it.real.timer; return hrtimer_forward(timer, now, timr->it_interval); } /* * Get the time remaining on a POSIX.1b interval timer. * * Two issues to handle here: * * 1) The timer has a requeue pending. The return value must appear as * if the timer has been requeued right now. * * 2) The timer is a SIGEV_NONE timer. These timers are never enqueued * into the hrtimer queue and therefore never expired. Emulate expiry * here taking #1 into account. */ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting) { const struct k_clock *kc = timr->kclock; ktime_t now, remaining, iv; bool sig_none; sig_none = timr->it_sigev_notify == SIGEV_NONE; iv = timr->it_interval; /* interval timer ? */ if (iv) { cur_setting->it_interval = ktime_to_timespec64(iv); } else if (timr->it_status == POSIX_TIMER_DISARMED) { /* * SIGEV_NONE oneshot timers are never queued and therefore * timr->it_status is always DISARMED. The check below * vs. remaining time will handle this case. * * For all other timers there is nothing to update here, so * return. */ if (!sig_none) return; } now = kc->clock_get_ktime(timr->it_clock); /* * If this is an interval timer and either has requeue pending or * is a SIGEV_NONE timer move the expiry time forward by intervals, * so expiry is > now. */ if (iv && timr->it_status != POSIX_TIMER_ARMED) timr->it_overrun += kc->timer_forward(timr, now); remaining = kc->timer_remaining(timr, now); /* * As @now is retrieved before a possible timer_forward() and * cannot be reevaluated by the compiler @remaining is based on the * same @now value. Therefore @remaining is consistent vs. @now. * * Consequently all interval timers, i.e. @iv > 0, cannot have a * remaining time <= 0 because timer_forward() guarantees to move * them forward so that the next timer expiry is > @now. */ if (remaining <= 0) { /* * A single shot SIGEV_NONE timer must return 0, when it is * expired! Timers which have a real signal delivery mode * must return a remaining time greater than 0 because the * signal has not yet been delivered. 
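* For example, a one-shot timer which fired but whose signal has not * been delivered yet reports a remaining time of 1 nsec instead of 0.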
*/ if (!sig_none) cur_setting->it_value.tv_nsec = 1; } else { cur_setting->it_value = ktime_to_timespec64(remaining); } } static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting) { memset(setting, 0, sizeof(*setting)); scoped_timer_get_or_fail(timer_id) scoped_timer->kclock->timer_get(scoped_timer, setting); return 0; } /* Get the time remaining on a POSIX.1b interval timer. */ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, struct __kernel_itimerspec __user *, setting) { struct itimerspec64 cur_setting; int ret = do_timer_gettime(timer_id, &cur_setting); if (!ret) { if (put_itimerspec64(&cur_setting, setting)) ret = -EFAULT; } return ret; } #ifdef CONFIG_COMPAT_32BIT_TIME SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id, struct old_itimerspec32 __user *, setting) { struct itimerspec64 cur_setting; int ret = do_timer_gettime(timer_id, &cur_setting); if (!ret) { if (put_old_itimerspec32(&cur_setting, setting)) ret = -EFAULT; } return ret; } #endif /** * sys_timer_getoverrun - Get the number of overruns of a POSIX.1b interval timer * @timer_id: The timer ID which identifies the timer * * The "overrun count" of a timer is one plus the number of expiration * intervals which have elapsed between the first expiry, which queues the * signal and the actual signal delivery. On signal delivery the "overrun * count" is calculated and cached, so it can be returned directly here. * * As this is relative to the last queued signal the returned overrun count * is meaningless outside of the signal delivery path and even there it * does not accurately reflect the current state when user space evaluates * it. * * Returns: * -EINVAL @timer_id is invalid * 1..INT_MAX The number of overruns related to the last delivered signal */ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id) { scoped_timer_get_or_fail(timer_id) return timer_overrun_to_int(scoped_timer); } static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires, bool absolute, bool sigev_none) { struct hrtimer *timer = &timr->it.real.timer; enum hrtimer_mode mode; mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL; /* * Posix magic: Relative CLOCK_REALTIME timers are not affected by * clock modifications, so they become CLOCK_MONOTONIC based under the * hood. See hrtimer_setup(). Update timr->kclock, so the generic * functions which use timr->kclock->clock_get_*() work. * * Note: it_clock stays unmodified, because the next timer_set() might * use ABSTIME, so it needs to switch back. */ if (timr->it_clock == CLOCK_REALTIME) timr->kclock = absolute ? &clock_realtime : &clock_monotonic; hrtimer_setup(&timr->it.real.timer, posix_timer_fn, timr->it_clock, mode); if (!absolute) expires = ktime_add_safe(expires, timer->base->get_time()); hrtimer_set_expires(timer, expires); if (!sigev_none) hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } static int common_hrtimer_try_to_cancel(struct k_itimer *timr) { return hrtimer_try_to_cancel(&timr->it.real.timer); } static void common_timer_wait_running(struct k_itimer *timer) { hrtimer_cancel_wait_running(&timer->it.real.timer); } /* * On PREEMPT_RT this prevents priority inversion and a potential livelock * against the ksoftirqd thread in case that ksoftirqd gets preempted while * executing a hrtimer callback. * * See the comments in hrtimer_cancel_wait_running(). For PREEMPT_RT=n this * just results in a cpu_relax(). * * For POSIX CPU timers with CONFIG_POSIX_CPU_TIMERS_TASK_WORK=n this is * just a cpu_relax(). 
With CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y this * prevents spinning on an eventually scheduled out task and a livelock * when the task which tries to delete or disarm the timer has preempted * the task which runs the expiry in task work context. */ static void timer_wait_running(struct k_itimer *timer) { /* * kc->timer_wait_running() might drop RCU lock. So @timer * cannot be touched anymore after the function returns! */ timer->kclock->timer_wait_running(timer); } /* * Set up the new interval and reset the signal delivery data */ void posix_timer_set_common(struct k_itimer *timer, struct itimerspec64 *new_setting) { if (new_setting->it_value.tv_sec || new_setting->it_value.tv_nsec) timer->it_interval = timespec64_to_ktime(new_setting->it_interval); else timer->it_interval = 0; /* Reset overrun accounting */ timer->it_overrun_last = 0; timer->it_overrun = -1LL; } /* Set a POSIX.1b interval timer. */ int common_timer_set(struct k_itimer *timr, int flags, struct itimerspec64 *new_setting, struct itimerspec64 *old_setting) { const struct k_clock *kc = timr->kclock; bool sigev_none; ktime_t expires; if (old_setting) common_timer_get(timr, old_setting); /* * Careful here. On SMP systems the timer expiry function could be * active and spinning on timr->it_lock. */ if (kc->timer_try_to_cancel(timr) < 0) return TIMER_RETRY; timr->it_status = POSIX_TIMER_DISARMED; posix_timer_set_common(timr, new_setting); /* Keep timer disarmed when it_value is zero */ if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec) return 0; expires = timespec64_to_ktime(new_setting->it_value); if (flags & TIMER_ABSTIME) expires = timens_ktime_to_host(timr->it_clock, expires); sigev_none = timr->it_sigev_notify == SIGEV_NONE; kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none); if (!sigev_none) timr->it_status = POSIX_TIMER_ARMED; return 0; } static int do_timer_settime(timer_t timer_id, int tmr_flags, struct itimerspec64 *new_spec64, struct itimerspec64 *old_spec64) { if (!timespec64_valid(&new_spec64->it_interval) || !timespec64_valid(&new_spec64->it_value)) return -EINVAL; if (old_spec64) memset(old_spec64, 0, sizeof(*old_spec64)); for (; ; old_spec64 = NULL) { struct k_itimer *timr; scoped_timer_get_or_fail(timer_id) { timr = scoped_timer; if (old_spec64) old_spec64->it_interval = ktime_to_timespec64(timr->it_interval); /* Prevent signal delivery and rearming. */ timr->it_signal_seq++; int ret = timr->kclock->timer_set(timr, tmr_flags, new_spec64, old_spec64); if (ret != TIMER_RETRY) return ret; /* Protect the timer from being freed when leaving the lock scope */ rcu_read_lock(); } timer_wait_running(timr); rcu_read_unlock(); } } /* Set a POSIX.1b interval timer */ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, const struct __kernel_itimerspec __user *, new_setting, struct __kernel_itimerspec __user *, old_setting) { struct itimerspec64 new_spec, old_spec, *rtn; int error = 0; if (!new_setting) return -EINVAL; if (get_itimerspec64(&new_spec, new_setting)) return -EFAULT; rtn = old_setting ? &old_spec : NULL; error = do_timer_settime(timer_id, flags, &new_spec, rtn); if (!error && old_setting) { if (put_itimerspec64(&old_spec, old_setting)) error = -EFAULT; } return error; } #ifdef CONFIG_COMPAT_32BIT_TIME SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags, struct old_itimerspec32 __user *, new, struct old_itimerspec32 __user *, old) { struct itimerspec64 new_spec, old_spec; struct itimerspec64 *rtn = old ? 
&old_spec : NULL; int error = 0; if (!new) return -EINVAL; if (get_old_itimerspec32(&new_spec, new)) return -EFAULT; error = do_timer_settime(timer_id, flags, &new_spec, rtn); if (!error && old) { if (put_old_itimerspec32(&old_spec, old)) error = -EFAULT; } return error; } #endif int common_timer_del(struct k_itimer *timer) { const struct k_clock *kc = timer->kclock; if (kc->timer_try_to_cancel(timer) < 0) return TIMER_RETRY; timer->it_status = POSIX_TIMER_DISARMED; return 0; } /* * If the deleted timer is on the ignored list, remove it and * drop the associated reference. */ static inline void posix_timer_cleanup_ignored(struct k_itimer *tmr) { if (!hlist_unhashed(&tmr->ignored_list)) { hlist_del_init(&tmr->ignored_list); posixtimer_putref(tmr); } } static void posix_timer_delete(struct k_itimer *timer) { /* * Invalidate the timer, remove it from the linked list and remove * it from the ignored list if pending. * * The invalidation must be written with siglock held so that the * signal code observes the invalidated timer::it_signal in * do_sigaction(), which prevents it from moving a pending signal * of a deleted timer to the ignore list. * * The invalidation also prevents signal queueing, signal delivery * and therefore rearming from the signal delivery path. * * A concurrent lookup can still find the timer in the hash, but it * will check timer::it_signal with timer::it_lock held and observe * bit 0 set, which invalidates it. That also prevents the timer ID * from being handed out before this timer is completely gone. */ timer->it_signal_seq++; scoped_guard (spinlock, &current->sighand->siglock) { unsigned long sig = (unsigned long)timer->it_signal | 1UL; WRITE_ONCE(timer->it_signal, (struct signal_struct *)sig); hlist_del_rcu(&timer->list); posix_timer_cleanup_ignored(timer); } while (timer->kclock->timer_del(timer) == TIMER_RETRY) { guard(rcu)(); spin_unlock_irq(&timer->it_lock); timer_wait_running(timer); spin_lock_irq(&timer->it_lock); } } /* Delete a POSIX.1b interval timer. */ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id) { struct k_itimer *timer; scoped_timer_get_or_fail(timer_id) { timer = scoped_timer; posix_timer_delete(timer); } /* Remove it from the hash, which frees up the timer ID */ posix_timer_unhash_and_free(timer); return 0; } /* * Invoked from do_exit() when the last thread of a thread group exits. * At that point no other task can access the timers of the dying * task anymore. */ void exit_itimers(struct task_struct *tsk) { struct hlist_head timers; struct hlist_node *next; struct k_itimer *timer; /* Clear restore mode for exec() */ tsk->signal->timer_create_restore_ids = 0; if (hlist_empty(&tsk->signal->posix_timers)) return; /* Protect against concurrent read via /proc/$PID/timers */ scoped_guard (spinlock_irq, &tsk->sighand->siglock) hlist_move_list(&tsk->signal->posix_timers, &timers); /* The timers are no longer accessible via tsk::signal */ hlist_for_each_entry_safe(timer, next, &timers, list) { scoped_guard (spinlock_irq, &timer->it_lock) posix_timer_delete(timer); posix_timer_unhash_and_free(timer); cond_resched(); } /* * There should be no timers on the ignored list. posix_timer_delete() has * mopped them up.
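* Should the warning trigger anyway, the list is drained below so the * pending references are not leaked.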
*/ if (!WARN_ON_ONCE(!hlist_empty(&tsk->signal->ignored_posix_timers))) return; hlist_move_list(&tsk->signal->ignored_posix_timers, &timers); while (!hlist_empty(&timers)) { posix_timer_cleanup_ignored(hlist_entry(timers.first, struct k_itimer, ignored_list)); } } SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock, const struct __kernel_timespec __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 new_tp; if (!kc || !kc->clock_set) return -EINVAL; if (get_timespec64(&new_tp, tp)) return -EFAULT; /* * Permission checks have to be done inside the clock specific * setter callback. */ return kc->clock_set(which_clock, &new_tp); } SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock, struct __kernel_timespec __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 kernel_tp; int error; if (!kc) return -EINVAL; error = kc->clock_get_timespec(which_clock, &kernel_tp); if (!error && put_timespec64(&kernel_tp, tp)) error = -EFAULT; return error; } int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex *ktx) { const struct k_clock *kc = clockid_to_kclock(which_clock); if (!kc) return -EINVAL; if (!kc->clock_adj) return -EOPNOTSUPP; return kc->clock_adj(which_clock, ktx); } SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock, struct __kernel_timex __user *, utx) { struct __kernel_timex ktx; int err; if (copy_from_user(&ktx, utx, sizeof(ktx))) return -EFAULT; err = do_clock_adjtime(which_clock, &ktx); if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx))) return -EFAULT; return err; } /** * sys_clock_getres - Get the resolution of a clock * @which_clock: The clock to get the resolution for * @tp: Pointer to a user space timespec64 for storage * * POSIX defines: * * "The clock_getres() function shall return the resolution of any * clock. Clock resolutions are implementation-defined and cannot be set by * a process. If the argument res is not NULL, the resolution of the * specified clock shall be stored in the location pointed to by res. If * res is NULL, the clock resolution is not returned. If the time argument * of clock_settime() is not a multiple of res, then the value is truncated * to a multiple of res." * * Due to the various hardware constraints the real resolution can vary * wildly and even change during runtime when the underlying devices are * replaced. The kernel can also use hardware devices with different * resolutions for reading the time and for arming timers. * * The kernel therefore deviates from the POSIX spec in various aspects: * * 1) The resolution returned to user space * * For CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME, CLOCK_TAI, * CLOCK_REALTIME_ALARM, CLOCK_BOOTTIME_ALARM and CLOCK_MONOTONIC_RAW * the kernel differentiates only two cases: * * I) Low resolution mode: * * When high resolution timers are disabled at compile or runtime * the resolution returned is nanoseconds per tick, which represents * the precision at which timers expire. * * II) High resolution mode: * * When high resolution timers are enabled the resolution returned * is always one nanosecond independent of the actual resolution of * the underlying hardware devices. * * For CLOCK_*_ALARM the actual resolution depends on system * state. When the system is running the resolution is the same as the * resolution of the other clocks.
During suspend the actual * resolution is the resolution of the underlying RTC device which * might be way less precise than the clockevent device used during * running state. * * For CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE the resolution * returned is always nanoseconds per tick. * * For CLOCK_PROCESS_CPUTIME and CLOCK_THREAD_CPUTIME the resolution * returned is always one nanosecond under the assumption that the * underlying scheduler clock has a better resolution than nanoseconds * per tick. * * For dynamic POSIX clocks (PTP devices) the resolution returned is * always one nanosecond. * * 2) Effect on sys_clock_settime() * * The kernel does not truncate the time which is handed in to * sys_clock_settime(). The kernel internal timekeeping always uses * nanosecond precision independent of the clocksource device which is * used to read the time from. The resolution of that device only * affects the precision of the time returned by sys_clock_gettime(). * * Returns: * 0 Success. @tp contains the resolution * -EINVAL @which_clock is not a valid clock ID * -EFAULT Copying the resolution to @tp faulted * -ENODEV Dynamic POSIX clock is not backed by a device * -EOPNOTSUPP Dynamic POSIX clock does not support getres() */ SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock, struct __kernel_timespec __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 rtn_tp; int error; if (!kc) return -EINVAL; error = kc->clock_getres(which_clock, &rtn_tp); if (!error && tp && put_timespec64(&rtn_tp, tp)) error = -EFAULT; return error; } #ifdef CONFIG_COMPAT_32BIT_TIME SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock, struct old_timespec32 __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 ts; if (!kc || !kc->clock_set) return -EINVAL; if (get_old_timespec32(&ts, tp)) return -EFAULT; return kc->clock_set(which_clock, &ts); } SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock, struct old_timespec32 __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 ts; int err; if (!kc) return -EINVAL; err = kc->clock_get_timespec(which_clock, &ts); if (!err && put_old_timespec32(&ts, tp)) err = -EFAULT; return err; } SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock, struct old_timex32 __user *, utp) { struct __kernel_timex ktx; int err; err = get_old_timex32(&ktx, utp); if (err) return err; err = do_clock_adjtime(which_clock, &ktx); if (err >= 0 && put_old_timex32(utp, &ktx)) return -EFAULT; return err; } SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock, struct old_timespec32 __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 ts; int err; if (!kc) return -EINVAL; err = kc->clock_getres(which_clock, &ts); if (!err && tp && put_old_timespec32(&ts, tp)) return -EFAULT; return err; } #endif /* * sys_clock_nanosleep() for CLOCK_REALTIME and CLOCK_TAI */ static int common_nsleep(const clockid_t which_clock, int flags, const struct timespec64 *rqtp) { ktime_t texp = timespec64_to_ktime(*rqtp); return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL, which_clock); } /* * sys_clock_nanosleep() for CLOCK_MONOTONIC and CLOCK_BOOTTIME * * Absolute nanosleeps for these clocks are time-namespace adjusted.
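* i.e. the expiry is converted from the task's time namespace to the * host time line via timens_ktime_to_host() before arming the hrtimer.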
*/ static int common_nsleep_timens(const clockid_t which_clock, int flags, const struct timespec64 *rqtp) { ktime_t texp = timespec64_to_ktime(*rqtp); if (flags & TIMER_ABSTIME) texp = timens_ktime_to_host(which_clock, texp); return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL, which_clock); } SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags, const struct __kernel_timespec __user *, rqtp, struct __kernel_timespec __user *, rmtp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 t; if (!kc) return -EINVAL; if (!kc->nsleep) return -EOPNOTSUPP; if (get_timespec64(&t, rqtp)) return -EFAULT; if (!timespec64_valid(&t)) return -EINVAL; if (flags & TIMER_ABSTIME) rmtp = NULL; current->restart_block.fn = do_no_restart_syscall; current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; current->restart_block.nanosleep.rmtp = rmtp; return kc->nsleep(which_clock, flags, &t); } #ifdef CONFIG_COMPAT_32BIT_TIME SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags, struct old_timespec32 __user *, rqtp, struct old_timespec32 __user *, rmtp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 t; if (!kc) return -EINVAL; if (!kc->nsleep) return -EOPNOTSUPP; if (get_old_timespec32(&t, rqtp)) return -EFAULT; if (!timespec64_valid(&t)) return -EINVAL; if (flags & TIMER_ABSTIME) rmtp = NULL; current->restart_block.fn = do_no_restart_syscall; current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; current->restart_block.nanosleep.compat_rmtp = rmtp; return kc->nsleep(which_clock, flags, &t); } #endif static const struct k_clock clock_realtime = { .clock_getres = posix_get_hrtimer_res, .clock_get_timespec = posix_get_realtime_timespec, .clock_get_ktime = posix_get_realtime_ktime, .clock_set = posix_clock_realtime_set, .clock_adj = posix_clock_realtime_adj, .nsleep = common_nsleep, .timer_create = common_timer_create, .timer_set = common_timer_set, .timer_get = common_timer_get, .timer_del = common_timer_del, .timer_rearm = common_hrtimer_rearm, .timer_forward = common_hrtimer_forward, .timer_remaining = common_hrtimer_remaining, .timer_try_to_cancel = common_hrtimer_try_to_cancel, .timer_wait_running = common_timer_wait_running, .timer_arm = common_hrtimer_arm, }; static const struct k_clock clock_monotonic = { .clock_getres = posix_get_hrtimer_res, .clock_get_timespec = posix_get_monotonic_timespec, .clock_get_ktime = posix_get_monotonic_ktime, .nsleep = common_nsleep_timens, .timer_create = common_timer_create, .timer_set = common_timer_set, .timer_get = common_timer_get, .timer_del = common_timer_del, .timer_rearm = common_hrtimer_rearm, .timer_forward = common_hrtimer_forward, .timer_remaining = common_hrtimer_remaining, .timer_try_to_cancel = common_hrtimer_try_to_cancel, .timer_wait_running = common_timer_wait_running, .timer_arm = common_hrtimer_arm, }; static const struct k_clock clock_monotonic_raw = { .clock_getres = posix_get_hrtimer_res, .clock_get_timespec = posix_get_monotonic_raw, }; static const struct k_clock clock_realtime_coarse = { .clock_getres = posix_get_coarse_res, .clock_get_timespec = posix_get_realtime_coarse, }; static const struct k_clock clock_monotonic_coarse = { .clock_getres = posix_get_coarse_res, .clock_get_timespec = posix_get_monotonic_coarse, }; static const struct k_clock clock_tai = { .clock_getres = posix_get_hrtimer_res, .clock_get_ktime = posix_get_tai_ktime, .clock_get_timespec = posix_get_tai_timespec, .nsleep 
= common_nsleep, .timer_create = common_timer_create, .timer_set = common_timer_set, .timer_get = common_timer_get, .timer_del = common_timer_del, .timer_rearm = common_hrtimer_rearm, .timer_forward = common_hrtimer_forward, .timer_remaining = common_hrtimer_remaining, .timer_try_to_cancel = common_hrtimer_try_to_cancel, .timer_wait_running = common_timer_wait_running, .timer_arm = common_hrtimer_arm, }; static const struct k_clock clock_boottime = { .clock_getres = posix_get_hrtimer_res, .clock_get_ktime = posix_get_boottime_ktime, .clock_get_timespec = posix_get_boottime_timespec, .nsleep = common_nsleep_timens, .timer_create = common_timer_create, .timer_set = common_timer_set, .timer_get = common_timer_get, .timer_del = common_timer_del, .timer_rearm = common_hrtimer_rearm, .timer_forward = common_hrtimer_forward, .timer_remaining = common_hrtimer_remaining, .timer_try_to_cancel = common_hrtimer_try_to_cancel, .timer_wait_running = common_timer_wait_running, .timer_arm = common_hrtimer_arm, }; static const struct k_clock * const posix_clocks[] = { [CLOCK_REALTIME] = &clock_realtime, [CLOCK_MONOTONIC] = &clock_monotonic, [CLOCK_PROCESS_CPUTIME_ID] = &clock_process, [CLOCK_THREAD_CPUTIME_ID] = &clock_thread, [CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw, [CLOCK_REALTIME_COARSE] = &clock_realtime_coarse, [CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse, [CLOCK_BOOTTIME] = &clock_boottime, [CLOCK_REALTIME_ALARM] = &alarm_clock, [CLOCK_BOOTTIME_ALARM] = &alarm_clock, [CLOCK_TAI] = &clock_tai, }; static const struct k_clock *clockid_to_kclock(const clockid_t id) { clockid_t idx = id; if (id < 0) { return (id & CLOCKFD_MASK) == CLOCKFD ? &clock_posix_dynamic : &clock_posix_cpu; } if (id >= ARRAY_SIZE(posix_clocks)) return NULL; return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))]; } static int __init posixtimer_init(void) { unsigned long i, size; unsigned int shift; if (IS_ENABLED(CONFIG_BASE_SMALL)) size = 512; else size = roundup_pow_of_two(512 * num_possible_cpus()); timer_buckets = alloc_large_system_hash("posixtimers", sizeof(*timer_buckets), size, 0, 0, &shift, NULL, size, size); size = 1UL << shift; timer_hashmask = size - 1; for (i = 0; i < size; i++) { spin_lock_init(&timer_buckets[i].lock); INIT_HLIST_HEAD(&timer_buckets[i].head); } return 0; } core_initcall(posixtimer_init);
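/*
 * Editor's illustrative sketch (plain POSIX user-space code, not part of
 * the kernel sources above): a minimal program exercising the
 * clock_getres()/clock_gettime() behaviour described in the sys_clock_getres()
 * kernel-doc comment. On a kernel with high resolution timers enabled the
 * reported resolution should be 1 ns; otherwise it is nanoseconds per tick.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec res, now;

	if (clock_getres(CLOCK_MONOTONIC, &res) == 0)
		printf("CLOCK_MONOTONIC resolution: %ld ns\n", res.tv_nsec);

	if (clock_gettime(CLOCK_MONOTONIC, &now) == 0)
		printf("CLOCK_MONOTONIC now: %lld.%09ld s\n",
		       (long long)now.tv_sec, now.tv_nsec);
	return 0;
}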
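/*
 * Editor's illustrative sketch (plain POSIX user-space code, not part of
 * the kernel sources above): an absolute clock_nanosleep(), matching the
 * TIMER_ABSTIME handling in sys_clock_nanosleep() above, where @rmtp is
 * ignored for absolute sleeps and an interrupted sleep can simply be
 * retried with the same deadline.
 */
#include <errno.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec deadline;
	int err;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += 1;	/* wake up one second from now */

	/* clock_nanosleep() returns an error number directly, not -1/errno */
	do {
		err = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
				      &deadline, NULL);
	} while (err == EINTR);

	if (err)
		fprintf(stderr, "clock_nanosleep: %d\n", err);
	return 0;
}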
// SPDX-License-Identifier: GPL-2.0-or-later /* Aquantia Corp. Aquantia AQtion USB to 5GbE Controller * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com> * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net> * Copyright (C) 2002-2003 TiVo Inc. * Copyright (C) 2017-2018 ASIX * Copyright (C) 2018 Aquantia Corp. 
*/ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/crc32.h> #include <linux/if_vlan.h> #include <linux/usb/cdc.h> #include <linux/usb/usbnet.h> #include <linux/linkmode.h> #include "aqc111.h" #define DRIVER_NAME "aqc111" static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data) { int ret; ret = usbnet_read_cmd_nopm(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, size); if (unlikely(ret < 0)) netdev_warn(dev->net, "Failed to read(0x%x) reg index 0x%04x: %d\n", cmd, index, ret); return ret; } static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data) { int ret; ret = usbnet_read_cmd(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, size); if (unlikely(ret < 0)) netdev_warn(dev->net, "Failed to read(0x%x) reg index 0x%04x: %d\n", cmd, index, ret); return ret; } static int aqc111_read16_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 *data) { int ret = 0; ret = aqc111_read_cmd_nopm(dev, cmd, value, index, sizeof(*data), data); le16_to_cpus(data); return ret; } static int aqc111_read16_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 *data) { int ret = 0; ret = aqc111_read_cmd(dev, cmd, value, index, sizeof(*data), data); le16_to_cpus(data); return ret; } static int __aqc111_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, u16 value, u16 index, u16 size, const void *data) { int err = -ENOMEM; void *buf = NULL; netdev_dbg(dev->net, "%s cmd=%#x reqtype=%#x value=%#x index=%#x size=%d\n", __func__, cmd, reqtype, value, index, size); if (data) { buf = kmemdup(data, size, GFP_KERNEL); if (!buf) goto out; } err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), cmd, reqtype, value, index, buf, size, (cmd == AQ_PHY_POWER) ? 
AQ_USB_PHY_SET_TIMEOUT : AQ_USB_SET_TIMEOUT); if (unlikely(err < 0)) netdev_warn(dev->net, "Failed to write(0x%x) reg index 0x%04x: %d\n", cmd, index, err); kfree(buf); out: return err; } static int aqc111_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data) { int ret; ret = __aqc111_write_cmd(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, size, data); return ret; } static int aqc111_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, const void *data) { int ret; if (usb_autopm_get_interface(dev->intf) < 0) return -ENODEV; ret = __aqc111_write_cmd(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, size, data); usb_autopm_put_interface(dev->intf); return ret; } static int aqc111_write16_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 *data) { u16 tmp = *data; cpu_to_le16s(&tmp); return aqc111_write_cmd_nopm(dev, cmd, value, index, sizeof(tmp), &tmp); } static int aqc111_write16_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 *data) { u16 tmp = *data; cpu_to_le16s(&tmp); return aqc111_write_cmd(dev, cmd, value, index, sizeof(tmp), &tmp); } static int aqc111_write32_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, u16 index, u32 *data) { u32 tmp = *data; cpu_to_le32s(&tmp); return aqc111_write_cmd_nopm(dev, cmd, value, index, sizeof(tmp), &tmp); } static int aqc111_write32_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, u32 *data) { u32 tmp = *data; cpu_to_le32s(&tmp); return aqc111_write_cmd(dev, cmd, value, index, sizeof(tmp), &tmp); } static int aqc111_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 size, void *data) { return usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, size); } static int aqc111_write16_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index, u16 *data) { u16 tmp = *data; cpu_to_le16s(&tmp); return aqc111_write_cmd_async(dev, cmd, value, index, sizeof(tmp), &tmp); } static void aqc111_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; /* Inherit standard device info */ usbnet_get_drvinfo(net, info); strscpy(info->driver, DRIVER_NAME, sizeof(info->driver)); snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u", aqc111_data->fw_ver.major, aqc111_data->fw_ver.minor, aqc111_data->fw_ver.rev); info->eedump_len = 0x00; info->regdump_len = 0x00; } static void aqc111_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; wolinfo->supported = WAKE_MAGIC; wolinfo->wolopts = 0; if (aqc111_data->wol_flags & AQ_WOL_FLAG_MP) wolinfo->wolopts |= WAKE_MAGIC; } static int aqc111_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; if (wolinfo->wolopts & ~WAKE_MAGIC) return -EINVAL; aqc111_data->wol_flags = 0; if (wolinfo->wolopts & WAKE_MAGIC) aqc111_data->wol_flags |= AQ_WOL_FLAG_MP; return 0; } static void aqc111_speed_to_link_mode(u32 speed, struct ethtool_link_ksettings *elk) { switch (speed) { case SPEED_5000: ethtool_link_ksettings_add_link_mode(elk, advertising, 5000baseT_Full); break; case SPEED_2500: ethtool_link_ksettings_add_link_mode(elk, advertising, 2500baseT_Full); break; case SPEED_1000: ethtool_link_ksettings_add_link_mode(elk, advertising, 
1000baseT_Full); break; case SPEED_100: ethtool_link_ksettings_add_link_mode(elk, advertising, 100baseT_Full); break; } } static int aqc111_get_link_ksettings(struct net_device *net, struct ethtool_link_ksettings *elk) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; enum usb_device_speed usb_speed = dev->udev->speed; u32 speed = SPEED_UNKNOWN; ethtool_link_ksettings_zero_link_mode(elk, supported); ethtool_link_ksettings_add_link_mode(elk, supported, 100baseT_Full); ethtool_link_ksettings_add_link_mode(elk, supported, 1000baseT_Full); if (usb_speed == USB_SPEED_SUPER) { ethtool_link_ksettings_add_link_mode(elk, supported, 2500baseT_Full); ethtool_link_ksettings_add_link_mode(elk, supported, 5000baseT_Full); } ethtool_link_ksettings_add_link_mode(elk, supported, TP); ethtool_link_ksettings_add_link_mode(elk, supported, Autoneg); elk->base.port = PORT_TP; elk->base.transceiver = XCVR_INTERNAL; elk->base.mdio_support = 0x00; /*Not supported*/ if (aqc111_data->autoneg) linkmode_copy(elk->link_modes.advertising, elk->link_modes.supported); else aqc111_speed_to_link_mode(aqc111_data->advertised_speed, elk); elk->base.autoneg = aqc111_data->autoneg; switch (aqc111_data->link_speed) { case AQ_INT_SPEED_5G: speed = SPEED_5000; break; case AQ_INT_SPEED_2_5G: speed = SPEED_2500; break; case AQ_INT_SPEED_1G: speed = SPEED_1000; break; case AQ_INT_SPEED_100M: speed = SPEED_100; break; } elk->base.duplex = DUPLEX_FULL; elk->base.speed = speed; return 0; } static void aqc111_set_phy_speed(struct usbnet *dev, u8 autoneg, u16 speed) { struct aqc111_data *aqc111_data = dev->driver_priv; aqc111_data->phy_cfg &= ~AQ_ADV_MASK; aqc111_data->phy_cfg |= AQ_PAUSE; aqc111_data->phy_cfg |= AQ_ASYM_PAUSE; aqc111_data->phy_cfg |= AQ_DOWNSHIFT; aqc111_data->phy_cfg &= ~AQ_DSH_RETRIES_MASK; aqc111_data->phy_cfg |= (3 << AQ_DSH_RETRIES_SHIFT) & AQ_DSH_RETRIES_MASK; if (autoneg == AUTONEG_ENABLE) { switch (speed) { case SPEED_5000: aqc111_data->phy_cfg |= AQ_ADV_5G; fallthrough; case SPEED_2500: aqc111_data->phy_cfg |= AQ_ADV_2G5; fallthrough; case SPEED_1000: aqc111_data->phy_cfg |= AQ_ADV_1G; fallthrough; case SPEED_100: aqc111_data->phy_cfg |= AQ_ADV_100M; /* fall-through */ } } else { switch (speed) { case SPEED_5000: aqc111_data->phy_cfg |= AQ_ADV_5G; break; case SPEED_2500: aqc111_data->phy_cfg |= AQ_ADV_2G5; break; case SPEED_1000: aqc111_data->phy_cfg |= AQ_ADV_1G; break; case SPEED_100: aqc111_data->phy_cfg |= AQ_ADV_100M; break; } } aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); } static int aqc111_set_link_ksettings(struct net_device *net, const struct ethtool_link_ksettings *elk) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; enum usb_device_speed usb_speed = dev->udev->speed; u8 autoneg = elk->base.autoneg; u32 speed = elk->base.speed; if (autoneg == AUTONEG_ENABLE) { if (aqc111_data->autoneg != AUTONEG_ENABLE) { aqc111_data->autoneg = AUTONEG_ENABLE; aqc111_data->advertised_speed = (usb_speed == USB_SPEED_SUPER) ? 
SPEED_5000 : SPEED_1000; aqc111_set_phy_speed(dev, aqc111_data->autoneg, aqc111_data->advertised_speed); } } else { if (speed != SPEED_100 && speed != SPEED_1000 && speed != SPEED_2500 && speed != SPEED_5000 && speed != SPEED_UNKNOWN) return -EINVAL; if (elk->base.duplex != DUPLEX_FULL) return -EINVAL; if (usb_speed != USB_SPEED_SUPER && speed > SPEED_1000) return -EINVAL; aqc111_data->autoneg = AUTONEG_DISABLE; if (speed != SPEED_UNKNOWN) aqc111_data->advertised_speed = speed; aqc111_set_phy_speed(dev, aqc111_data->autoneg, aqc111_data->advertised_speed); } return 0; } static const struct ethtool_ops aqc111_ethtool_ops = { .get_drvinfo = aqc111_get_drvinfo, .get_wol = aqc111_get_wol, .set_wol = aqc111_set_wol, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_link = ethtool_op_get_link, .get_link_ksettings = aqc111_get_link_ksettings, .set_link_ksettings = aqc111_set_link_ksettings }; static int aqc111_change_mtu(struct net_device *net, int new_mtu) { struct usbnet *dev = netdev_priv(net); u16 reg16 = 0; u8 buf[5]; WRITE_ONCE(net->mtu, new_mtu); dev->hard_mtu = net->mtu + net->hard_header_len; aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); if (net->mtu > 1500) reg16 |= SFR_MEDIUM_JUMBO_EN; else reg16 &= ~SFR_MEDIUM_JUMBO_EN; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); if (dev->net->mtu > 12500) { memcpy(buf, &AQC111_BULKIN_SIZE[2], 5); /* RX bulk configuration */ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL, 5, 5, buf); } /* Set high low water level */ if (dev->net->mtu <= 4500) reg16 = 0x0810; else if (dev->net->mtu <= 9500) reg16 = 0x1020; else if (dev->net->mtu <= 12500) reg16 = 0x1420; else reg16 = 0x1A20; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW, 2, &reg16); return 0; } static int aqc111_set_mac_addr(struct net_device *net, void *p) { struct usbnet *dev = netdev_priv(net); int ret = 0; ret = eth_mac_addr(net, p); if (ret < 0) return ret; /* Set the MAC address */ return aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN, ETH_ALEN, net->dev_addr); } static int aqc111_vlan_rx_kill_vid(struct net_device *net, __be16 proto, u16 vid) { struct usbnet *dev = netdev_priv(net); u8 vlan_ctrl = 0; u16 reg16 = 0; u8 reg8 = 0; aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); vlan_ctrl = reg8; /* Address */ reg8 = (vid / 16); aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8); /* Data */ reg8 = vlan_ctrl | SFR_VLAN_CONTROL_RD; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); reg16 &= ~(1 << (vid % 16)); aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); reg8 = vlan_ctrl | SFR_VLAN_CONTROL_WE; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); return 0; } static int aqc111_vlan_rx_add_vid(struct net_device *net, __be16 proto, u16 vid) { struct usbnet *dev = netdev_priv(net); u8 vlan_ctrl = 0; u16 reg16 = 0; u8 reg8 = 0; aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); vlan_ctrl = reg8; /* Address */ reg8 = (vid / 16); aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8); /* Data */ reg8 = vlan_ctrl | SFR_VLAN_CONTROL_RD; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); reg16 |= (1 << (vid % 16)); aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); reg8 = 
vlan_ctrl | SFR_VLAN_CONTROL_WE; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); return 0; } static void aqc111_set_rx_mode(struct net_device *net) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; int mc_count = 0; mc_count = netdev_mc_count(net); aqc111_data->rxctl &= ~(SFR_RX_CTL_PRO | SFR_RX_CTL_AMALL | SFR_RX_CTL_AM); if (net->flags & IFF_PROMISC) { aqc111_data->rxctl |= SFR_RX_CTL_PRO; } else if ((net->flags & IFF_ALLMULTI) || mc_count > AQ_MAX_MCAST) { aqc111_data->rxctl |= SFR_RX_CTL_AMALL; } else if (!netdev_mc_empty(net)) { u8 m_filter[AQ_MCAST_FILTER_SIZE] = { 0 }; struct netdev_hw_addr *ha = NULL; u32 crc_bits = 0; netdev_for_each_mc_addr(ha, net) { crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26; m_filter[crc_bits >> 3] |= BIT(crc_bits & 7); } aqc111_write_cmd_async(dev, AQ_ACCESS_MAC, SFR_MULTI_FILTER_ARRY, AQ_MCAST_FILTER_SIZE, AQ_MCAST_FILTER_SIZE, m_filter); aqc111_data->rxctl |= SFR_RX_CTL_AM; } aqc111_write16_cmd_async(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &aqc111_data->rxctl); } static int aqc111_set_features(struct net_device *net, netdev_features_t features) { struct usbnet *dev = netdev_priv(net); struct aqc111_data *aqc111_data = dev->driver_priv; netdev_features_t changed = net->features ^ features; u16 reg16 = 0; u8 reg8 = 0; if (changed & NETIF_F_IP_CSUM) { aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8); reg8 ^= SFR_TXCOE_TCP | SFR_TXCOE_UDP; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8); } if (changed & NETIF_F_IPV6_CSUM) { aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8); reg8 ^= SFR_TXCOE_TCPV6 | SFR_TXCOE_UDPV6; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8); } if (changed & NETIF_F_RXCSUM) { aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8); if (features & NETIF_F_RXCSUM) { aqc111_data->rx_checksum = 1; reg8 &= ~(SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP | SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6); } else { aqc111_data->rx_checksum = 0; reg8 |= SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP | SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6; } aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8); } if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { u16 i = 0; for (i = 0; i < 256; i++) { /* Address */ reg8 = i; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8); /* Data */ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); reg8 = SFR_VLAN_CONTROL_WE; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); } aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); reg8 |= SFR_VLAN_CONTROL_VFE; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); } else { aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); reg8 &= ~SFR_VLAN_CONTROL_VFE; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); } } return 0; } static const struct net_device_ops aqc111_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_get_stats64 = dev_get_tstats64, .ndo_change_mtu = aqc111_change_mtu, .ndo_set_mac_address = aqc111_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = aqc111_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = aqc111_vlan_rx_kill_vid, .ndo_set_rx_mode = aqc111_set_rx_mode, .ndo_set_features = aqc111_set_features, }; static int aqc111_read_perm_mac(struct usbnet *dev) 
{ u8 buf[ETH_ALEN]; int ret; ret = aqc111_read_cmd(dev, AQ_FLASH_PARAMETERS, 0, 0, ETH_ALEN, buf); if (ret < 0) goto out; ether_addr_copy(dev->net->perm_addr, buf); return 0; out: return ret; } static void aqc111_read_fw_version(struct usbnet *dev, struct aqc111_data *aqc111_data) { aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_MAJOR, 1, 1, &aqc111_data->fw_ver.major); aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_MINOR, 1, 1, &aqc111_data->fw_ver.minor); aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_REV, 1, 1, &aqc111_data->fw_ver.rev); if (aqc111_data->fw_ver.major & 0x80) aqc111_data->fw_ver.major &= ~0x80; } static int aqc111_bind(struct usbnet *dev, struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); enum usb_device_speed usb_speed = udev->speed; struct aqc111_data *aqc111_data; int ret; /* Check that the vendor specific configuration is active */ if (udev->actconfig->desc.bConfigurationValue != 1) { usb_driver_set_configuration(udev, 1); return -ENODEV; } usb_reset_configuration(dev->udev); ret = usbnet_get_endpoints(dev, intf); if (ret < 0) { netdev_dbg(dev->net, "usbnet_get_endpoints failed"); return ret; } aqc111_data = kzalloc(sizeof(*aqc111_data), GFP_KERNEL); if (!aqc111_data) return -ENOMEM; /* store aqc111_data pointer in device data field */ dev->driver_priv = aqc111_data; /* Init the MAC address */ ret = aqc111_read_perm_mac(dev); if (ret) goto out; eth_hw_addr_set(dev->net, dev->net->perm_addr); /* Set Rx urb size */ dev->rx_urb_size = URB_SIZE; /* Set TX needed headroom & tailroom */ dev->net->needed_headroom += sizeof(u64); dev->net->needed_tailroom += sizeof(u64); dev->net->max_mtu = 16334; dev->net->netdev_ops = &aqc111_netdev_ops; dev->net->ethtool_ops = &aqc111_ethtool_ops; if (usb_device_no_sg_constraint(dev->udev)) dev->can_dma_sg = 1; dev->net->hw_features |= AQ_SUPPORT_HW_FEATURE; dev->net->features |= AQ_SUPPORT_FEATURE; dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE; netif_set_tso_max_size(dev->net, 65535); aqc111_read_fw_version(dev, aqc111_data); aqc111_data->autoneg = AUTONEG_ENABLE; aqc111_data->advertised_speed = (usb_speed == USB_SPEED_SUPER) ? 
SPEED_5000 : SPEED_1000; return 0; out: kfree(aqc111_data); return ret; } static void aqc111_unbind(struct usbnet *dev, struct usb_interface *intf) { struct aqc111_data *aqc111_data = dev->driver_priv; u16 reg16; /* Force bz */ reg16 = SFR_PHYPWR_RSTCTL_BZ; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL, 2, &reg16); reg16 = 0; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL, 2, &reg16); /* Power down ethernet PHY */ aqc111_data->phy_cfg &= ~AQ_ADV_MASK; aqc111_data->phy_cfg |= AQ_LOW_POWER; aqc111_data->phy_cfg &= ~AQ_PHY_POWER_EN; aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); kfree(aqc111_data); } static void aqc111_status(struct usbnet *dev, struct urb *urb) { struct aqc111_data *aqc111_data = dev->driver_priv; u64 *event_data = NULL; int link = 0; if (urb->actual_length < sizeof(*event_data)) return; event_data = urb->transfer_buffer; le64_to_cpus(event_data); if (*event_data & AQ_LS_MASK) link = 1; else link = 0; aqc111_data->link_speed = (*event_data & AQ_SPEED_MASK) >> AQ_SPEED_SHIFT; aqc111_data->link = link; if (netif_carrier_ok(dev->net) != link) usbnet_defer_kevent(dev, EVENT_LINK_RESET); } static void aqc111_configure_rx(struct usbnet *dev, struct aqc111_data *aqc111_data) { enum usb_device_speed usb_speed = dev->udev->speed; u16 link_speed = 0, usb_host = 0; u8 buf[5] = { 0 }; u8 queue_num = 0; u16 reg16 = 0; u8 reg8 = 0; buf[0] = 0x00; buf[1] = 0xF8; buf[2] = 0x07; switch (aqc111_data->link_speed) { case AQ_INT_SPEED_5G: link_speed = 5000; reg8 = 0x05; reg16 = 0x001F; break; case AQ_INT_SPEED_2_5G: link_speed = 2500; reg16 = 0x003F; break; case AQ_INT_SPEED_1G: link_speed = 1000; reg16 = 0x009F; break; case AQ_INT_SPEED_100M: link_speed = 100; queue_num = 1; reg16 = 0x063F; buf[1] = 0xFB; buf[2] = 0x4; break; } aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_INTER_PACKET_GAP_0, 1, 1, &reg8); aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TX_PAUSE_RESEND_T, 3, 3, buf); switch (usb_speed) { case USB_SPEED_SUPER: usb_host = 3; break; case USB_SPEED_HIGH: usb_host = 2; break; case USB_SPEED_FULL: case USB_SPEED_LOW: usb_host = 1; queue_num = 0; break; default: usb_host = 0; break; } if (dev->net->mtu > 12500 && dev->net->mtu <= 16334) queue_num = 2; /* For Jumbo packet 16KB */ memcpy(buf, &AQC111_BULKIN_SIZE[queue_num], 5); /* RX bulk configuration */ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL, 5, 5, buf); /* Set high low water level */ if (dev->net->mtu <= 4500) reg16 = 0x0810; else if (dev->net->mtu <= 9500) reg16 = 0x1020; else if (dev->net->mtu <= 12500) reg16 = 0x1420; else if (dev->net->mtu <= 16334) reg16 = 0x1A20; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW, 2, &reg16); netdev_info(dev->net, "Link Speed %d, USB %d", link_speed, usb_host); } static void aqc111_configure_csum_offload(struct usbnet *dev) { u8 reg8 = 0; if (dev->net->features & NETIF_F_RXCSUM) { reg8 |= SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP | SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6; } aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8); reg8 = 0; if (dev->net->features & NETIF_F_IP_CSUM) reg8 |= SFR_TXCOE_IP | SFR_TXCOE_TCP | SFR_TXCOE_UDP; if (dev->net->features & NETIF_F_IPV6_CSUM) reg8 |= SFR_TXCOE_TCPV6 | SFR_TXCOE_UDPV6; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8); } static int aqc111_link_reset(struct usbnet *dev) { struct aqc111_data *aqc111_data = dev->driver_priv; u16 reg16 = 0; u8 reg8 = 0; if (aqc111_data->link == 1) { /* Link up */ aqc111_configure_rx(dev, aqc111_data); /* Vlan Tag Filter */ 
reg8 = SFR_VLAN_CONTROL_VSO; if (dev->net->features & NETIF_F_HW_VLAN_CTAG_FILTER) reg8 |= SFR_VLAN_CONTROL_VFE; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); reg8 = 0x0; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL, 1, 1, &reg8); aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMTX_DMA_CONTROL, 1, 1, &reg8); aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_ARC_CTRL, 1, 1, &reg8); reg16 = SFR_RX_CTL_IPE | SFR_RX_CTL_AB; aqc111_data->rxctl = reg16; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); reg8 = SFR_RX_PATH_READY; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH, 1, 1, &reg8); reg8 = SFR_BULK_OUT_EFF_EN; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL, 1, 1, &reg8); reg16 = 0; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 = SFR_MEDIUM_XGMIIMODE | SFR_MEDIUM_FULL_DUPLEX; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); aqc111_configure_csum_offload(dev); aqc111_set_rx_mode(dev->net); aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); if (dev->net->mtu > 1500) reg16 |= SFR_MEDIUM_JUMBO_EN; reg16 |= SFR_MEDIUM_RECEIVE_EN | SFR_MEDIUM_RXFLOW_CTRLEN | SFR_MEDIUM_TXFLOW_CTRLEN; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); aqc111_data->rxctl |= SFR_RX_CTL_START; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &aqc111_data->rxctl); netif_carrier_on(dev->net); } else { aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 &= ~SFR_MEDIUM_RECEIVE_EN; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); aqc111_data->rxctl &= ~SFR_RX_CTL_START; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &aqc111_data->rxctl); reg8 = SFR_BULK_OUT_FLUSH_EN | SFR_BULK_OUT_EFF_EN; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL, 1, 1, &reg8); reg8 = SFR_BULK_OUT_EFF_EN; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL, 1, 1, &reg8); netif_carrier_off(dev->net); } return 0; } static int aqc111_reset(struct usbnet *dev) { struct aqc111_data *aqc111_data = dev->driver_priv; u8 reg8 = 0; dev->rx_urb_size = URB_SIZE; if (usb_device_no_sg_constraint(dev->udev)) dev->can_dma_sg = 1; dev->net->hw_features |= AQ_SUPPORT_HW_FEATURE; dev->net->features |= AQ_SUPPORT_FEATURE; dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE; /* Power up ethernet PHY */ aqc111_data->phy_cfg = AQ_PHY_POWER_EN; aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); /* Set the MAC address */ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN, ETH_ALEN, dev->net->dev_addr); reg8 = 0xFF; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK, 1, 1, &reg8); reg8 = 0x0; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_SWP_CTRL, 1, 1, &reg8); aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_MONITOR_MODE, 1, 1, &reg8); reg8 &= ~(SFR_MONITOR_MODE_EPHYRW | SFR_MONITOR_MODE_RWLC | SFR_MONITOR_MODE_RWMP | SFR_MONITOR_MODE_RWWF | SFR_MONITOR_MODE_RW_FLAG); aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_MONITOR_MODE, 1, 1, &reg8); netif_carrier_off(dev->net); /* Phy advertise */ aqc111_set_phy_speed(dev, aqc111_data->autoneg, aqc111_data->advertised_speed); return 0; } static int aqc111_stop(struct usbnet *dev) { struct aqc111_data *aqc111_data = dev->driver_priv; u16 reg16 = 0; aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 &= ~SFR_MEDIUM_RECEIVE_EN; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 = 0; aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, 
&reg16); /* Put PHY to low power*/ aqc111_data->phy_cfg |= AQ_LOW_POWER; aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); netif_carrier_off(dev->net); return 0; } static void aqc111_rx_checksum(struct sk_buff *skb, u64 pkt_desc) { u32 pkt_type = 0; skb->ip_summed = CHECKSUM_NONE; /* checksum error bit is set */ if (pkt_desc & AQ_RX_PD_L4_ERR || pkt_desc & AQ_RX_PD_L3_ERR) return; pkt_type = pkt_desc & AQ_RX_PD_L4_TYPE_MASK; /* It must be a TCP or UDP packet with a valid checksum */ if (pkt_type == AQ_RX_PD_L4_TCP || pkt_type == AQ_RX_PD_L4_UDP) skb->ip_summed = CHECKSUM_UNNECESSARY; } static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { struct aqc111_data *aqc111_data = dev->driver_priv; struct sk_buff *new_skb = NULL; u32 pkt_total_offset = 0; u64 *pkt_desc_ptr = NULL; u32 start_of_descs = 0; u32 desc_offset = 0; /*RX Header Offset*/ u16 pkt_count = 0; u64 desc_hdr = 0; u16 vlan_tag = 0; u32 skb_len; if (!skb) goto err; skb_len = skb->len; if (skb_len < sizeof(desc_hdr)) goto err; /* RX Descriptor Header */ skb_trim(skb, skb_len - sizeof(desc_hdr)); desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb)); /* Check these packets */ desc_offset = (desc_hdr & AQ_RX_DH_DESC_OFFSET_MASK) >> AQ_RX_DH_DESC_OFFSET_SHIFT; pkt_count = desc_hdr & AQ_RX_DH_PKT_CNT_MASK; start_of_descs = skb_len - ((pkt_count + 1) * sizeof(desc_hdr)); /* self check descs position */ if (start_of_descs != desc_offset) goto err; /* self check desc_offset from header and make sure that the * bounds of the metadata array are inside the SKB */ if (pkt_count * 2 + desc_offset >= skb_len) goto err; /* Packets must not overlap the metadata array */ skb_trim(skb, desc_offset); if (pkt_count == 0) goto err; /* Get the first RX packet descriptor */ pkt_desc_ptr = (u64 *)(skb->data + desc_offset); while (pkt_count--) { u64 pkt_desc = le64_to_cpup(pkt_desc_ptr); u32 pkt_len_with_padd = 0; u32 pkt_len = 0; pkt_len = (u32)((pkt_desc & AQ_RX_PD_LEN_MASK) >> AQ_RX_PD_LEN_SHIFT); pkt_len_with_padd = ((pkt_len + 7) & 0x7FFF8); pkt_total_offset += pkt_len_with_padd; if (pkt_total_offset > desc_offset || (pkt_count == 0 && pkt_total_offset != desc_offset)) { goto err; } if (pkt_desc & AQ_RX_PD_DROP || !(pkt_desc & AQ_RX_PD_RX_OK) || pkt_len > (dev->hard_mtu + AQ_RX_HW_PAD)) { skb_pull(skb, pkt_len_with_padd); /* Next RX Packet Descriptor */ pkt_desc_ptr++; continue; } new_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len); if (!new_skb) goto err; skb_put(new_skb, pkt_len); memcpy(new_skb->data, skb->data, pkt_len); skb_pull(new_skb, AQ_RX_HW_PAD); if (aqc111_data->rx_checksum) aqc111_rx_checksum(new_skb, pkt_desc); if (pkt_desc & AQ_RX_PD_VLAN) { vlan_tag = pkt_desc >> AQ_RX_PD_VLAN_SHIFT; __vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q), vlan_tag & VLAN_VID_MASK); } usbnet_skb_return(dev, new_skb); if (pkt_count == 0) break; skb_pull(skb, pkt_len_with_padd); /* Next RX Packet Header */ pkt_desc_ptr++; new_skb = NULL; } return 1; err: return 0; } static struct sk_buff *aqc111_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { int frame_size = dev->maxpacket; struct sk_buff *new_skb = NULL; u64 *tx_desc_ptr = NULL; int padding_size = 0; int headroom = 0; int tailroom = 0; u64 tx_desc = 0; u16 tci = 0; /*Length of actual data*/ tx_desc |= skb->len & AQ_TX_DESC_LEN_MASK; /* TSO MSS */ tx_desc |= ((u64)(skb_shinfo(skb)->gso_size & AQ_TX_DESC_MSS_MASK)) << AQ_TX_DESC_MSS_SHIFT; headroom = (skb->len + sizeof(tx_desc)) % 8; if (headroom != 0) padding_size = 8 - headroom; if (((skb->len + 
sizeof(tx_desc) + padding_size) % frame_size) == 0) { padding_size += 8; tx_desc |= AQ_TX_DESC_DROP_PADD; } /* Vlan Tag */ if (vlan_get_tag(skb, &tci) >= 0) { tx_desc |= AQ_TX_DESC_VLAN; tx_desc |= ((u64)tci & AQ_TX_DESC_VLAN_MASK) << AQ_TX_DESC_VLAN_SHIFT; } if (!dev->can_dma_sg && (dev->net->features & NETIF_F_SG) && skb_linearize(skb)) return NULL; headroom = skb_headroom(skb); tailroom = skb_tailroom(skb); if (!(headroom >= sizeof(tx_desc) && tailroom >= padding_size)) { new_skb = skb_copy_expand(skb, sizeof(tx_desc), padding_size, flags); dev_kfree_skb_any(skb); skb = new_skb; if (!skb) return NULL; } if (padding_size != 0) skb_put_zero(skb, padding_size); /* Copy TX header */ tx_desc_ptr = skb_push(skb, sizeof(tx_desc)); *tx_desc_ptr = cpu_to_le64(tx_desc); usbnet_set_skb_tx_stats(skb, 1, 0); return skb; } static const struct driver_info aqc111_info = { .description = "Aquantia AQtion USB to 5GbE Controller", .bind = aqc111_bind, .unbind = aqc111_unbind, .status = aqc111_status, .link_reset = aqc111_link_reset, .reset = aqc111_reset, .stop = aqc111_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, .rx_fixup = aqc111_rx_fixup, .tx_fixup = aqc111_tx_fixup, }; #define ASIX111_DESC \ "ASIX USB 3.1 Gen1 to 5G Multi-Gigabit Ethernet Adapter" static const struct driver_info asix111_info = { .description = ASIX111_DESC, .bind = aqc111_bind, .unbind = aqc111_unbind, .status = aqc111_status, .link_reset = aqc111_link_reset, .reset = aqc111_reset, .stop = aqc111_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, .rx_fixup = aqc111_rx_fixup, .tx_fixup = aqc111_tx_fixup, }; #undef ASIX111_DESC #define ASIX112_DESC \ "ASIX USB 3.1 Gen1 to 2.5G Multi-Gigabit Ethernet Adapter" static const struct driver_info asix112_info = { .description = ASIX112_DESC, .bind = aqc111_bind, .unbind = aqc111_unbind, .status = aqc111_status, .link_reset = aqc111_link_reset, .reset = aqc111_reset, .stop = aqc111_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, .rx_fixup = aqc111_rx_fixup, .tx_fixup = aqc111_tx_fixup, }; #undef ASIX112_DESC static const struct driver_info trendnet_info = { .description = "USB-C 3.1 to 5GBASE-T Ethernet Adapter", .bind = aqc111_bind, .unbind = aqc111_unbind, .status = aqc111_status, .link_reset = aqc111_link_reset, .reset = aqc111_reset, .stop = aqc111_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, .rx_fixup = aqc111_rx_fixup, .tx_fixup = aqc111_tx_fixup, }; static const struct driver_info qnap_info = { .description = "QNAP QNA-UC5G1T USB to 5GbE Adapter", .bind = aqc111_bind, .unbind = aqc111_unbind, .status = aqc111_status, .link_reset = aqc111_link_reset, .reset = aqc111_reset, .stop = aqc111_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, .rx_fixup = aqc111_rx_fixup, .tx_fixup = aqc111_tx_fixup, }; static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); struct aqc111_data *aqc111_data = dev->driver_priv; u16 temp_rx_ctrl = 0x00; u16 reg16; u8 reg8; usbnet_suspend(intf, message); aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); temp_rx_ctrl = reg16; /* Stop RX operations*/ reg16 &= ~SFR_RX_CTL_START; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); /* Force bz */ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL, 2, &reg16); reg16 |= SFR_PHYPWR_RSTCTL_BZ; 
aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL, 2, &reg16); reg8 = SFR_BULK_OUT_EFF_EN; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL, 1, 1, &reg8); temp_rx_ctrl &= ~(SFR_RX_CTL_START | SFR_RX_CTL_RF_WAK | SFR_RX_CTL_AP | SFR_RX_CTL_AM); aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &temp_rx_ctrl); reg8 = 0x00; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH, 1, 1, &reg8); if (aqc111_data->wol_flags) { struct aqc111_wol_cfg wol_cfg; memset(&wol_cfg, 0, sizeof(struct aqc111_wol_cfg)); aqc111_data->phy_cfg |= AQ_WOL; ether_addr_copy(wol_cfg.hw_addr, dev->net->dev_addr); wol_cfg.flags = aqc111_data->wol_flags; temp_rx_ctrl |= (SFR_RX_CTL_AB | SFR_RX_CTL_START); aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &temp_rx_ctrl); reg8 = 0x00; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK, 1, 1, &reg8); reg8 = SFR_BMRX_DMA_EN; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL, 1, 1, &reg8); reg8 = SFR_RX_PATH_READY; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH, 1, 1, &reg8); reg8 = 0x07; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL, 1, 1, &reg8); reg8 = 0x00; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QTIMR_LOW, 1, 1, &reg8); aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QTIMR_HIGH, 1, 1, &reg8); reg8 = 0xFF; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QSIZE, 1, 1, &reg8); aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QIFG, 1, 1, &reg8); aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 |= SFR_MEDIUM_RECEIVE_EN; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); aqc111_write_cmd(dev, AQ_WOL_CFG, 0, 0, WOL_CFG_SIZE, &wol_cfg); aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); } else { aqc111_data->phy_cfg |= AQ_LOW_POWER; aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); /* Disable RX path */ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 &= ~SFR_MEDIUM_RECEIVE_EN; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); } return 0; } static int aqc111_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); struct aqc111_data *aqc111_data = dev->driver_priv; u16 reg16; u8 reg8; netif_carrier_off(dev->net); /* Power up ethernet PHY */ aqc111_data->phy_cfg |= AQ_PHY_POWER_EN; aqc111_data->phy_cfg &= ~AQ_LOW_POWER; aqc111_data->phy_cfg &= ~AQ_WOL; reg8 = 0xFF; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK, 1, 1, &reg8); /* Configure RX control register => start operation */ reg16 = aqc111_data->rxctl; reg16 &= ~SFR_RX_CTL_START; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); reg16 |= SFR_RX_CTL_START; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); aqc111_set_phy_speed(dev, aqc111_data->autoneg, aqc111_data->advertised_speed); aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg16 |= SFR_MEDIUM_RECEIVE_EN; aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 2, &reg16); reg8 = SFR_RX_PATH_READY; aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH, 1, 1, &reg8); reg8 = 0x0; aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL, 1, 1, &reg8); return usbnet_resume(intf); } #define AQC111_USB_ETH_DEV(vid, pid, table) \ USB_DEVICE_INTERFACE_CLASS((vid), (pid), USB_CLASS_VENDOR_SPEC), \ .driver_info = (unsigned long)&(table) \ }, \ { \ 
USB_DEVICE_AND_INTERFACE_INFO((vid), (pid), \ USB_CLASS_COMM, \ USB_CDC_SUBCLASS_ETHERNET, \ USB_CDC_PROTO_NONE), \ .driver_info = (unsigned long)&(table), static const struct usb_device_id products[] = { {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)}, {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)}, {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)}, { },/* END */ }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver aq_driver = { .name = "aqc111", .id_table = products, .probe = usbnet_probe, .suspend = aqc111_suspend, .resume = aqc111_resume, .disconnect = usbnet_disconnect, }; module_usb_driver(aq_driver); MODULE_DESCRIPTION("Aquantia AQtion USB to 5/2.5GbE Controllers"); MODULE_LICENSE("GPL");
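/*
 * Editor's illustrative sketch (user space, not part of the driver):
 * shows how aqc111_set_rx_mode() above maps a multicast MAC address to a
 * bit in the 8-byte multicast hash filter: the top six bits of the
 * big-endian Ethernet CRC-32 select one of 64 filter bits. ether_crc_be()
 * below is a local stand-in, assumed to match the bit ordering of the
 * kernel's ether_crc() from <linux/crc32.h>.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ether_crc_be(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (len-- > 0) {
		uint8_t byte = *data++;
		int bit;

		/* feed each byte LSB first into an MSB-first CRC register */
		for (bit = 0; bit < 8; bit++, byte >>= 1) {
			uint32_t msb = (crc >> 31) ^ (byte & 1);

			crc = (crc << 1) ^ (msb ? 0x04c11db7 : 0);
		}
	}
	return crc;
}

int main(void)
{
	/* IPv4 all-hosts group 224.0.0.1 maps to MAC 01:00:5e:00:00:01 */
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint8_t m_filter[8] = { 0 };
	uint32_t crc_bits = ether_crc_be(6, mac) >> 26;

	/* same byte/bit selection as the netdev_for_each_mc_addr() loop */
	m_filter[crc_bits >> 3] |= 1u << (crc_bits & 7);
	printf("crc bit %u -> filter byte %u, mask 0x%02x\n",
	       crc_bits, crc_bits >> 3, 1u << (crc_bits & 7));
	return 0;
}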
// SPDX-License-Identifier: GPL-2.0-or-later /* * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver * * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se) * Copyright (C) 2015 John Horan (knasher@gmail.com) * * The USB initialization and package decoding was made by * Scott Shawcroft as part of the touchd user-space driver project: * Copyright (C) 2008 Scott Shawcroft (scott.shawcroft@gmail.com) * * The BCM5974 driver is based on the appletouch driver: * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2005 Johannes Berg (johannes@sipsolutions.net) * Copyright (C) 2005 Stelian Pop (stelian@popies.net) * Copyright (C) 2005 Frank Arnold (frank@scirocco-5v-turbo.de) * Copyright (C) 2005 Peter Osterlund (petero2@telia.com) * Copyright (C) 2005 Michael Hanselmann (linux-kernel@hansmi.ch) * Copyright (C) 2006 Nicolas Boichat (nicolas@boichat.ch) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb/input.h> #include <linux/hid.h> #include <linux/mutex.h> #include <linux/input/mt.h> #define USB_VENDOR_ID_APPLE 0x05ac /* MacbookAir, aka wellspring */ #define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223 #define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224 #define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225 /* MacbookProPenryn, aka wellspring2 */ #define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230 #define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231 #define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232 /* Macbook5,1 (unibody), aka wellspring3 */ #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237 #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238 /* MacbookAir3,2 (unibody), aka wellspring5 */ #define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f #define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240 #define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241 /* MacbookAir3,1 (unibody), aka wellspring4 */ #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242 #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243 #define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244 /* Macbook8 (unibody, March 2011) */ #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 /* MacbookAir4,1 (unibody, July 2011) */ #define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249 #define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a #define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b /* MacbookAir4,2 (unibody, July 2011) */ #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e /* Macbook8,2 (unibody) */ #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254 /* MacbookPro10,1 (unibody, June 2012) */ #define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262 #define 
USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263 #define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264 /* MacbookPro10,2 (unibody, October 2012) */ #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI 0x0259 #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a #define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b /* MacbookAir6,2 (unibody, June 2013) */ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 /* MacbookPro12,1 (2015) */ #define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272 #define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273 #define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274 #define BCM5974_DEVICE(prod) { \ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_CLASS | \ USB_DEVICE_ID_MATCH_INT_PROTOCOL), \ .idVendor = USB_VENDOR_ID_APPLE, \ .idProduct = (prod), \ .bInterfaceClass = USB_INTERFACE_CLASS_HID, \ .bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE \ } /* table of devices that work with this driver */ static const struct usb_device_id bcm5974_table[] = { /* MacbookAir1.1 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_JIS), /* MacbookProPenryn */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_JIS), /* Macbook5,1 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS), /* MacbookAir3,2 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_JIS), /* MacbookAir3,1 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS), /* MacbookPro8 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), /* MacbookAir4,1 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS), /* MacbookAir4,2 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_JIS), /* MacbookPro8,2 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS), /* MacbookPro10,1 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS), /* MacbookPro10,2 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS), /* MacbookAir6,2 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), /* MacbookPro12,1 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_JIS), /* Terminating entry */ {} }; MODULE_DEVICE_TABLE(usb, bcm5974_table); MODULE_AUTHOR("Henrik Rydberg"); 
MODULE_DESCRIPTION("Apple USB BCM5974 multitouch driver"); MODULE_LICENSE("GPL"); #define dprintk(level, format, a...)\ { if (debug >= level) printk(KERN_DEBUG format, ##a); } static int debug = 1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Activate debugging output"); /* button data structure */ struct bt_data { u8 unknown1; /* constant */ u8 button; /* left button */ u8 rel_x; /* relative x coordinate */ u8 rel_y; /* relative y coordinate */ }; /* trackpad header types */ enum tp_type { TYPE1, /* plain trackpad */ TYPE2, /* button integrated in trackpad */ TYPE3, /* additional header fields since June 2013 */ TYPE4 /* additional header field for pressure data */ }; /* trackpad finger data offsets, le16-aligned */ #define HEADER_TYPE1 (13 * sizeof(__le16)) #define HEADER_TYPE2 (15 * sizeof(__le16)) #define HEADER_TYPE3 (19 * sizeof(__le16)) #define HEADER_TYPE4 (23 * sizeof(__le16)) /* trackpad button data offsets */ #define BUTTON_TYPE1 0 #define BUTTON_TYPE2 15 #define BUTTON_TYPE3 23 #define BUTTON_TYPE4 31 /* list of device capability bits */ #define HAS_INTEGRATED_BUTTON 1 /* trackpad finger data block size */ #define FSIZE_TYPE1 (14 * sizeof(__le16)) #define FSIZE_TYPE2 (14 * sizeof(__le16)) #define FSIZE_TYPE3 (14 * sizeof(__le16)) #define FSIZE_TYPE4 (15 * sizeof(__le16)) /* offset from header to finger struct */ #define DELTA_TYPE1 (0 * sizeof(__le16)) #define DELTA_TYPE2 (0 * sizeof(__le16)) #define DELTA_TYPE3 (0 * sizeof(__le16)) #define DELTA_TYPE4 (1 * sizeof(__le16)) /* usb control message mode switch data */ #define USBMSG_TYPE1 8, 0x300, 0, 0, 0x1, 0x8 #define USBMSG_TYPE2 8, 0x300, 0, 0, 0x1, 0x8 #define USBMSG_TYPE3 8, 0x300, 0, 0, 0x1, 0x8 #define USBMSG_TYPE4 2, 0x302, 2, 1, 0x1, 0x0 /* Wellspring initialization constants */ #define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1 #define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9 /* trackpad finger structure, le16-aligned */ struct tp_finger { __le16 origin; /* zero when switching track finger */ __le16 abs_x; /* absolute x coodinate */ __le16 abs_y; /* absolute y coodinate */ __le16 rel_x; /* relative x coodinate */ __le16 rel_y; /* relative y coodinate */ __le16 tool_major; /* tool area, major axis */ __le16 tool_minor; /* tool area, minor axis */ __le16 orientation; /* 16384 when point, else 15 bit angle */ __le16 touch_major; /* touch area, major axis */ __le16 touch_minor; /* touch area, minor axis */ __le16 unused[2]; /* zeros */ __le16 pressure; /* pressure on forcetouch touchpad */ __le16 multi; /* one finger: varies, more fingers: constant */ } __attribute__((packed,aligned(2))); /* trackpad finger data size, empirically at least ten fingers */ #define MAX_FINGERS 16 #define MAX_FINGER_ORIENTATION 16384 /* device-specific parameters */ struct bcm5974_param { int snratio; /* signal-to-noise ratio */ int min; /* device minimum reading */ int max; /* device maximum reading */ }; /* device-specific configuration */ struct bcm5974_config { int ansi, iso, jis; /* the product id of this device */ int caps; /* device capability bitmask */ int bt_ep; /* the endpoint of the button interface */ int bt_datalen; /* data length of the button interface */ int tp_ep; /* the endpoint of the trackpad interface */ enum tp_type tp_type; /* type of trackpad interface */ int tp_header; /* bytes in header block */ int tp_datalen; /* data length of the trackpad interface */ int tp_button; /* offset to button data */ int tp_fsize; /* bytes in single finger block */ int tp_delta; /* offset from header to finger struct */ int 
um_size; /* usb control message length */ int um_req_val; /* usb control message value */ int um_req_idx; /* usb control message index */ int um_switch_idx; /* usb control message mode switch index */ int um_switch_on; /* usb control message mode switch on */ int um_switch_off; /* usb control message mode switch off */ struct bcm5974_param p; /* finger pressure limits */ struct bcm5974_param w; /* finger width limits */ struct bcm5974_param x; /* horizontal limits */ struct bcm5974_param y; /* vertical limits */ struct bcm5974_param o; /* orientation limits */ }; /* logical device structure */ struct bcm5974 { char phys[64]; struct usb_device *udev; /* usb device */ struct usb_interface *intf; /* our interface */ struct input_dev *input; /* input dev */ struct bcm5974_config cfg; /* device configuration */ struct mutex pm_mutex; /* serialize access to open/suspend */ int opened; /* 1: opened, 0: closed */ struct urb *bt_urb; /* button usb request block */ struct bt_data *bt_data; /* button transferred data */ struct urb *tp_urb; /* trackpad usb request block */ u8 *tp_data; /* trackpad transferred data */ const struct tp_finger *index[MAX_FINGERS]; /* finger index data */ struct input_mt_pos pos[MAX_FINGERS]; /* position array */ int slots[MAX_FINGERS]; /* slot assignments */ }; /* trackpad finger block data, le16-aligned */ static const struct tp_finger *get_tp_finger(const struct bcm5974 *dev, int i) { const struct bcm5974_config *c = &dev->cfg; u8 *f_base = dev->tp_data + c->tp_header + c->tp_delta; return (const struct tp_finger *)(f_base + i * c->tp_fsize); } #define DATAFORMAT(type) \ type, \ HEADER_##type, \ HEADER_##type + (MAX_FINGERS) * (FSIZE_##type), \ BUTTON_##type, \ FSIZE_##type, \ DELTA_##type, \ USBMSG_##type /* logical signal quality */ #define SN_PRESSURE 45 /* pressure signal-to-noise ratio */ #define SN_WIDTH 25 /* width signal-to-noise ratio */ #define SN_COORD 250 /* coordinate signal-to-noise ratio */ #define SN_ORIENT 10 /* orientation signal-to-noise ratio */ /* device constants */ static const struct bcm5974_config bcm5974_config_table[] = { { USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, 0, 0x84, sizeof(struct bt_data), 0x81, DATAFORMAT(TYPE1), { SN_PRESSURE, 0, 256 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4824, 5342 }, { SN_COORD, -172, 5820 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, 0, 0x84, sizeof(struct bt_data), 0x81, DATAFORMAT(TYPE1), { SN_PRESSURE, 0, 256 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4824, 4824 }, { SN_COORD, -172, 4290 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4460, 5166 }, { SN_COORD, -75, 6700 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4620, 5140 }, { SN_COORD, -150, 6600 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI, 
USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4616, 5112 }, { SN_COORD, -142, 5234 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4415, 5050 }, { SN_COORD, -55, 6680 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4620, 5140 }, { SN_COORD, -150, 6600 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4750, 5280 }, { SN_COORD, -150, 6730 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4620, 5140 }, { SN_COORD, -150, 6600 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4750, 5280 }, { SN_COORD, -150, 6730 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4750, 5280 }, { SN_COORD, -150, 6730 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS, HAS_INTEGRATED_BUTTON, 0, sizeof(struct bt_data), 0x83, DATAFORMAT(TYPE3), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4620, 5140 }, { SN_COORD, -150, 6600 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS, HAS_INTEGRATED_BUTTON, 0, sizeof(struct bt_data), 0x83, DATAFORMAT(TYPE4), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4828, 5345 }, { SN_COORD, -203, 6803 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, {} }; /* return the device-specific configuration by device */ static const struct bcm5974_config *bcm5974_get_config(struct usb_device *udev) { u16 id = le16_to_cpu(udev->descriptor.idProduct); const struct bcm5974_config *cfg; for (cfg = bcm5974_config_table; cfg->ansi; ++cfg) if (cfg->ansi == id || cfg->iso == id || cfg->jis == id) return cfg; return 
bcm5974_config_table; } /* convert 16-bit little endian to signed integer */ static inline int raw2int(__le16 x) { return (signed short)le16_to_cpu(x); } static void set_abs(struct input_dev *input, unsigned int code, const struct bcm5974_param *p) { int fuzz = p->snratio ? (p->max - p->min) / p->snratio : 0; input_set_abs_params(input, code, p->min, p->max, fuzz, 0); } /* setup which logical events to report */ static void setup_events_to_report(struct input_dev *input_dev, const struct bcm5974_config *cfg) { __set_bit(EV_ABS, input_dev->evbit); /* for synaptics only */ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 256, 5, 0); input_set_abs_params(input_dev, ABS_TOOL_WIDTH, 0, 16, 0, 0); /* finger touch area */ set_abs(input_dev, ABS_MT_TOUCH_MAJOR, &cfg->w); set_abs(input_dev, ABS_MT_TOUCH_MINOR, &cfg->w); /* finger approach area */ set_abs(input_dev, ABS_MT_WIDTH_MAJOR, &cfg->w); set_abs(input_dev, ABS_MT_WIDTH_MINOR, &cfg->w); /* finger orientation */ set_abs(input_dev, ABS_MT_ORIENTATION, &cfg->o); /* finger position */ set_abs(input_dev, ABS_MT_POSITION_X, &cfg->x); set_abs(input_dev, ABS_MT_POSITION_Y, &cfg->y); __set_bit(EV_KEY, input_dev->evbit); __set_bit(BTN_LEFT, input_dev->keybit); if (cfg->caps & HAS_INTEGRATED_BUTTON) __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit); input_mt_init_slots(input_dev, MAX_FINGERS, INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK); } /* report button data as logical button state */ static int report_bt_state(struct bcm5974 *dev, int size) { if (size != sizeof(struct bt_data)) return -EIO; dprintk(7, "bcm5974: button data: %x %x %x %x\n", dev->bt_data->unknown1, dev->bt_data->button, dev->bt_data->rel_x, dev->bt_data->rel_y); input_report_key(dev->input, BTN_LEFT, dev->bt_data->button); input_sync(dev->input); return 0; } static void report_finger_data(struct input_dev *input, int slot, const struct input_mt_pos *pos, const struct tp_finger *f) { input_mt_slot(input, slot); input_mt_report_slot_state(input, MT_TOOL_FINGER, true); input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->touch_major) << 1); input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->touch_minor) << 1); input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->tool_major) << 1); input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->tool_minor) << 1); input_report_abs(input, ABS_MT_ORIENTATION, MAX_FINGER_ORIENTATION - raw2int(f->orientation)); input_report_abs(input, ABS_MT_POSITION_X, pos->x); input_report_abs(input, ABS_MT_POSITION_Y, pos->y); } static void report_synaptics_data(struct input_dev *input, const struct bcm5974_config *cfg, const struct tp_finger *f, int raw_n) { int abs_p = 0, abs_w = 0; if (raw_n) { int p = raw2int(f->touch_major); int w = raw2int(f->tool_major); if (p > 0 && raw2int(f->origin)) { abs_p = clamp_val(256 * p / cfg->p.max, 0, 255); abs_w = clamp_val(16 * w / cfg->w.max, 0, 15); } } input_report_abs(input, ABS_PRESSURE, abs_p); input_report_abs(input, ABS_TOOL_WIDTH, abs_w); } /* report trackpad data as logical trackpad state */ static int report_tp_state(struct bcm5974 *dev, int size) { const struct bcm5974_config *c = &dev->cfg; const struct tp_finger *f; struct input_dev *input = dev->input; int raw_n, i, n = 0; if (size < c->tp_header || (size - c->tp_header) % c->tp_fsize != 0) return -EIO; raw_n = (size - c->tp_header) / c->tp_fsize; for (i = 0; i < raw_n; i++) { f = get_tp_finger(dev, i); if (raw2int(f->touch_major) == 0) continue; dev->pos[n].x = raw2int(f->abs_x); dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y); 
dev->index[n++] = f; } input_mt_assign_slots(input, dev->slots, dev->pos, n, 0); for (i = 0; i < n; i++) report_finger_data(input, dev->slots[i], &dev->pos[i], dev->index[i]); input_mt_sync_frame(input); report_synaptics_data(input, c, get_tp_finger(dev, 0), raw_n); /* later types report button events via integrated button only */ if (c->caps & HAS_INTEGRATED_BUTTON) { int ibt = raw2int(dev->tp_data[c->tp_button]); input_report_key(input, BTN_LEFT, ibt); } input_sync(input); return 0; } static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) { const struct bcm5974_config *c = &dev->cfg; int retval = 0, size; char *data; /* Type 3 does not require a mode switch */ if (c->tp_type == TYPE3) return 0; data = kmalloc(c->um_size, GFP_KERNEL); if (!data) { dev_err(&dev->intf->dev, "out of memory\n"); retval = -ENOMEM; goto out; } /* read configuration */ size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), BCM5974_WELLSPRING_MODE_READ_REQUEST_ID, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, c->um_req_val, c->um_req_idx, data, c->um_size, 5000); if (size != c->um_size) { dev_err(&dev->intf->dev, "could not read from device\n"); retval = -EIO; goto out; } /* apply the mode switch */ data[c->um_switch_idx] = on ? c->um_switch_on : c->um_switch_off; /* write configuration */ size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, c->um_req_val, c->um_req_idx, data, c->um_size, 5000); if (size != c->um_size) { dev_err(&dev->intf->dev, "could not write to device\n"); retval = -EIO; goto out; } dprintk(2, "bcm5974: switched to %s mode.\n", on ? "wellspring" : "normal"); out: kfree(data); return retval; } static void bcm5974_irq_button(struct urb *urb) { struct bcm5974 *dev = urb->context; struct usb_interface *intf = dev->intf; int error; switch (urb->status) { case 0: break; case -EOVERFLOW: case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: dev_dbg(&intf->dev, "button urb shutting down: %d\n", urb->status); return; default: dev_dbg(&intf->dev, "button urb status: %d\n", urb->status); goto exit; } if (report_bt_state(dev, dev->bt_urb->actual_length)) dprintk(1, "bcm5974: bad button package, length: %d\n", dev->bt_urb->actual_length); exit: error = usb_submit_urb(dev->bt_urb, GFP_ATOMIC); if (error) dev_err(&intf->dev, "button urb failed: %d\n", error); } static void bcm5974_irq_trackpad(struct urb *urb) { struct bcm5974 *dev = urb->context; struct usb_interface *intf = dev->intf; int error; switch (urb->status) { case 0: break; case -EOVERFLOW: case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: dev_dbg(&intf->dev, "trackpad urb shutting down: %d\n", urb->status); return; default: dev_dbg(&intf->dev, "trackpad urb status: %d\n", urb->status); goto exit; } /* control response ignored */ if (dev->tp_urb->actual_length == 2) goto exit; if (report_tp_state(dev, dev->tp_urb->actual_length)) dprintk(1, "bcm5974: bad trackpad package, length: %d\n", dev->tp_urb->actual_length); exit: error = usb_submit_urb(dev->tp_urb, GFP_ATOMIC); if (error) dev_err(&intf->dev, "trackpad urb failed: %d\n", error); } /* * The Wellspring trackpad, like many recent Apple trackpads, shares * the usb device with the keyboard. Since keyboards are usually * handled by the HID system, the device ends up being handled by two * modules. Setting up the device therefore becomes slightly * complicated. 
To enable multitouch features, a mode switch is * required, which is usually applied via the control interface of the * device. It can be argued where this switch should take place. In * some drivers, like appletouch, the switch is made during * probe. However, the hid module may also alter the state of the * device, resulting in trackpad malfunction under certain * circumstances. To get around this problem, there is at least one * example that utilizes the USB_QUIRK_RESET_RESUME quirk in order to * receive a reset_resume request rather than the normal resume. * Since the implementation of reset_resume is equal to mode switch * plus start_traffic, it seems easier to always do the switch when * starting traffic on the device. */ static int bcm5974_start_traffic(struct bcm5974 *dev) { int error; error = bcm5974_wellspring_mode(dev, true); if (error) { dprintk(1, "bcm5974: mode switch failed\n"); goto err_out; } if (dev->bt_urb) { error = usb_submit_urb(dev->bt_urb, GFP_KERNEL); if (error) goto err_reset_mode; } error = usb_submit_urb(dev->tp_urb, GFP_KERNEL); if (error) goto err_kill_bt; return 0; err_kill_bt: usb_kill_urb(dev->bt_urb); err_reset_mode: bcm5974_wellspring_mode(dev, false); err_out: return error; } static void bcm5974_pause_traffic(struct bcm5974 *dev) { usb_kill_urb(dev->tp_urb); usb_kill_urb(dev->bt_urb); bcm5974_wellspring_mode(dev, false); } /* * The code below implements open/close and manual suspend/resume. * All functions may be called in random order. * * Opening a suspended device fails with EACCES - permission denied. * * Failing a resume leaves the device resumed but closed. */ static int bcm5974_open(struct input_dev *input) { struct bcm5974 *dev = input_get_drvdata(input); int error; error = usb_autopm_get_interface(dev->intf); if (error) return error; scoped_guard(mutex, &dev->pm_mutex) { error = bcm5974_start_traffic(dev); if (!error) dev->opened = 1; } if (error) usb_autopm_put_interface(dev->intf); return error; } static void bcm5974_close(struct input_dev *input) { struct bcm5974 *dev = input_get_drvdata(input); scoped_guard(mutex, &dev->pm_mutex) { bcm5974_pause_traffic(dev); dev->opened = 0; } usb_autopm_put_interface(dev->intf); } static int bcm5974_suspend(struct usb_interface *iface, pm_message_t message) { struct bcm5974 *dev = usb_get_intfdata(iface); guard(mutex)(&dev->pm_mutex); if (dev->opened) bcm5974_pause_traffic(dev); return 0; } static int bcm5974_resume(struct usb_interface *iface) { struct bcm5974 *dev = usb_get_intfdata(iface); guard(mutex)(&dev->pm_mutex); if (dev->opened) return bcm5974_start_traffic(dev); return 0; } static int bcm5974_probe(struct usb_interface *iface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(iface); const struct bcm5974_config *cfg; struct bcm5974 *dev; struct input_dev *input_dev; int error = -ENOMEM; /* find the product index */ cfg = bcm5974_get_config(udev); /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); input_dev = input_allocate_device(); if (!dev || !input_dev) { dev_err(&iface->dev, "out of memory\n"); goto err_free_devs; } dev->udev = udev; dev->intf = iface; dev->input = input_dev; dev->cfg = *cfg; mutex_init(&dev->pm_mutex); /* setup urbs */ if (cfg->tp_type == TYPE1) { dev->bt_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->bt_urb) goto err_free_devs; } dev->tp_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->tp_urb) goto err_free_bt_urb; if (dev->bt_urb) { dev->bt_data = usb_alloc_coherent(dev->udev, 
dev->cfg.bt_datalen, GFP_KERNEL, &dev->bt_urb->transfer_dma); if (!dev->bt_data) goto err_free_urb; } dev->tp_data = usb_alloc_coherent(dev->udev, dev->cfg.tp_datalen, GFP_KERNEL, &dev->tp_urb->transfer_dma); if (!dev->tp_data) goto err_free_bt_buffer; if (dev->bt_urb) { usb_fill_int_urb(dev->bt_urb, udev, usb_rcvintpipe(udev, cfg->bt_ep), dev->bt_data, dev->cfg.bt_datalen, bcm5974_irq_button, dev, 1); dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; } usb_fill_int_urb(dev->tp_urb, udev, usb_rcvintpipe(udev, cfg->tp_ep), dev->tp_data, dev->cfg.tp_datalen, bcm5974_irq_trackpad, dev, 1); dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* create bcm5974 device */ usb_make_path(udev, dev->phys, sizeof(dev->phys)); strlcat(dev->phys, "/input0", sizeof(dev->phys)); input_dev->name = "bcm5974"; input_dev->phys = dev->phys; usb_to_input_id(dev->udev, &input_dev->id); /* report driver capabilities via the version field */ input_dev->id.version = cfg->caps; input_dev->dev.parent = &iface->dev; input_set_drvdata(input_dev, dev); input_dev->open = bcm5974_open; input_dev->close = bcm5974_close; setup_events_to_report(input_dev, cfg); error = input_register_device(dev->input); if (error) goto err_free_buffer; /* save our data pointer in this interface device */ usb_set_intfdata(iface, dev); return 0; err_free_buffer: usb_free_coherent(dev->udev, dev->cfg.tp_datalen, dev->tp_data, dev->tp_urb->transfer_dma); err_free_bt_buffer: if (dev->bt_urb) usb_free_coherent(dev->udev, dev->cfg.bt_datalen, dev->bt_data, dev->bt_urb->transfer_dma); err_free_urb: usb_free_urb(dev->tp_urb); err_free_bt_urb: usb_free_urb(dev->bt_urb); err_free_devs: usb_set_intfdata(iface, NULL); input_free_device(input_dev); kfree(dev); return error; } static void bcm5974_disconnect(struct usb_interface *iface) { struct bcm5974 *dev = usb_get_intfdata(iface); usb_set_intfdata(iface, NULL); input_unregister_device(dev->input); usb_free_coherent(dev->udev, dev->cfg.tp_datalen, dev->tp_data, dev->tp_urb->transfer_dma); if (dev->bt_urb) usb_free_coherent(dev->udev, dev->cfg.bt_datalen, dev->bt_data, dev->bt_urb->transfer_dma); usb_free_urb(dev->tp_urb); usb_free_urb(dev->bt_urb); kfree(dev); } static struct usb_driver bcm5974_driver = { .name = "bcm5974", .probe = bcm5974_probe, .disconnect = bcm5974_disconnect, .suspend = bcm5974_suspend, .resume = bcm5974_resume, .id_table = bcm5974_table, .supports_autosuspend = 1, }; module_usb_driver(bcm5974_driver);
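/*
 * Editor's illustration (not part of the driver): a minimal user-space
 * sketch of the arithmetic used above -- raw2int() relies on plain
 * sign extension of the 16-bit sensor words, set_abs() derives the
 * input fuzz from the per-axis signal-to-noise ratio, and
 * report_tp_state() flips the y axis with y.min + y.max - raw_y.
 * The sample values are hypothetical; the kernel version additionally
 * byte-swaps with le16_to_cpu().
 */
#include <stdint.h>
#include <stdio.h>

static int raw2int(uint16_t x)          /* assume already cpu byte order */
{
	return (int16_t)x;              /* sign-extend the 16-bit word */
}

int main(void)
{
	/* wellspring1 y limits and SN_COORD from the config table above */
	const int ymin = -172, ymax = 5820, snratio = 250;

	int raw_y = raw2int(0xfff4);            /* decodes to -12 */
	int flipped = ymin + ymax - raw_y;      /* device y grows downwards */
	int fuzz = (ymax - ymin) / snratio;     /* same formula as set_abs() */

	printf("raw=%d flipped=%d fuzz=%d\n", raw_y, flipped, fuzz);
	return 0;
}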
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/mii.h: definitions for MII-compatible transceivers * Originally drivers/net/sunhme.h. * * Copyright (C) 1996, 1999, 2001 David S. Miller (davem@redhat.com) */ #ifndef __LINUX_MII_H__ #define __LINUX_MII_H__ #include <linux/if.h> #include <linux/linkmode.h> #include <uapi/linux/mii.h> struct ethtool_cmd; struct mii_if_info { int phy_id; int advertising; int phy_id_mask; int reg_num_mask; unsigned int full_duplex : 1; /* is full duplex? */ unsigned int force_media : 1; /* is autoneg. disabled? */ unsigned int supports_gmii : 1; /* are GMII registers supported? 
*/ struct net_device *dev; int (*mdio_read) (struct net_device *dev, int phy_id, int location); void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val); }; extern int mii_link_ok (struct mii_if_info *mii); extern int mii_nway_restart (struct mii_if_info *mii); extern void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); extern void mii_ethtool_get_link_ksettings( struct mii_if_info *mii, struct ethtool_link_ksettings *cmd); extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); extern int mii_ethtool_set_link_ksettings( struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd); extern int mii_check_gmii_support(struct mii_if_info *mii); extern void mii_check_link (struct mii_if_info *mii); extern unsigned int mii_check_media (struct mii_if_info *mii, unsigned int ok_to_print, unsigned int init_media); extern int generic_mii_ioctl(struct mii_if_info *mii_if, struct mii_ioctl_data *mii_data, int cmd, unsigned int *duplex_changed); static inline struct mii_ioctl_data *if_mii(struct ifreq *rq) { return (struct mii_ioctl_data *) &rq->ifr_ifru; } /** * mii_nway_result * @negotiated: value of MII ANAR and'd with ANLPAR * * Given a set of MII abilities, checks each bit and returns the * currently supported media, in the priority order defined by * IEEE 802.3u. We use LPA_xxx constants but note this is not the * value of LPA solely, as described above. * * The one exception to IEEE 802.3u is that 100baseT4 is placed * between 100T-full and 100T-half. If your phy does not support * 100T4 this is fine. If your phy places 100T4 elsewhere in the * priority order, you will need to roll your own function. */ static inline unsigned int mii_nway_result (unsigned int negotiated) { unsigned int ret; if (negotiated & LPA_100FULL) ret = LPA_100FULL; else if (negotiated & LPA_100BASE4) ret = LPA_100BASE4; else if (negotiated & LPA_100HALF) ret = LPA_100HALF; else if (negotiated & LPA_10FULL) ret = LPA_10FULL; else ret = LPA_10HALF; return ret; } /** * mii_duplex * @duplex_lock: Non-zero if duplex is locked at full * @negotiated: value of MII ANAR and'd with ANLPAR * * A small helper function for a common case. Returns one * if the media is operating or locked at full duplex, and * returns zero otherwise. */ static inline unsigned int mii_duplex (unsigned int duplex_lock, unsigned int negotiated) { if (duplex_lock) return 1; if (mii_nway_result(negotiated) & LPA_DUPLEX) return 1; return 0; } /** * ethtool_adv_to_mii_adv_t * @ethadv: the ethtool advertisement settings * * A small helper function that translates ethtool advertisement * settings to phy autonegotiation advertisements for the * MII_ADVERTISE register. */ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv) { u32 result = 0; if (ethadv & ADVERTISED_10baseT_Half) result |= ADVERTISE_10HALF; if (ethadv & ADVERTISED_10baseT_Full) result |= ADVERTISE_10FULL; if (ethadv & ADVERTISED_100baseT_Half) result |= ADVERTISE_100HALF; if (ethadv & ADVERTISED_100baseT_Full) result |= ADVERTISE_100FULL; if (ethadv & ADVERTISED_Pause) result |= ADVERTISE_PAUSE_CAP; if (ethadv & ADVERTISED_Asym_Pause) result |= ADVERTISE_PAUSE_ASYM; return result; } /** * linkmode_adv_to_mii_adv_t * @advertising: the linkmode advertisement settings * * A small helper function that translates linkmode advertisement * settings to phy autonegotiation advertisements for the * MII_ADVERTISE register. 
*/ static inline u32 linkmode_adv_to_mii_adv_t(const unsigned long *advertising) { u32 result = 0; if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, advertising)) result |= ADVERTISE_10HALF; if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, advertising)) result |= ADVERTISE_10FULL; if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, advertising)) result |= ADVERTISE_100HALF; if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising)) result |= ADVERTISE_100FULL; if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising)) result |= ADVERTISE_PAUSE_CAP; if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising)) result |= ADVERTISE_PAUSE_ASYM; return result; } /** * mii_adv_to_ethtool_adv_t * @adv: value of the MII_ADVERTISE register * * A small helper function that translates MII_ADVERTISE bits * to ethtool advertisement settings. */ static inline u32 mii_adv_to_ethtool_adv_t(u32 adv) { u32 result = 0; if (adv & ADVERTISE_10HALF) result |= ADVERTISED_10baseT_Half; if (adv & ADVERTISE_10FULL) result |= ADVERTISED_10baseT_Full; if (adv & ADVERTISE_100HALF) result |= ADVERTISED_100baseT_Half; if (adv & ADVERTISE_100FULL) result |= ADVERTISED_100baseT_Full; if (adv & ADVERTISE_PAUSE_CAP) result |= ADVERTISED_Pause; if (adv & ADVERTISE_PAUSE_ASYM) result |= ADVERTISED_Asym_Pause; return result; } /** * ethtool_adv_to_mii_ctrl1000_t * @ethadv: the ethtool advertisement settings * * A small helper function that translates ethtool advertisement * settings to phy autonegotiation advertisements for the * MII_CTRL1000 register when in 1000T mode. */ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) { u32 result = 0; if (ethadv & ADVERTISED_1000baseT_Half) result |= ADVERTISE_1000HALF; if (ethadv & ADVERTISED_1000baseT_Full) result |= ADVERTISE_1000FULL; return result; } /** * linkmode_adv_to_mii_ctrl1000_t * @advertising: the linkmode advertisement settings * * A small helper function that translates linkmode advertisement * settings to phy autonegotiation advertisements for the * MII_CTRL1000 register when in 1000T mode. */ static inline u32 linkmode_adv_to_mii_ctrl1000_t(const unsigned long *advertising) { u32 result = 0; if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertising)) result |= ADVERTISE_1000HALF; if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising)) result |= ADVERTISE_1000FULL; return result; } /** * mii_ctrl1000_to_ethtool_adv_t * @adv: value of the MII_CTRL1000 register * * A small helper function that translates MII_CTRL1000 * bits, when in 1000Base-T mode, to ethtool * advertisement settings. */ static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv) { u32 result = 0; if (adv & ADVERTISE_1000HALF) result |= ADVERTISED_1000baseT_Half; if (adv & ADVERTISE_1000FULL) result |= ADVERTISED_1000baseT_Full; return result; } /** * mii_lpa_to_ethtool_lpa_t * @lpa: value of the MII_LPA register * * A small helper function that translates MII_LPA * bits, when in 1000Base-T mode, to ethtool * LP advertisement settings. */ static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa) { u32 result = 0; if (lpa & LPA_LPACK) result |= ADVERTISED_Autoneg; return result | mii_adv_to_ethtool_adv_t(lpa); } /** * mii_stat1000_to_ethtool_lpa_t * @lpa: value of the MII_STAT1000 register * * A small helper function that translates MII_STAT1000 * bits, when in 1000Base-T mode, to ethtool * advertisement settings. 
*/ static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) { u32 result = 0; if (lpa & LPA_1000HALF) result |= ADVERTISED_1000baseT_Half; if (lpa & LPA_1000FULL) result |= ADVERTISED_1000baseT_Full; return result; } /** * mii_stat1000_mod_linkmode_lpa_t * @advertising: target the linkmode advertisement settings * @lpa: value of the MII_STAT1000 register * * A small helper function that translates MII_STAT1000 bits, when in * 1000Base-T mode, to linkmode advertisement settings. Other bits in * advertising are not changed. */ static inline void mii_stat1000_mod_linkmode_lpa_t(unsigned long *advertising, u32 lpa) { linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertising, lpa & LPA_1000HALF); linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising, lpa & LPA_1000FULL); } /** * ethtool_adv_to_mii_adv_x * @ethadv: the ethtool advertisement settings * * A small helper function that translates ethtool advertisement * settings to phy autonegotiation advertisements for the * MII_CTRL1000 register when in 1000Base-X mode. */ static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv) { u32 result = 0; if (ethadv & ADVERTISED_1000baseT_Half) result |= ADVERTISE_1000XHALF; if (ethadv & ADVERTISED_1000baseT_Full) result |= ADVERTISE_1000XFULL; if (ethadv & ADVERTISED_Pause) result |= ADVERTISE_1000XPAUSE; if (ethadv & ADVERTISED_Asym_Pause) result |= ADVERTISE_1000XPSE_ASYM; return result; } /** * mii_adv_to_ethtool_adv_x * @adv: value of the MII_CTRL1000 register * * A small helper function that translates MII_CTRL1000 * bits, when in 1000Base-X mode, to ethtool * advertisement settings. */ static inline u32 mii_adv_to_ethtool_adv_x(u32 adv) { u32 result = 0; if (adv & ADVERTISE_1000XHALF) result |= ADVERTISED_1000baseT_Half; if (adv & ADVERTISE_1000XFULL) result |= ADVERTISED_1000baseT_Full; if (adv & ADVERTISE_1000XPAUSE) result |= ADVERTISED_Pause; if (adv & ADVERTISE_1000XPSE_ASYM) result |= ADVERTISED_Asym_Pause; return result; } /** * mii_adv_mod_linkmode_adv_t * @advertising: pointer to destination link mode. * @adv: value of the MII_ADVERTISE register * * A small helper function that translates MII_ADVERTISE bits to * linkmode advertisement settings. Leaves other bits unchanged. */ static inline void mii_adv_mod_linkmode_adv_t(unsigned long *advertising, u32 adv) { linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, advertising, adv & ADVERTISE_10HALF); linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, advertising, adv & ADVERTISE_10FULL); linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, advertising, adv & ADVERTISE_100HALF); linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising, adv & ADVERTISE_100FULL); linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising, adv & ADVERTISE_PAUSE_CAP); linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising, adv & ADVERTISE_PAUSE_ASYM); } /** * mii_adv_to_linkmode_adv_t * @advertising: pointer to destination link mode. * @adv: value of the MII_ADVERTISE register * * A small helper function that translates MII_ADVERTISE bits * to linkmode advertisement settings. Clears the old value * of advertising. */ static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising, u32 adv) { linkmode_zero(advertising); mii_adv_mod_linkmode_adv_t(advertising, adv); } /** * mii_lpa_to_linkmode_lpa_t * @lpa: value of the MII_LPA register * * A small helper function that translates MII_LPA bits, when in * 1000Base-T mode, to linkmode LP advertisement settings. 
Clears the * old value of advertising */ static inline void mii_lpa_to_linkmode_lpa_t(unsigned long *lp_advertising, u32 lpa) { mii_adv_to_linkmode_adv_t(lp_advertising, lpa); if (lpa & LPA_LPACK) linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, lp_advertising); } /** * mii_lpa_mod_linkmode_lpa_t * @lpa: value of the MII_LPA register * * A small helper function that translates MII_LPA bits, when in * 1000Base-T mode, to linkmode LP advertisement settings. Leaves * other bits unchanged. */ static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising, u32 lpa) { mii_adv_mod_linkmode_adv_t(lp_advertising, lpa); linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, lp_advertising, lpa & LPA_LPACK); } static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising, u32 ctrl1000) { linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertising, ctrl1000 & ADVERTISE_1000HALF); linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising, ctrl1000 & ADVERTISE_1000FULL); } /** * linkmode_adv_to_lcl_adv_t * @advertising: pointer to linkmode advertising * * A small helper function that translates linkmode advertising to local * (lcl_adv) pause capabilities. */ static inline u32 linkmode_adv_to_lcl_adv_t(const unsigned long *advertising) { u32 lcl_adv = 0; if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising)) lcl_adv |= ADVERTISE_PAUSE_CAP; if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising)) lcl_adv |= ADVERTISE_PAUSE_ASYM; return lcl_adv; } /** * mii_lpa_mod_linkmode_x - decode the link partner's config_reg to linkmodes * @linkmodes: link modes array * @lpa: config_reg word from link partner * @fd_bit: link mode for 1000XFULL bit */ static inline void mii_lpa_mod_linkmode_x(unsigned long *linkmodes, u16 lpa, int fd_bit) { linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, linkmodes, lpa & LPA_LPACK); linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes, lpa & LPA_1000XPAUSE); linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes, lpa & LPA_1000XPAUSE_ASYM); linkmode_mod_bit(fd_bit, linkmodes, lpa & LPA_1000XFULL); } /** * linkmode_adv_to_mii_adv_x - encode a linkmode to config_reg * @linkmodes: linkmodes * @fd_bit: full duplex bit */ static inline u16 linkmode_adv_to_mii_adv_x(const unsigned long *linkmodes, int fd_bit) { u16 adv = 0; if (linkmode_test_bit(fd_bit, linkmodes)) adv |= ADVERTISE_1000XFULL; if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes)) adv |= ADVERTISE_1000XPAUSE; if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes)) adv |= ADVERTISE_1000XPSE_ASYM; return adv; } /** * mii_advertise_flowctrl - get flow control advertisement flags * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both) */ static inline u16 mii_advertise_flowctrl(int cap) { u16 adv = 0; if (cap & FLOW_CTRL_RX) adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; if (cap & FLOW_CTRL_TX) adv ^= ADVERTISE_PAUSE_ASYM; return adv; } /** * mii_resolve_flowctrl_fdx * @lcladv: value of MII ADVERTISE register * @rmtadv: value of MII LPA register * * Resolve full duplex flow control as per IEEE 802.3-2005 table 28B-3 */ static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv) { u8 cap = 0; if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) { cap = FLOW_CTRL_TX | FLOW_CTRL_RX; } else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) { if (lcladv & ADVERTISE_PAUSE_CAP) cap = FLOW_CTRL_RX; else if (rmtadv & ADVERTISE_PAUSE_CAP) cap = FLOW_CTRL_TX; } return cap; } /** * mii_bmcr_encode_fixed - encode fixed speed/duplex settings to a BMCR value * 
@speed: a SPEED_* value * @duplex: a DUPLEX_* value * * Encode the speed and duplex to a BMCR value. 2500, 1000, 100 and 10 Mbps are * supported. 2500Mbps is encoded to 1000Mbps. Other speeds are encoded as 10 * Mbps. Unknown duplex values are encoded to half-duplex. */ static inline u16 mii_bmcr_encode_fixed(int speed, int duplex) { u16 bmcr; switch (speed) { case SPEED_2500: case SPEED_1000: bmcr = BMCR_SPEED1000; break; case SPEED_100: bmcr = BMCR_SPEED100; break; case SPEED_10: default: bmcr = BMCR_SPEED10; break; } if (duplex == DUPLEX_FULL) bmcr |= BMCR_FULLDPLX; return bmcr; } #endif /* __LINUX_MII_H__ */
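/*
 * Editor's illustration (not part of the header): a self-contained
 * user-space sketch of the two resolution helpers above. The register
 * bit values are copied from uapi/linux/mii.h; the ANAR/ANLPAR sample
 * values are made up for the example.
 */
#include <stdio.h>

#define LPA_10HALF		0x0020
#define LPA_10FULL		0x0040
#define LPA_100HALF		0x0080
#define LPA_100FULL		0x0100
#define LPA_100BASE4		0x0200
#define ADVERTISE_PAUSE_CAP	0x0400
#define ADVERTISE_PAUSE_ASYM	0x0800
#define FLOW_CTRL_TX		0x01
#define FLOW_CTRL_RX		0x02

/* same priority order as mii_nway_result() above */
static unsigned int nway_result(unsigned int negotiated)
{
	if (negotiated & LPA_100FULL)
		return LPA_100FULL;
	if (negotiated & LPA_100BASE4)
		return LPA_100BASE4;
	if (negotiated & LPA_100HALF)
		return LPA_100HALF;
	if (negotiated & LPA_10FULL)
		return LPA_10FULL;
	return LPA_10HALF;
}

int main(void)
{
	/* we advertise 10FULL/100FULL + pause; partner 10FULL/100HALF + pause */
	unsigned int anar   = LPA_10FULL | LPA_100FULL | ADVERTISE_PAUSE_CAP;
	unsigned int anlpar = LPA_10FULL | LPA_100HALF | ADVERTISE_PAUSE_CAP;

	/* the AND of both registers decides the link; prints 0x0040 (10FULL) */
	printf("media: 0x%04x\n", nway_result(anar & anlpar));

	/* symmetric pause on both sides -> pause both ways, cf. table 28B-3 */
	if (anar & anlpar & ADVERTISE_PAUSE_CAP)
		printf("flow control: 0x%x\n", FLOW_CTRL_TX | FLOW_CTRL_RX);
	return 0;
}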
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/usb.h> #include "mt76x0.h" #include "mcu.h" #include "../mt76x02_usb.h" static struct usb_device_id mt76x0_device_table[] = { { USB_DEVICE(0x148F, 0x7610) }, /* MT7610U */ { USB_DEVICE(0x13B1, 0x003E) }, /* Linksys AE6000 */ { USB_DEVICE(0x0E8D, 0x7610) }, /* Sabrent NTWLAC */ { USB_DEVICE(0x7392, 0xa711) }, /* Edimax 7711mac */ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax / Elecom */ { USB_DEVICE(0x148f, 0x761a) }, /* TP-Link TL-WDN5200 */ { USB_DEVICE(0x148f, 0x760a) }, /* TP-Link unknown */ { USB_DEVICE(0x0b05, 0x17d1) }, /* Asus USB-AC51 */ { USB_DEVICE(0x0b05, 0x17db) }, /* Asus USB-AC50 */ { USB_DEVICE(0x0df6, 0x0075) }, /* Sitecom WLA-3100 */ { USB_DEVICE(0x2019, 0xab31) }, /* Planex GW-450D */ { USB_DEVICE(0x2001, 0x3d02) }, /* D-LINK DWA-171 rev B1 */ { USB_DEVICE(0x0586, 0x3425) }, /* Zyxel NWD6505 */ { USB_DEVICE(0x07b8, 0x7610) }, /* AboCom AU7212 */ { USB_DEVICE(0x04bb, 0x0951) }, /* I-O DATA WN-AC433UK */ { USB_DEVICE(0x057c, 0x8502) }, /* AVM FRITZ!WLAN USB Stick AC 430 */ { USB_DEVICE(0x293c, 0x5702) }, /* Comcast Xfinity KXW02AAA */ { USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH */ { USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */ { USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. 
ac Stick */ { USB_DEVICE(0x2357, 0x0123) }, /* TP-LINK T2UHP_US_v1 */ { USB_DEVICE(0x2357, 0x010b) }, /* TP-LINK T2UHP_UN_v1 */ /* TP-LINK Archer T1U */ { USB_DEVICE(0x2357, 0x0105), .driver_info = 1, }, /* MT7630U */ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)}, /* MT7650U */ { USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)}, { 0, } }; static void mt76x0_init_usb_dma(struct mt76x02_dev *dev) { u32 val; val = mt76_rr(dev, MT_USB_DMA_CFG); val |= MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN; /* disable AGGR_BULK_RX in order to receive one * frame in each rx urb and avoid copies */ val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN; mt76_wr(dev, MT_USB_DMA_CFG, val); val = mt76_rr(dev, MT_COM_REG0); if (val & 1) dev_dbg(dev->mt76.dev, "MCU not ready\n"); val = mt76_rr(dev, MT_USB_DMA_CFG); val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD; mt76_wr(dev, MT_USB_DMA_CFG, val); val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PAD; mt76_wr(dev, MT_USB_DMA_CFG, val); } static void mt76x0u_cleanup(struct mt76x02_dev *dev) { clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); mt76x0_chip_onoff(dev, false, false); mt76u_queues_deinit(&dev->mt76); } static void mt76x0u_stop(struct ieee80211_hw *hw, bool suspend) { struct mt76x02_dev *dev = hw->priv; clear_bit(MT76_STATE_RUNNING, &dev->mphy.state); cancel_delayed_work_sync(&dev->cal_work); cancel_delayed_work_sync(&dev->mphy.mac_work); mt76u_stop_tx(&dev->mt76); mt76x02u_exit_beacon_config(dev); if (test_bit(MT76_REMOVED, &dev->mphy.state)) return; if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000)) dev_warn(dev->mt76.dev, "TX DMA did not stop\n"); mt76x0_mac_stop(dev); if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000)) dev_warn(dev->mt76.dev, "RX DMA did not stop\n"); } static int mt76x0u_start(struct ieee80211_hw *hw) { struct mt76x02_dev *dev = hw->priv; int ret; ret = mt76x02u_mac_start(dev); if (ret) return ret; mt76x0_phy_calibrate(dev, true); ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mphy.mac_work, MT_MAC_WORK_INTERVAL); ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, MT_CALIBRATE_INTERVAL); set_bit(MT76_STATE_RUNNING, &dev->mphy.state); return 0; } static const struct ieee80211_ops mt76x0u_ops = { .add_chanctx = ieee80211_emulate_add_chanctx, .remove_chanctx = ieee80211_emulate_remove_chanctx, .change_chanctx = ieee80211_emulate_change_chanctx, .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx, .tx = mt76x02_tx, .start = mt76x0u_start, .stop = mt76x0u_stop, .add_interface = mt76x02_add_interface, .remove_interface = mt76x02_remove_interface, .config = mt76x0_config, .configure_filter = mt76x02_configure_filter, .bss_info_changed = mt76x02_bss_info_changed, .sta_state = mt76_sta_state, .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, .set_key = mt76x02_set_key, .conf_tx = mt76x02_conf_tx, .sw_scan_start = mt76_sw_scan, .sw_scan_complete = mt76x02_sw_scan_complete, .ampdu_action = mt76x02_ampdu_action, .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, .set_rts_threshold = mt76x02_set_rts_threshold, .wake_tx_queue = mt76_wake_tx_queue, .get_txpower = mt76_get_txpower, .get_survey = mt76_get_survey, .set_tim = mt76_set_tim, .release_buffered_frames = mt76_release_buffered_frames, .get_antenna = mt76_get_antenna, .set_sar_specs = mt76x0_set_sar_specs, }; static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset) { int err; mt76x0_chip_onoff(dev, true, reset); if (!mt76x02_wait_for_mac(&dev->mt76)) return -ETIMEDOUT; err = mt76x0u_mcu_init(dev); if (err < 0) return err; 
mt76x0_init_usb_dma(dev); err = mt76x0_init_hardware(dev); if (err < 0) return err; mt76x02u_init_beacon_config(dev); mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); mt76_wr(dev, MT_TXOP_CTRL_CFG, FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); return 0; } static int mt76x0u_register_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = dev->mt76.hw; struct mt76_usb *usb = &dev->mt76.usb; int err; usb->mcu.data = devm_kmalloc(dev->mt76.dev, MCU_RESP_URB_SIZE, GFP_KERNEL); if (!usb->mcu.data) return -ENOMEM; err = mt76u_alloc_queues(&dev->mt76); if (err < 0) goto out_err; err = mt76x0u_init_hardware(dev, true); if (err < 0) goto out_err; /* check hw sg support in order to enable AMSDU */ hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_TX_SG_MAX_SIZE : 1; err = mt76x0_register_device(dev); if (err < 0) goto out_err; set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); return 0; out_err: mt76x0u_cleanup(dev); return err; } static int mt76x0u_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { static const struct mt76_driver_ops drv_ops = { .drv_flags = MT_DRV_SW_RX_AIRTIME | MT_DRV_IGNORE_TXS_FAILED, .survey_flags = SURVEY_INFO_TIME_TX, .update_survey = mt76x02_update_channel, .set_channel = mt76x0_set_channel, .tx_prepare_skb = mt76x02u_tx_prepare_skb, .tx_complete_skb = mt76x02u_tx_complete_skb, .tx_status_data = mt76x02_tx_status_data, .rx_skb = mt76x02_queue_rx_skb, .sta_ps = mt76x02_sta_ps, .sta_add = mt76x02_sta_add, .sta_remove = mt76x02_sta_remove, }; struct usb_device *usb_dev = interface_to_usbdev(usb_intf); struct mt76x02_dev *dev; struct mt76_dev *mdev; u32 mac_rev; int ret; mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops, &drv_ops); if (!mdev) return -ENOMEM; dev = container_of(mdev, struct mt76x02_dev, mt76); mutex_init(&dev->phy_mutex); /* Quirk for Archer T1U */ if (id->driver_info) dev->no_2ghz = true; usb_dev = usb_get_dev(usb_dev); usb_reset_device(usb_dev); usb_set_intfdata(usb_intf, dev); mt76x02u_init_mcu(mdev); ret = mt76u_init(mdev, usb_intf); if (ret) goto err; /* Disable the HW, otherwise the MCU fails to initialize on hot reboot */ mt76x0_chip_onoff(dev, false, false); if (!mt76x02_wait_for_mac(mdev)) { ret = -ETIMEDOUT; goto err; } mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); mac_rev = mt76_rr(dev, MT_MAC_CSR0); dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n", mdev->rev, mac_rev); if (!is_mt76x0(dev)) { ret = -ENODEV; goto err; } /* Note: vendor driver skips this check for MT76X0U */ if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) dev_warn(mdev->dev, "Warning: eFUSE not present\n"); ret = mt76x0u_register_device(dev); if (ret < 0) goto err; return 0; err: usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); mt76u_queues_deinit(&dev->mt76); mt76_free_device(&dev->mt76); return ret; } static void mt76x0_disconnect(struct usb_interface *usb_intf) { struct mt76x02_dev *dev = usb_get_intfdata(usb_intf); bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); if (!initialized) return; ieee80211_unregister_hw(dev->mt76.hw); mt76x0u_cleanup(dev); usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); mt76_free_device(&dev->mt76); } static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf, pm_message_t state) { struct mt76x02_dev *dev = usb_get_intfdata(usb_intf); mt76u_stop_rx(&dev->mt76); clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); mt76x0_chip_onoff(dev, false, false); return 0; } static int 
__maybe_unused mt76x0_resume(struct usb_interface *usb_intf) { struct mt76x02_dev *dev = usb_get_intfdata(usb_intf); int ret; ret = mt76u_resume_rx(&dev->mt76); if (ret < 0) goto err; ret = mt76x0u_init_hardware(dev, false); if (ret) goto err; return 0; err: mt76x0u_cleanup(dev); return ret; } MODULE_DEVICE_TABLE(usb, mt76x0_device_table); MODULE_FIRMWARE(MT7610E_FIRMWARE); MODULE_FIRMWARE(MT7610U_FIRMWARE); MODULE_DESCRIPTION("MediaTek MT76x0U (USB) wireless driver"); MODULE_LICENSE("GPL"); static struct usb_driver mt76x0_driver = { .name = KBUILD_MODNAME, .id_table = mt76x0_device_table, .probe = mt76x0u_probe, .disconnect = mt76x0_disconnect, .suspend = mt76x0_suspend, .resume = mt76x0_resume, .reset_resume = mt76x0_resume, .soft_unbind = 1, .disable_hub_initiated_lpm = 1, }; module_usb_driver(mt76x0_driver);
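/*
 * Editor's illustration (not part of the driver): mt76x0_init_usb_dma()
 * above is a plain read-modify-write sequence on MT_USB_DMA_CFG. This
 * mock reproduces the pattern against a fake register so the bit flow
 * is easy to follow; the bit positions here are stand-ins, not the real
 * MT_USB_DMA_CFG_* values.
 */
#include <stdint.h>
#include <stdio.h>

#define CFG_RX_BULK_EN		(1u << 0)	/* stand-in bits */
#define CFG_TX_BULK_EN		(1u << 1)
#define CFG_RX_BULK_AGG_EN	(1u << 2)
#define CFG_RX_DROP_OR_PAD	(1u << 3)

static uint32_t fake_reg;			/* mock hardware register */
static uint32_t rr(void) { return fake_reg; }	/* mt76_rr() stand-in */
static void wr(uint32_t v) { fake_reg = v; }	/* mt76_wr() stand-in */

int main(void)
{
	uint32_t val = rr();

	/* enable bulk DMA, disable rx aggregation (one frame per rx urb) */
	val |= CFG_RX_BULK_EN | CFG_TX_BULK_EN;
	val &= ~CFG_RX_BULK_AGG_EN;
	wr(val);

	/* pulse DROP_OR_PAD on and off, as mt76x0_init_usb_dma() does */
	val = rr();
	val |= CFG_RX_DROP_OR_PAD;
	wr(val);
	val &= ~CFG_RX_DROP_OR_PAD;
	wr(val);

	printf("final cfg: 0x%08x\n", rr());	/* prints 0x00000003 */
	return 0;
}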
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for Steelseries devices * * Copyright (c) 2013 Simon Wood * Copyright (c) 2023 Bastien Nocera */ /* */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/leds.h> #include "hid-ids.h" #define STEELSERIES_SRWS1 BIT(0) #define STEELSERIES_ARCTIS_1 BIT(1) #define STEELSERIES_ARCTIS_9 BIT(2) struct steelseries_device { struct 
hid_device *hdev; unsigned long quirks; struct delayed_work battery_work; spinlock_t lock; bool removed; struct power_supply_desc battery_desc; struct power_supply *battery; uint8_t battery_capacity; bool headset_connected; bool battery_charging; }; #if IS_BUILTIN(CONFIG_LEDS_CLASS) || \ (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) #define SRWS1_NUMBER_LEDS 15 struct steelseries_srws1_data { __u16 led_state; /* the last element is used for setting all leds simultaneously */ struct led_classdev *led[SRWS1_NUMBER_LEDS + 1]; }; #endif /* Fixed report descriptor for Steelseries SRW-S1 wheel controller * * The original descriptor hides the sensitivity and assists dials * in a custom vendor usage page. This inserts a patch to make them * appear in the 'Generic Desktop' usage. */ static const __u8 steelseries_srws1_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop) */ 0x09, 0x08, /* Usage (MultiAxis), Changed */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x05, 0x01, /* Changed Usage Page (Desktop), */ 0x09, 0x30, /* Changed Usage (X), */ 0x16, 0xF8, 0xF8, /* Logical Minimum (-1800), */ 0x26, 0x08, 0x07, /* Logical Maximum (1800), */ 0x65, 0x14, /* Unit (Degrees), */ 0x55, 0x0F, /* Unit Exponent (15), */ 0x75, 0x10, /* Report Size (16), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Changed Usage (Y), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x75, 0x0C, /* Report Size (12), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Changed Usage (Z), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x75, 0x0C, /* Report Size (12), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x39, /* Usage (Hat Switch), */ 0x25, 0x07, /* Logical Maximum (7), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0x3B, 0x01, /* Physical Maximum (315), */ 0x65, 0x14, /* Unit (Degrees), */ 0x75, 0x04, /* Report Size (4), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x65, 0x00, /* Unit, */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x11, /* Usage Maximum (11h), */ 0x95, 0x11, /* Report Count (17), */ 0x81, 0x02, /* Input (Variable), */ /* ---- Dial patch starts here ---- */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x33, /* Usage (RX), */ 0x75, 0x04, /* Report Size (4), */ 0x95, 0x02, /* Report Count (2), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x25, 0x0b, /* Logical Maximum (b), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x35, /* Usage (RZ), */ 0x75, 0x04, /* Report Size (4), */ 0x95, 0x01, /* Report Count (1), */ 0x25, 0x03, /* Logical Maximum (3), */ 0x81, 0x02, /* Input (Variable), */ /* ---- Dial patch ends here ---- */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x75, 0x04, /* Changed Report Size (4), */ 0x95, 0x0D, /* Changed Report Count (13), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x09, 0x02, /* Usage (02h), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x10, /* Report Count (16), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; #if IS_BUILTIN(CONFIG_LEDS_CLASS) || \ 
(IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds) { struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); __s32 *value = report->field[0]->value; value[0] = 0x40; value[1] = leds & 0xFF; value[2] = leds >> 8; value[3] = 0x00; value[4] = 0x00; value[5] = 0x00; value[6] = 0x00; value[7] = 0x00; value[8] = 0x00; value[9] = 0x00; value[10] = 0x00; value[11] = 0x00; value[12] = 0x00; value[13] = 0x00; value[14] = 0x00; value[15] = 0x00; hid_hw_request(hdev, report, HID_REQ_SET_REPORT); /* Note: LED change does not show on device until the device is read/polled */ } static void steelseries_srws1_led_all_set_brightness(struct led_classdev *led_cdev, enum led_brightness value) { struct device *dev = led_cdev->dev->parent; struct hid_device *hid = to_hid_device(dev); struct steelseries_srws1_data *drv_data = hid_get_drvdata(hid); if (!drv_data) { hid_err(hid, "Device data not found."); return; } if (value == LED_OFF) drv_data->led_state = 0; else drv_data->led_state = (1 << (SRWS1_NUMBER_LEDS + 1)) - 1; steelseries_srws1_set_leds(hid, drv_data->led_state); } static enum led_brightness steelseries_srws1_led_all_get_brightness(struct led_classdev *led_cdev) { struct device *dev = led_cdev->dev->parent; struct hid_device *hid = to_hid_device(dev); struct steelseries_srws1_data *drv_data; drv_data = hid_get_drvdata(hid); if (!drv_data) { hid_err(hid, "Device data not found."); return LED_OFF; } return (drv_data->led_state >> SRWS1_NUMBER_LEDS) ? LED_FULL : LED_OFF; } static void steelseries_srws1_led_set_brightness(struct led_classdev *led_cdev, enum led_brightness value) { struct device *dev = led_cdev->dev->parent; struct hid_device *hid = to_hid_device(dev); struct steelseries_srws1_data *drv_data = hid_get_drvdata(hid); int i, state = 0; if (!drv_data) { hid_err(hid, "Device data not found."); return; } for (i = 0; i < SRWS1_NUMBER_LEDS; i++) { if (led_cdev != drv_data->led[i]) continue; state = (drv_data->led_state >> i) & 1; if (value == LED_OFF && state) { drv_data->led_state &= ~(1 << i); steelseries_srws1_set_leds(hid, drv_data->led_state); } else if (value != LED_OFF && !state) { drv_data->led_state |= 1 << i; steelseries_srws1_set_leds(hid, drv_data->led_state); } break; } } static enum led_brightness steelseries_srws1_led_get_brightness(struct led_classdev *led_cdev) { struct device *dev = led_cdev->dev->parent; struct hid_device *hid = to_hid_device(dev); struct steelseries_srws1_data *drv_data; int i, value = 0; drv_data = hid_get_drvdata(hid); if (!drv_data) { hid_err(hid, "Device data not found."); return LED_OFF; } for (i = 0; i < SRWS1_NUMBER_LEDS; i++) if (led_cdev == drv_data->led[i]) { value = (drv_data->led_state >> i) & 1; break; } return value ? 
LED_FULL : LED_OFF; } static int steelseries_srws1_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret, i; struct led_classdev *led; size_t name_sz; char *name; struct steelseries_srws1_data *drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL); if (drv_data == NULL) { hid_err(hdev, "can't alloc SRW-S1 memory\n"); return -ENOMEM; } hid_set_drvdata(hdev, drv_data); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err_free; } if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 16)) { ret = -ENODEV; goto err_free; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } /* register led subsystem */ drv_data->led_state = 0; for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) drv_data->led[i] = NULL; steelseries_srws1_set_leds(hdev, 0); name_sz = strlen(hdev->uniq) + 16; /* 'ALL', for setting all LEDs simultaneously */ led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL); if (!led) { hid_err(hdev, "can't allocate memory for LED ALL\n"); goto err_led; } name = (void *)(&led[1]); snprintf(name, name_sz, "SRWS1::%s::RPMALL", hdev->uniq); led->name = name; led->brightness = 0; led->max_brightness = 1; led->brightness_get = steelseries_srws1_led_all_get_brightness; led->brightness_set = steelseries_srws1_led_all_set_brightness; drv_data->led[SRWS1_NUMBER_LEDS] = led; ret = led_classdev_register(&hdev->dev, led); if (ret) goto err_led; /* Each individual LED */ for (i = 0; i < SRWS1_NUMBER_LEDS; i++) { led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL); if (!led) { hid_err(hdev, "can't allocate memory for LED %d\n", i); goto err_led; } name = (void *)(&led[1]); snprintf(name, name_sz, "SRWS1::%s::RPM%d", hdev->uniq, i+1); led->name = name; led->brightness = 0; led->max_brightness = 1; led->brightness_get = steelseries_srws1_led_get_brightness; led->brightness_set = steelseries_srws1_led_set_brightness; drv_data->led[i] = led; ret = led_classdev_register(&hdev->dev, led); if (ret) { hid_err(hdev, "failed to register LED %d. 
Aborting.\n", i); err_led: /* Deregister all LEDs (if any) */ for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) { led = drv_data->led[i]; drv_data->led[i] = NULL; if (!led) continue; led_classdev_unregister(led); kfree(led); } goto out; /* but let the driver continue without LEDs */ } } out: return 0; err_free: kfree(drv_data); return ret; } static void steelseries_srws1_remove(struct hid_device *hdev) { int i; struct led_classdev *led; struct steelseries_srws1_data *drv_data = hid_get_drvdata(hdev); if (drv_data) { /* Deregister LEDs (if any) */ for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) { led = drv_data->led[i]; drv_data->led[i] = NULL; if (!led) continue; led_classdev_unregister(led); kfree(led); } } hid_hw_stop(hdev); kfree(drv_data); } #endif #define STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS 3000 #define ARCTIS_1_BATTERY_RESPONSE_LEN 8 #define ARCTIS_9_BATTERY_RESPONSE_LEN 64 static const char arctis_1_battery_request[] = { 0x06, 0x12 }; static const char arctis_9_battery_request[] = { 0x00, 0x20 }; static int steelseries_headset_request_battery(struct hid_device *hdev, const char *request, size_t len) { u8 *write_buf; int ret; /* Request battery information */ write_buf = kmemdup(request, len, GFP_KERNEL); if (!write_buf) return -ENOMEM; hid_dbg(hdev, "Sending battery request report"); ret = hid_hw_raw_request(hdev, request[0], write_buf, len, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); if (ret < (int)len) { hid_err(hdev, "hid_hw_raw_request() failed with %d\n", ret); ret = -ENODATA; } kfree(write_buf); return ret; } static void steelseries_headset_fetch_battery(struct hid_device *hdev) { struct steelseries_device *sd = hid_get_drvdata(hdev); int ret = 0; if (sd->quirks & STEELSERIES_ARCTIS_1) ret = steelseries_headset_request_battery(hdev, arctis_1_battery_request, sizeof(arctis_1_battery_request)); else if (sd->quirks & STEELSERIES_ARCTIS_9) ret = steelseries_headset_request_battery(hdev, arctis_9_battery_request, sizeof(arctis_9_battery_request)); if (ret < 0) hid_dbg(hdev, "Battery query failed (err: %d)\n", ret); } static int battery_capacity_to_level(int capacity) { if (capacity >= 50) return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; if (capacity >= 20) return POWER_SUPPLY_CAPACITY_LEVEL_LOW; return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; } static void steelseries_headset_battery_timer_tick(struct work_struct *work) { struct steelseries_device *sd = container_of(work, struct steelseries_device, battery_work.work); struct hid_device *hdev = sd->hdev; steelseries_headset_fetch_battery(hdev); } #define STEELSERIES_PREFIX "SteelSeries " #define STEELSERIES_PREFIX_LEN strlen(STEELSERIES_PREFIX) static int steelseries_headset_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct steelseries_device *sd = power_supply_get_drvdata(psy); int ret = 0; switch (psp) { case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = sd->hdev->name; while (!strncmp(val->strval, STEELSERIES_PREFIX, STEELSERIES_PREFIX_LEN)) val->strval += STEELSERIES_PREFIX_LEN; break; case POWER_SUPPLY_PROP_MANUFACTURER: val->strval = "SteelSeries"; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; case POWER_SUPPLY_PROP_STATUS: if (sd->headset_connected) { val->intval = sd->battery_charging ? 
POWER_SUPPLY_STATUS_CHARGING : POWER_SUPPLY_STATUS_DISCHARGING; } else val->intval = POWER_SUPPLY_STATUS_UNKNOWN; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = sd->battery_capacity; break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: val->intval = battery_capacity_to_level(sd->battery_capacity); break; default: ret = -EINVAL; break; } return ret; } static void steelseries_headset_set_wireless_status(struct hid_device *hdev, bool connected) { struct usb_interface *intf; if (!hid_is_usb(hdev)) return; intf = to_usb_interface(hdev->dev.parent); usb_set_wireless_status(intf, connected ? USB_WIRELESS_STATUS_CONNECTED : USB_WIRELESS_STATUS_DISCONNECTED); } static enum power_supply_property steelseries_headset_battery_props[] = { POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_CAPACITY_LEVEL, }; static int steelseries_headset_battery_register(struct steelseries_device *sd) { static atomic_t battery_no = ATOMIC_INIT(0); struct power_supply_config battery_cfg = { .drv_data = sd, }; unsigned long n; int ret; sd->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY; sd->battery_desc.properties = steelseries_headset_battery_props; sd->battery_desc.num_properties = ARRAY_SIZE(steelseries_headset_battery_props); sd->battery_desc.get_property = steelseries_headset_battery_get_property; sd->battery_desc.use_for_apm = 0; n = atomic_inc_return(&battery_no) - 1; sd->battery_desc.name = devm_kasprintf(&sd->hdev->dev, GFP_KERNEL, "steelseries_headset_battery_%ld", n); if (!sd->battery_desc.name) return -ENOMEM; /* avoid the warning of 0% battery while waiting for the first info */ steelseries_headset_set_wireless_status(sd->hdev, false); sd->battery_capacity = 100; sd->battery_charging = false; sd->battery = devm_power_supply_register(&sd->hdev->dev, &sd->battery_desc, &battery_cfg); if (IS_ERR(sd->battery)) { ret = PTR_ERR(sd->battery); hid_err(sd->hdev, "%s:power_supply_register failed with error %d\n", __func__, ret); return ret; } power_supply_powers(sd->battery, &sd->hdev->dev); INIT_DELAYED_WORK(&sd->battery_work, steelseries_headset_battery_timer_tick); steelseries_headset_fetch_battery(sd->hdev); if (sd->quirks & STEELSERIES_ARCTIS_9) { /* The first fetch_battery request can remain unanswered in some cases */ schedule_delayed_work(&sd->battery_work, msecs_to_jiffies(STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS)); } return 0; } static bool steelseries_is_vendor_usage_page(struct hid_device *hdev, uint8_t usage_page) { return hdev->rdesc[0] == 0x06 && hdev->rdesc[1] == usage_page && hdev->rdesc[2] == 0xff; } static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct steelseries_device *sd; int ret; sd = devm_kzalloc(&hdev->dev, sizeof(*sd), GFP_KERNEL); if (!sd) return -ENOMEM; hid_set_drvdata(hdev, sd); sd->hdev = hdev; sd->quirks = id->driver_data; if (sd->quirks & STEELSERIES_SRWS1) { #if IS_BUILTIN(CONFIG_LEDS_CLASS) || \ (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) return steelseries_srws1_probe(hdev, id); #else return -ENODEV; #endif } ret = hid_parse(hdev); if (ret) return ret; if (sd->quirks & STEELSERIES_ARCTIS_9 && !steelseries_is_vendor_usage_page(hdev, 0xc0)) return -ENODEV; spin_lock_init(&sd->lock); ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) return ret; ret = hid_hw_open(hdev); if (ret) return ret; if 
(steelseries_headset_battery_register(sd) < 0) hid_err(sd->hdev, "Failed to register battery for headset\n"); return ret; } static void steelseries_remove(struct hid_device *hdev) { struct steelseries_device *sd = hid_get_drvdata(hdev); unsigned long flags; if (sd->quirks & STEELSERIES_SRWS1) { #if IS_BUILTIN(CONFIG_LEDS_CLASS) || \ (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) steelseries_srws1_remove(hdev); #endif return; } spin_lock_irqsave(&sd->lock, flags); sd->removed = true; spin_unlock_irqrestore(&sd->lock, flags); cancel_delayed_work_sync(&sd->battery_work); hid_hw_close(hdev); hid_hw_stop(hdev); } static const __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (hdev->vendor != USB_VENDOR_ID_STEELSERIES || hdev->product != USB_DEVICE_ID_STEELSERIES_SRWS1) return rdesc; if (*rsize >= 115 && rdesc[11] == 0x02 && rdesc[13] == 0xc8 && rdesc[29] == 0xbb && rdesc[40] == 0xc5) { hid_info(hdev, "Fixing up Steelseries SRW-S1 report descriptor\n"); *rsize = sizeof(steelseries_srws1_rdesc_fixed); return steelseries_srws1_rdesc_fixed; } return rdesc; } static uint8_t steelseries_headset_map_capacity(uint8_t capacity, uint8_t min_in, uint8_t max_in) { if (capacity >= max_in) return 100; if (capacity <= min_in) return 0; return (capacity - min_in) * 100 / (max_in - min_in); } static int steelseries_headset_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *read_buf, int size) { struct steelseries_device *sd = hid_get_drvdata(hdev); int capacity = sd->battery_capacity; bool connected = sd->headset_connected; bool charging = sd->battery_charging; unsigned long flags; /* Not a headset */ if (sd->quirks & STEELSERIES_SRWS1) return 0; if (sd->quirks & STEELSERIES_ARCTIS_1) { hid_dbg(sd->hdev, "Parsing raw event for Arctis 1 headset (%*ph)\n", size, read_buf); if (size < ARCTIS_1_BATTERY_RESPONSE_LEN || memcmp(read_buf, arctis_1_battery_request, sizeof(arctis_1_battery_request))) { if (!delayed_work_pending(&sd->battery_work)) goto request_battery; return 0; } if (read_buf[2] == 0x01) { connected = false; capacity = 100; } else { connected = true; capacity = read_buf[3]; } } if (sd->quirks & STEELSERIES_ARCTIS_9) { hid_dbg(sd->hdev, "Parsing raw event for Arctis 9 headset (%*ph)\n", size, read_buf); if (size < ARCTIS_9_BATTERY_RESPONSE_LEN) { if (!delayed_work_pending(&sd->battery_work)) goto request_battery; return 0; } if (read_buf[0] == 0xaa && read_buf[1] == 0x01) { connected = true; charging = read_buf[4] == 0x01; /* * Found no official documentation about min and max. * Values defined by testing. */ capacity = steelseries_headset_map_capacity(read_buf[3], 0x68, 0x9d); } else { /* * Device is off and sends the last known status read_buf[1] == 0x03 or * there is no known status of the device read_buf[0] == 0x55 */ connected = false; charging = false; } } if (connected != sd->headset_connected) { hid_dbg(sd->hdev, "Connected status changed from %sconnected to %sconnected\n", sd->headset_connected ? "" : "not ", connected ? "" : "not "); sd->headset_connected = connected; steelseries_headset_set_wireless_status(hdev, connected); } if (capacity != sd->battery_capacity) { hid_dbg(sd->hdev, "Battery capacity changed from %d%% to %d%%\n", sd->battery_capacity, capacity); sd->battery_capacity = capacity; power_supply_changed(sd->battery); } if (charging != sd->battery_charging) { hid_dbg(sd->hdev, "Battery charging status changed from %scharging to %scharging\n", sd->battery_charging ? "" : "not ", charging ? 
"" : "not "); sd->battery_charging = charging; power_supply_changed(sd->battery); } request_battery: spin_lock_irqsave(&sd->lock, flags); if (!sd->removed) schedule_delayed_work(&sd->battery_work, msecs_to_jiffies(STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS)); spin_unlock_irqrestore(&sd->lock, flags); return 0; } static const struct hid_device_id steelseries_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1), .driver_data = STEELSERIES_SRWS1 }, { /* SteelSeries Arctis 1 Wireless for XBox */ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12b6), .driver_data = STEELSERIES_ARCTIS_1 }, { /* SteelSeries Arctis 9 Wireless for XBox */ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12c2), .driver_data = STEELSERIES_ARCTIS_9 }, { } }; MODULE_DEVICE_TABLE(hid, steelseries_devices); static struct hid_driver steelseries_driver = { .name = "steelseries", .id_table = steelseries_devices, .probe = steelseries_probe, .remove = steelseries_remove, .report_fixup = steelseries_srws1_report_fixup, .raw_event = steelseries_headset_raw_event, }; module_hid_driver(steelseries_driver); MODULE_DESCRIPTION("HID driver for Steelseries devices"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>"); MODULE_AUTHOR("Simon Wood <simon@mungewell.org>"); MODULE_AUTHOR("Christian Mayer <git@mayer-bgk.de>");
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Digital Audio (PCM) abstract layer
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Abramo Bagnara <abramo@alsa-project.org>
 */

#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/time.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/timer.h>

#include "pcm_local.h"

#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define CREATE_TRACE_POINTS
#include "pcm_trace.h"
#else
#define trace_hwptr(substream, pos, in_interrupt)
#define trace_xrun(substream)
#define trace_hw_ptr_error(substream, reason)
#define trace_applptr(substream, prev, curr)
#endif

static int fill_silence_frames(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);

static inline void update_silence_vars(struct snd_pcm_runtime *runtime,
				       snd_pcm_uframes_t ptr,
				       snd_pcm_uframes_t new_ptr)
{
	snd_pcm_sframes_t delta;

	delta = new_ptr - ptr;
	if (delta == 0)
		return;
	if (delta < 0)
		delta += runtime->boundary;
	if ((snd_pcm_uframes_t)delta < runtime->silence_filled)
		runtime->silence_filled -= delta;
	else
		runtime->silence_filled = 0;
	runtime->silence_start = new_ptr;
}

/*
 * fill ring buffer with silence
 * runtime->silence_start: starting pointer to silence area
 * runtime->silence_filled: size filled with silence
 * runtime->silence_threshold: threshold from application
 * runtime->silence_size: maximal size from application
 *
 * when runtime->silence_size >= runtime->boundary,
 * fill the processed area with silence immediately
 */
void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t frames, ofs, transfer;
	int err;

	if (runtime->silence_size < runtime->boundary) {
		snd_pcm_sframes_t noise_dist;
		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);

		update_silence_vars(runtime, runtime->silence_start, appl_ptr);
		/* initialization outside pointer updates */
		if (new_hw_ptr ==
ULONG_MAX) new_hw_ptr = runtime->status->hw_ptr; /* get hw_avail with the boundary crossing */ noise_dist = appl_ptr - new_hw_ptr; if (noise_dist < 0) noise_dist += runtime->boundary; /* total noise distance */ noise_dist += runtime->silence_filled; if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold) return; frames = runtime->silence_threshold - noise_dist; if (frames > runtime->silence_size) frames = runtime->silence_size; } else { /* * This filling mode aims at free-running mode (used for example by dmix), * which doesn't update the application pointer. */ snd_pcm_uframes_t hw_ptr = runtime->status->hw_ptr; if (new_hw_ptr == ULONG_MAX) { /* * Initialization, fill the whole unused buffer with silence. * * Usually, this is entered while stopped, before data is queued, * so both pointers are expected to be zero. */ snd_pcm_sframes_t avail = runtime->control->appl_ptr - hw_ptr; if (avail < 0) avail += runtime->boundary; /* * In free-running mode, appl_ptr will be zero even while running, * so we end up with a huge number. There is no useful way to * handle this, so we just clear the whole buffer. */ runtime->silence_filled = avail > runtime->buffer_size ? 0 : avail; runtime->silence_start = hw_ptr; } else { /* Silence the just played area immediately */ update_silence_vars(runtime, hw_ptr, new_hw_ptr); } /* * In this mode, silence_filled actually includes the valid * sample data from the user. */ frames = runtime->buffer_size - runtime->silence_filled; } if (snd_BUG_ON(frames > runtime->buffer_size)) return; if (frames == 0) return; ofs = (runtime->silence_start + runtime->silence_filled) % runtime->buffer_size; do { transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames; err = fill_silence_frames(substream, ofs, transfer); snd_BUG_ON(err < 0); runtime->silence_filled += transfer; frames -= transfer; ofs = 0; } while (frames > 0); snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE); } #ifdef CONFIG_SND_DEBUG void snd_pcm_debug_name(struct snd_pcm_substream *substream, char *name, size_t len) { snprintf(name, len, "pcmC%dD%d%c:%d", substream->pcm->card->number, substream->pcm->device, substream->stream ? 'c' : 'p', substream->number); } EXPORT_SYMBOL(snd_pcm_debug_name); #endif #define XRUN_DEBUG_BASIC (1<<0) #define XRUN_DEBUG_STACK (1<<1) /* dump also stack */ #define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */ #ifdef CONFIG_SND_PCM_XRUN_DEBUG #define xrun_debug(substream, mask) \ ((substream)->pstr->xrun_debug & (mask)) #else #define xrun_debug(substream, mask) 0 #endif #define dump_stack_on_xrun(substream) do { \ if (xrun_debug(substream, XRUN_DEBUG_STACK)) \ dump_stack(); \ } while (0) /* call with stream lock held */ void __snd_pcm_xrun(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; trace_xrun(substream); if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) { struct timespec64 tstamp; snd_pcm_gettime(runtime, &tstamp); runtime->status->tstamp.tv_sec = tstamp.tv_sec; runtime->status->tstamp.tv_nsec = tstamp.tv_nsec; } snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { char name[16]; snd_pcm_debug_name(substream, name, sizeof(name)); pcm_warn(substream->pcm, "XRUN: %s\n", name); dump_stack_on_xrun(substream); } #ifdef CONFIG_SND_PCM_XRUN_DEBUG substream->xrun_counter++; #endif } #ifdef CONFIG_SND_PCM_XRUN_DEBUG #define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) 
\ do { \ trace_hw_ptr_error(substream, reason); \ if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \ pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \ (in_interrupt) ? 'Q' : 'P', ##args); \ dump_stack_on_xrun(substream); \ } \ } while (0) #else /* ! CONFIG_SND_PCM_XRUN_DEBUG */ #define hw_ptr_error(substream, fmt, args...) do { } while (0) #endif int snd_pcm_update_state(struct snd_pcm_substream *substream, struct snd_pcm_runtime *runtime) { snd_pcm_uframes_t avail; avail = snd_pcm_avail(substream); if (avail > runtime->avail_max) runtime->avail_max = avail; if (runtime->state == SNDRV_PCM_STATE_DRAINING) { if (avail >= runtime->buffer_size) { snd_pcm_drain_done(substream); return -EPIPE; } } else { if (avail >= runtime->stop_threshold) { __snd_pcm_xrun(substream); return -EPIPE; } } if (runtime->twake) { if (avail >= runtime->twake) wake_up(&runtime->tsleep); } else if (avail >= runtime->control->avail_min) wake_up(&runtime->sleep); return 0; } static void update_audio_tstamp(struct snd_pcm_substream *substream, struct timespec64 *curr_tstamp, struct timespec64 *audio_tstamp) { struct snd_pcm_runtime *runtime = substream->runtime; u64 audio_frames, audio_nsecs; struct timespec64 driver_tstamp; if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE) return; if (!(substream->ops->get_time_info) || (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) { /* * provide audio timestamp derived from pointer position * add delay only if requested */ audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr; if (runtime->audio_tstamp_config.report_delay) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) audio_frames -= runtime->delay; else audio_frames += runtime->delay; } audio_nsecs = div_u64(audio_frames * 1000000000LL, runtime->rate); *audio_tstamp = ns_to_timespec64(audio_nsecs); } if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec || runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) { runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec; runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec; runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec; runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec; } /* * re-take a driver timestamp to let apps detect if the reference tstamp * read by low-level hardware was provided with a delay */ snd_pcm_gettime(substream->runtime, &driver_tstamp); runtime->driver_tstamp = driver_tstamp; } static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, unsigned int in_interrupt) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t pos; snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base; snd_pcm_sframes_t hdelta, delta; unsigned long jdelta; unsigned long curr_jiffies; struct timespec64 curr_tstamp; struct timespec64 audio_tstamp; int crossed_boundary = 0; old_hw_ptr = runtime->status->hw_ptr; /* * group pointer, time and jiffies reads to allow for more * accurate correlations/corrections. 
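	 * (For instance, if the pointer callback were sampled, the task then
	 * preempted for a few ticks, and jiffies read only afterwards, the
	 * jiffies-based checks below would compare a stale position against
	 * a newer timestamp and could flag a spurious hw_ptr skip.)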
* The values are stored at the end of this routine after * corrections for hw_ptr position */ pos = substream->ops->pointer(substream); curr_jiffies = jiffies; if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) { if ((substream->ops->get_time_info) && (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) { substream->ops->get_time_info(substream, &curr_tstamp, &audio_tstamp, &runtime->audio_tstamp_config, &runtime->audio_tstamp_report); /* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */ if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT) snd_pcm_gettime(runtime, &curr_tstamp); } else snd_pcm_gettime(runtime, &curr_tstamp); } if (pos == SNDRV_PCM_POS_XRUN) { __snd_pcm_xrun(substream); return -EPIPE; } if (pos >= runtime->buffer_size) { if (printk_ratelimit()) { char name[16]; snd_pcm_debug_name(substream, name, sizeof(name)); pcm_err(substream->pcm, "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n", name, pos, runtime->buffer_size, runtime->period_size); } pos = 0; } pos -= pos % runtime->min_align; trace_hwptr(substream, pos, in_interrupt); hw_base = runtime->hw_ptr_base; new_hw_ptr = hw_base + pos; if (in_interrupt) { /* we know that one period was processed */ /* delta = "expected next hw_ptr" for in_interrupt != 0 */ delta = runtime->hw_ptr_interrupt + runtime->period_size; if (delta > new_hw_ptr) { /* check for double acknowledged interrupts */ hdelta = curr_jiffies - runtime->hw_ptr_jiffies; if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) { hw_base += runtime->buffer_size; if (hw_base >= runtime->boundary) { hw_base = 0; crossed_boundary++; } new_hw_ptr = hw_base + pos; goto __delta; } } } /* new_hw_ptr might be lower than old_hw_ptr in case when */ /* pointer crosses the end of the ring buffer */ if (new_hw_ptr < old_hw_ptr) { hw_base += runtime->buffer_size; if (hw_base >= runtime->boundary) { hw_base = 0; crossed_boundary++; } new_hw_ptr = hw_base + pos; } __delta: delta = new_hw_ptr - old_hw_ptr; if (delta < 0) delta += runtime->boundary; if (runtime->no_period_wakeup) { snd_pcm_sframes_t xrun_threshold; /* * Without regular period interrupts, we have to check * the elapsed time to detect xruns. */ jdelta = curr_jiffies - runtime->hw_ptr_jiffies; if (jdelta < runtime->hw_ptr_buffer_jiffies / 2) goto no_delta_check; hdelta = jdelta - delta * HZ / runtime->rate; xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1; while (hdelta > xrun_threshold) { delta += runtime->buffer_size; hw_base += runtime->buffer_size; if (hw_base >= runtime->boundary) { hw_base = 0; crossed_boundary++; } new_hw_ptr = hw_base + pos; hdelta -= runtime->hw_ptr_buffer_jiffies; } goto no_delta_check; } /* something must be really wrong */ if (delta >= runtime->buffer_size + runtime->period_size) { hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr", "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n", substream->stream, (long)pos, (long)new_hw_ptr, (long)old_hw_ptr); return 0; } /* Do jiffies check only in xrun_debug mode */ if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK)) goto no_jiffies_check; /* Skip the jiffies check for hardwares with BATCH flag. * Such hardware usually just increases the position at each IRQ, * thus it can't give any strange position. 
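	 * As a worked example of the jiffies check below: at rate = 48000 and
	 * HZ = 1000, a reported advance of delta = 4800 frames corresponds to
	 * (4800 * 1000) / 48000 = 100 ticks; if only jdelta = 50 ticks have
	 * elapsed, 100 > 50 + HZ/100 holds and the position is re-derived
	 * from jiffies instead (the "hw_ptr skipping" path).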
*/ if (runtime->hw.info & SNDRV_PCM_INFO_BATCH) goto no_jiffies_check; hdelta = delta; if (hdelta < runtime->delay) goto no_jiffies_check; hdelta -= runtime->delay; jdelta = curr_jiffies - runtime->hw_ptr_jiffies; if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) { delta = jdelta / (((runtime->period_size * HZ) / runtime->rate) + HZ/100); /* move new_hw_ptr according jiffies not pos variable */ new_hw_ptr = old_hw_ptr; hw_base = delta; /* use loop to avoid checks for delta overflows */ /* the delta value is small or zero in most cases */ while (delta > 0) { new_hw_ptr += runtime->period_size; if (new_hw_ptr >= runtime->boundary) { new_hw_ptr -= runtime->boundary; crossed_boundary--; } delta--; } /* align hw_base to buffer_size */ hw_ptr_error(substream, in_interrupt, "hw_ptr skipping", "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n", (long)pos, (long)hdelta, (long)runtime->period_size, jdelta, ((hdelta * HZ) / runtime->rate), hw_base, (unsigned long)old_hw_ptr, (unsigned long)new_hw_ptr); /* reset values to proper state */ delta = 0; hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size); } no_jiffies_check: if (delta > runtime->period_size + runtime->period_size / 2) { hw_ptr_error(substream, in_interrupt, "Lost interrupts?", "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n", substream->stream, (long)delta, (long)new_hw_ptr, (long)old_hw_ptr); } no_delta_check: if (runtime->status->hw_ptr == new_hw_ptr) { runtime->hw_ptr_jiffies = curr_jiffies; update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp); return 0; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && runtime->silence_size > 0) snd_pcm_playback_silence(substream, new_hw_ptr); if (in_interrupt) { delta = new_hw_ptr - runtime->hw_ptr_interrupt; if (delta < 0) delta += runtime->boundary; delta -= (snd_pcm_uframes_t)delta % runtime->period_size; runtime->hw_ptr_interrupt += delta; if (runtime->hw_ptr_interrupt >= runtime->boundary) runtime->hw_ptr_interrupt -= runtime->boundary; } runtime->hw_ptr_base = hw_base; runtime->status->hw_ptr = new_hw_ptr; runtime->hw_ptr_jiffies = curr_jiffies; if (crossed_boundary) { snd_BUG_ON(crossed_boundary != 1); runtime->hw_ptr_wrap += runtime->boundary; } update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp); return snd_pcm_update_state(substream, runtime); } /* CAUTION: call it with irq disabled */ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream) { return snd_pcm_update_hw_ptr0(substream, 0); } /** * snd_pcm_set_ops - set the PCM operators * @pcm: the pcm instance * @direction: stream direction, SNDRV_PCM_STREAM_XXX * @ops: the operator table * * Sets the given PCM operators to the pcm instance. */ void snd_pcm_set_ops(struct snd_pcm *pcm, int direction, const struct snd_pcm_ops *ops) { struct snd_pcm_str *stream = &pcm->streams[direction]; struct snd_pcm_substream *substream; for (substream = stream->substream; substream != NULL; substream = substream->next) substream->ops = ops; } EXPORT_SYMBOL(snd_pcm_set_ops); /** * snd_pcm_set_sync_per_card - set the PCM sync id with card number * @substream: the pcm substream * @params: modified hardware parameters * @id: identifier (max 12 bytes) * @len: identifier length (max 12 bytes) * * Sets the PCM sync identifier for the card with zero padding. * * User space or any user should use this 16-byte identifier for a comparison only * to check if two IDs are similar or different. Special case is the identifier * containing only zeros. 
That combination is interpreted as empty (not set).
 * The contents of the identifier should not be interpreted in any other way.
 *
 * The synchronization ID must be unique per clock source (usually one sound
 * card, but multiple sound cards may share one PCM word clock source, which
 * means that they are fully synchronized).
 *
 * This routine composes the ID using the card number in the first four bytes
 * and the 12-byte additional ID. When another ID composition is used (e.g.
 * for multiple sound cards), make sure that the composition does not clash
 * with this scheme.
 */
void snd_pcm_set_sync_per_card(struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params,
			       const unsigned char *id, unsigned int len)
{
	*(__u32 *)params->sync = cpu_to_le32(substream->pcm->card->number);
	len = min(12, len);
	memcpy(params->sync + 4, id, len);
	memset(params->sync + 4 + len, 0, 12 - len);
}
EXPORT_SYMBOL_GPL(snd_pcm_set_sync_per_card);

/*
 * Standard ioctl routine
 */

static inline unsigned int div32(unsigned int a, unsigned int b,
				 unsigned int *r)
{
	if (b == 0) {
		*r = 0;
		return UINT_MAX;
	}
	*r = a % b;
	return a / b;
}

static inline unsigned int div_down(unsigned int a, unsigned int b)
{
	if (b == 0)
		return UINT_MAX;
	return a / b;
}

static inline unsigned int div_up(unsigned int a, unsigned int b)
{
	unsigned int r;
	unsigned int q;

	if (b == 0)
		return UINT_MAX;
	q = div32(a, b, &r);
	if (r)
		++q;
	return q;
}

static inline unsigned int mul(unsigned int a, unsigned int b)
{
	if (a == 0)
		return 0;
	if (div_down(UINT_MAX, a) < b)
		return UINT_MAX;
	return a * b;
}

static inline unsigned int muldiv32(unsigned int a, unsigned int b,
				    unsigned int c, unsigned int *r)
{
	u_int64_t n = (u_int64_t) a * b;

	if (c == 0) {
		*r = 0;
		return UINT_MAX;
	}
	n = div_u64_rem(n, c, r);
	if (n >= UINT_MAX) {
		*r = 0;
		return UINT_MAX;
	}
	return n;
}

/**
 * snd_interval_refine - refine the interval value of configurator
 * @i: the interval value to refine
 * @v: the interval value to refer to
 *
 * Refines the interval value with the reference value.
 * The interval is changed to the range satisfying both intervals.
 * The interval status (min, max, integer, etc.) are evaluated.
 *
 * Return: Positive if the value is changed, zero if it's not changed, or a
 * negative error code.
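 *
 * Example: refining i = [8000, 48000] against v = [44100, 44100] narrows i
 * to the single value 44100 and returns 1 (changed); refining against a
 * disjoint v such as [64000, 96000] leaves i empty and returns -EINVAL.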
*/ int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v) { int changed = 0; if (snd_BUG_ON(snd_interval_empty(i))) return -EINVAL; if (i->min < v->min) { i->min = v->min; i->openmin = v->openmin; changed = 1; } else if (i->min == v->min && !i->openmin && v->openmin) { i->openmin = 1; changed = 1; } if (i->max > v->max) { i->max = v->max; i->openmax = v->openmax; changed = 1; } else if (i->max == v->max && !i->openmax && v->openmax) { i->openmax = 1; changed = 1; } if (!i->integer && v->integer) { i->integer = 1; changed = 1; } if (i->integer) { if (i->openmin) { i->min++; i->openmin = 0; } if (i->openmax) { i->max--; i->openmax = 0; } } else if (!i->openmin && !i->openmax && i->min == i->max) i->integer = 1; if (snd_interval_checkempty(i)) { snd_interval_none(i); return -EINVAL; } return changed; } EXPORT_SYMBOL(snd_interval_refine); static int snd_interval_refine_first(struct snd_interval *i) { const unsigned int last_max = i->max; if (snd_BUG_ON(snd_interval_empty(i))) return -EINVAL; if (snd_interval_single(i)) return 0; i->max = i->min; if (i->openmin) i->max++; /* only exclude max value if also excluded before refine */ i->openmax = (i->openmax && i->max >= last_max); return 1; } static int snd_interval_refine_last(struct snd_interval *i) { const unsigned int last_min = i->min; if (snd_BUG_ON(snd_interval_empty(i))) return -EINVAL; if (snd_interval_single(i)) return 0; i->min = i->max; if (i->openmax) i->min--; /* only exclude min value if also excluded before refine */ i->openmin = (i->openmin && i->min <= last_min); return 1; } void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c) { if (a->empty || b->empty) { snd_interval_none(c); return; } c->empty = 0; c->min = mul(a->min, b->min); c->openmin = (a->openmin || b->openmin); c->max = mul(a->max, b->max); c->openmax = (a->openmax || b->openmax); c->integer = (a->integer && b->integer); } /** * snd_interval_div - refine the interval value with division * @a: dividend * @b: divisor * @c: quotient * * c = a / b * * Returns non-zero if the value is changed, zero if not changed. */ void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c) { unsigned int r; if (a->empty || b->empty) { snd_interval_none(c); return; } c->empty = 0; c->min = div32(a->min, b->max, &r); c->openmin = (r || a->openmin || b->openmax); if (b->min > 0) { c->max = div32(a->max, b->min, &r); if (r) { c->max++; c->openmax = 1; } else c->openmax = (a->openmax || b->openmin); } else { c->max = UINT_MAX; c->openmax = 0; } c->integer = 0; } /** * snd_interval_muldivk - refine the interval value * @a: dividend 1 * @b: dividend 2 * @k: divisor (as integer) * @c: result * * c = a * b / k * * Returns non-zero if the value is changed, zero if not changed. */ void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b, unsigned int k, struct snd_interval *c) { unsigned int r; if (a->empty || b->empty) { snd_interval_none(c); return; } c->empty = 0; c->min = muldiv32(a->min, b->min, k, &r); c->openmin = (r || a->openmin || b->openmin); c->max = muldiv32(a->max, b->max, k, &r); if (r) { c->max++; c->openmax = 1; } else c->openmax = (a->openmax || b->openmax); c->integer = 0; } /** * snd_interval_mulkdiv - refine the interval value * @a: dividend 1 * @k: dividend 2 (as integer) * @b: divisor * @c: result * * c = a * k / b * * Returns non-zero if the value is changed, zero if not changed. 
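 *
 * Example (c = a * k / b): with a = [100, 200], k = 3 and b = [2, 4],
 * c->min = 100 * 3 / 4 = 75 and c->max = 200 * 3 / 2 = 300, with the open
 * flags tracking any division remainder.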
*/ void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k, const struct snd_interval *b, struct snd_interval *c) { unsigned int r; if (a->empty || b->empty) { snd_interval_none(c); return; } c->empty = 0; c->min = muldiv32(a->min, k, b->max, &r); c->openmin = (r || a->openmin || b->openmax); if (b->min > 0) { c->max = muldiv32(a->max, k, b->min, &r); if (r) { c->max++; c->openmax = 1; } else c->openmax = (a->openmax || b->openmin); } else { c->max = UINT_MAX; c->openmax = 0; } c->integer = 0; } /* ---- */ /** * snd_interval_ratnum - refine the interval value * @i: interval to refine * @rats_count: number of ratnum_t * @rats: ratnum_t array * @nump: pointer to store the resultant numerator * @denp: pointer to store the resultant denominator * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. */ int snd_interval_ratnum(struct snd_interval *i, unsigned int rats_count, const struct snd_ratnum *rats, unsigned int *nump, unsigned int *denp) { unsigned int best_num, best_den; int best_diff; unsigned int k; struct snd_interval t; int err; unsigned int result_num, result_den; int result_diff; best_num = best_den = best_diff = 0; for (k = 0; k < rats_count; ++k) { unsigned int num = rats[k].num; unsigned int den; unsigned int q = i->min; int diff; if (q == 0) q = 1; den = div_up(num, q); if (den < rats[k].den_min) continue; if (den > rats[k].den_max) den = rats[k].den_max; else { unsigned int r; r = (den - rats[k].den_min) % rats[k].den_step; if (r != 0) den -= r; } diff = num - q * den; if (diff < 0) diff = -diff; if (best_num == 0 || diff * best_den < best_diff * den) { best_diff = diff; best_den = den; best_num = num; } } if (best_den == 0) { i->empty = 1; return -EINVAL; } t.min = div_down(best_num, best_den); t.openmin = !!(best_num % best_den); result_num = best_num; result_diff = best_diff; result_den = best_den; best_num = best_den = best_diff = 0; for (k = 0; k < rats_count; ++k) { unsigned int num = rats[k].num; unsigned int den; unsigned int q = i->max; int diff; if (q == 0) { i->empty = 1; return -EINVAL; } den = div_down(num, q); if (den > rats[k].den_max) continue; if (den < rats[k].den_min) den = rats[k].den_min; else { unsigned int r; r = (den - rats[k].den_min) % rats[k].den_step; if (r != 0) den += rats[k].den_step - r; } diff = q * den - num; if (diff < 0) diff = -diff; if (best_num == 0 || diff * best_den < best_diff * den) { best_diff = diff; best_den = den; best_num = num; } } if (best_den == 0) { i->empty = 1; return -EINVAL; } t.max = div_up(best_num, best_den); t.openmax = !!(best_num % best_den); t.integer = 0; err = snd_interval_refine(i, &t); if (err < 0) return err; if (snd_interval_single(i)) { if (best_diff * result_den < result_diff * best_den) { result_num = best_num; result_den = best_den; } if (nump) *nump = result_num; if (denp) *denp = result_den; } return err; } EXPORT_SYMBOL(snd_interval_ratnum); /** * snd_interval_ratden - refine the interval value * @i: interval to refine * @rats_count: number of struct ratden * @rats: struct ratden array * @nump: pointer to store the resultant numerator * @denp: pointer to store the resultant denominator * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. 
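 *
 * Example (hypothetical entry): with { num_min = 1000000, num_max = 2000000,
 * num_step = 1000, den = 125 }, a requested rate interval of [8000, 8000]
 * gives num = 8000 * 125 = 1000000, which lies in range, so the interval
 * refines to exactly 8000 with num/den = 1000000/125.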
*/ static int snd_interval_ratden(struct snd_interval *i, unsigned int rats_count, const struct snd_ratden *rats, unsigned int *nump, unsigned int *denp) { unsigned int best_num, best_diff, best_den; unsigned int k; struct snd_interval t; int err; best_num = best_den = best_diff = 0; for (k = 0; k < rats_count; ++k) { unsigned int num; unsigned int den = rats[k].den; unsigned int q = i->min; int diff; num = mul(q, den); if (num > rats[k].num_max) continue; if (num < rats[k].num_min) num = rats[k].num_max; else { unsigned int r; r = (num - rats[k].num_min) % rats[k].num_step; if (r != 0) num += rats[k].num_step - r; } diff = num - q * den; if (best_num == 0 || diff * best_den < best_diff * den) { best_diff = diff; best_den = den; best_num = num; } } if (best_den == 0) { i->empty = 1; return -EINVAL; } t.min = div_down(best_num, best_den); t.openmin = !!(best_num % best_den); best_num = best_den = best_diff = 0; for (k = 0; k < rats_count; ++k) { unsigned int num; unsigned int den = rats[k].den; unsigned int q = i->max; int diff; num = mul(q, den); if (num < rats[k].num_min) continue; if (num > rats[k].num_max) num = rats[k].num_max; else { unsigned int r; r = (num - rats[k].num_min) % rats[k].num_step; if (r != 0) num -= r; } diff = q * den - num; if (best_num == 0 || diff * best_den < best_diff * den) { best_diff = diff; best_den = den; best_num = num; } } if (best_den == 0) { i->empty = 1; return -EINVAL; } t.max = div_up(best_num, best_den); t.openmax = !!(best_num % best_den); t.integer = 0; err = snd_interval_refine(i, &t); if (err < 0) return err; if (snd_interval_single(i)) { if (nump) *nump = best_num; if (denp) *denp = best_den; } return err; } /** * snd_interval_list - refine the interval value from the list * @i: the interval value to refine * @count: the number of elements in the list * @list: the value list * @mask: the bit-mask to evaluate * * Refines the interval value from the list. * When mask is non-zero, only the elements corresponding to bit 1 are * evaluated. * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. */ int snd_interval_list(struct snd_interval *i, unsigned int count, const unsigned int *list, unsigned int mask) { unsigned int k; struct snd_interval list_range; if (!count) { i->empty = 1; return -EINVAL; } snd_interval_any(&list_range); list_range.min = UINT_MAX; list_range.max = 0; for (k = 0; k < count; k++) { if (mask && !(mask & (1 << k))) continue; if (!snd_interval_test(i, list[k])) continue; list_range.min = min(list_range.min, list[k]); list_range.max = max(list_range.max, list[k]); } return snd_interval_refine(i, &list_range); } EXPORT_SYMBOL(snd_interval_list); /** * snd_interval_ranges - refine the interval value from the list of ranges * @i: the interval value to refine * @count: the number of elements in the list of ranges * @ranges: the ranges list * @mask: the bit-mask to evaluate * * Refines the interval value from the list of ranges. * When mask is non-zero, only the elements corresponding to bit 1 are * evaluated. * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. 
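 *
 * Example: with ranges [8000, 16000] and [32000, 48000], an interval holding
 * only the value 20000 falls in the gap between them, becomes empty and
 * returns -EINVAL, while [12000, 40000] survives unchanged (the union of the
 * clipped ranges spans [12000, 40000]).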
*/ int snd_interval_ranges(struct snd_interval *i, unsigned int count, const struct snd_interval *ranges, unsigned int mask) { unsigned int k; struct snd_interval range_union; struct snd_interval range; if (!count) { snd_interval_none(i); return -EINVAL; } snd_interval_any(&range_union); range_union.min = UINT_MAX; range_union.max = 0; for (k = 0; k < count; k++) { if (mask && !(mask & (1 << k))) continue; snd_interval_copy(&range, &ranges[k]); if (snd_interval_refine(&range, i) < 0) continue; if (snd_interval_empty(&range)) continue; if (range.min < range_union.min) { range_union.min = range.min; range_union.openmin = 1; } if (range.min == range_union.min && !range.openmin) range_union.openmin = 0; if (range.max > range_union.max) { range_union.max = range.max; range_union.openmax = 1; } if (range.max == range_union.max && !range.openmax) range_union.openmax = 0; } return snd_interval_refine(i, &range_union); } EXPORT_SYMBOL(snd_interval_ranges); static int snd_interval_step(struct snd_interval *i, unsigned int step) { unsigned int n; int changed = 0; n = i->min % step; if (n != 0 || i->openmin) { i->min += step - n; i->openmin = 0; changed = 1; } n = i->max % step; if (n != 0 || i->openmax) { i->max -= n; i->openmax = 0; changed = 1; } if (snd_interval_checkempty(i)) { i->empty = 1; return -EINVAL; } return changed; } /* Info constraints helpers */ /** * snd_pcm_hw_rule_add - add the hw-constraint rule * @runtime: the pcm runtime instance * @cond: condition bits * @var: the variable to evaluate * @func: the evaluation function * @private: the private data pointer passed to function * @dep: the dependent variables * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond, int var, snd_pcm_hw_rule_func_t func, void *private, int dep, ...) { struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; struct snd_pcm_hw_rule *c; unsigned int k; va_list args; va_start(args, dep); if (constrs->rules_num >= constrs->rules_all) { struct snd_pcm_hw_rule *new; unsigned int new_rules = constrs->rules_all + 16; new = krealloc_array(constrs->rules, new_rules, sizeof(*c), GFP_KERNEL); if (!new) { va_end(args); return -ENOMEM; } constrs->rules = new; constrs->rules_all = new_rules; } c = &constrs->rules[constrs->rules_num]; c->cond = cond; c->func = func; c->var = var; c->private = private; k = 0; while (1) { if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) { va_end(args); return -EINVAL; } c->deps[k++] = dep; if (dep < 0) break; dep = va_arg(args, int); } constrs->rules_num++; va_end(args); return 0; } EXPORT_SYMBOL(snd_pcm_hw_rule_add); /** * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint * @runtime: PCM runtime instance * @var: hw_params variable to apply the mask * @mask: the bitmap mask * * Apply the constraint of the given bitmap mask to a 32-bit mask parameter. * * Return: Zero if successful, or a negative error code on failure. 
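 *
 * Example (illustrative): keeping only the first two sample formats, i.e.
 * bits 0 and 1 of the 32-bit mask:
 *
 *	err = snd_pcm_hw_constraint_mask(runtime,
 *					 SNDRV_PCM_HW_PARAM_FORMAT,
 *					 BIT(SNDRV_PCM_FORMAT_S8) |
 *					 BIT(SNDRV_PCM_FORMAT_U8));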
*/ int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, u_int32_t mask) { struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; struct snd_mask *maskp = constrs_mask(constrs, var); *maskp->bits &= mask; memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */ if (*maskp->bits == 0) return -EINVAL; return 0; } /** * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint * @runtime: PCM runtime instance * @var: hw_params variable to apply the mask * @mask: the 64bit bitmap mask * * Apply the constraint of the given bitmap mask to a 64-bit mask parameter. * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, u_int64_t mask) { struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; struct snd_mask *maskp = constrs_mask(constrs, var); maskp->bits[0] &= (u_int32_t)mask; maskp->bits[1] &= (u_int32_t)(mask >> 32); memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */ if (! maskp->bits[0] && ! maskp->bits[1]) return -EINVAL; return 0; } EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64); /** * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval * @runtime: PCM runtime instance * @var: hw_params variable to apply the integer constraint * * Apply the constraint of integer to an interval parameter. * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. */ int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var) { struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; return snd_interval_setinteger(constrs_interval(constrs, var)); } EXPORT_SYMBOL(snd_pcm_hw_constraint_integer); /** * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval * @runtime: PCM runtime instance * @var: hw_params variable to apply the range * @min: the minimal value * @max: the maximal value * * Apply the min/max range constraint to an interval parameter. * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. */ int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, unsigned int min, unsigned int max) { struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints; struct snd_interval t; t.min = min; t.max = max; t.openmin = t.openmax = 0; t.integer = 0; return snd_interval_refine(constrs_interval(constrs, var), &t); } EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax); static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_pcm_hw_constraint_list *list = rule->private; return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask); } /** * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter * @runtime: PCM runtime instance * @cond: condition bits * @var: hw_params variable to apply the list constraint * @l: list * * Apply the list of constraints to an interval parameter. * * Return: Zero if successful, or a negative error code on failure. 
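 *
 * Typical usage in a driver's open callback looks roughly like this
 * (sketch; the rate table is made up):
 *
 *   static const unsigned int rates[] = { 8000, 16000, 32000, 48000 };
 *   static const struct snd_pcm_hw_constraint_list constraints_rates = {
 *           .count = ARRAY_SIZE(rates),
 *           .list  = rates,
 *           .mask  = 0,
 *   };
 *   err = snd_pcm_hw_constraint_list(runtime, 0,
 *                                    SNDRV_PCM_HW_PARAM_RATE,
 *                                    &constraints_rates);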
*/ int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, const struct snd_pcm_hw_constraint_list *l) { return snd_pcm_hw_rule_add(runtime, cond, var, snd_pcm_hw_rule_list, (void *)l, var, -1); } EXPORT_SYMBOL(snd_pcm_hw_constraint_list); static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_pcm_hw_constraint_ranges *r = rule->private; return snd_interval_ranges(hw_param_interval(params, rule->var), r->count, r->ranges, r->mask); } /** * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter * @runtime: PCM runtime instance * @cond: condition bits * @var: hw_params variable to apply the list of range constraints * @r: ranges * * Apply the list of range constraints to an interval parameter. * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, const struct snd_pcm_hw_constraint_ranges *r) { return snd_pcm_hw_rule_add(runtime, cond, var, snd_pcm_hw_rule_ranges, (void *)r, var, -1); } EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges); static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { const struct snd_pcm_hw_constraint_ratnums *r = rule->private; unsigned int num = 0, den = 0; int err; err = snd_interval_ratnum(hw_param_interval(params, rule->var), r->nrats, r->rats, &num, &den); if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) { params->rate_num = num; params->rate_den = den; } return err; } /** * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter * @runtime: PCM runtime instance * @cond: condition bits * @var: hw_params variable to apply the ratnums constraint * @r: struct snd_ratnums constraints * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, const struct snd_pcm_hw_constraint_ratnums *r) { return snd_pcm_hw_rule_add(runtime, cond, var, snd_pcm_hw_rule_ratnums, (void *)r, var, -1); } EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums); static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { const struct snd_pcm_hw_constraint_ratdens *r = rule->private; unsigned int num = 0, den = 0; int err = snd_interval_ratden(hw_param_interval(params, rule->var), r->nrats, r->rats, &num, &den); if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) { params->rate_num = num; params->rate_den = den; } return err; } /** * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter * @runtime: PCM runtime instance * @cond: condition bits * @var: hw_params variable to apply the ratdens constraint * @r: struct snd_ratdens constraints * * Return: Zero if successful, or a negative error code on failure.
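 *
 * A data sketch (values invented for illustration): with a fixed
 * denominator of 1, this restricts the rate to 1 kHz steps between
 * 8 kHz and 48 kHz:
 *
 *   static const struct snd_ratden my_ratden = {
 *           .num_min = 8000, .num_max = 48000, .num_step = 1000, .den = 1,
 *   };
 *   static const struct snd_pcm_hw_constraint_ratdens my_ratdens = {
 *           .nrats = 1,
 *           .rats  = &my_ratden,
 *   };
 *   err = snd_pcm_hw_constraint_ratdens(runtime, 0,
 *                                       SNDRV_PCM_HW_PARAM_RATE,
 *                                       &my_ratdens);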
*/ int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, const struct snd_pcm_hw_constraint_ratdens *r) { return snd_pcm_hw_rule_add(runtime, cond, var, snd_pcm_hw_rule_ratdens, (void *)r, var, -1); } EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens); static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { unsigned int l = (unsigned long) rule->private; int width = l & 0xffff; unsigned int msbits = l >> 16; const struct snd_interval *i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS); if (!snd_interval_single(i)) return 0; if ((snd_interval_value(i) == width) || (width == 0 && snd_interval_value(i) > msbits)) params->msbits = min_not_zero(params->msbits, msbits); return 0; } /** * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule * @runtime: PCM runtime instance * @cond: condition bits * @width: sample bits width * @msbits: msbits width * * This constraint will set the number of most significant bits (msbits) if a * sample format with the specified width has been selected. If width is set to 0 * the msbits will be set for any sample format with a width larger than the * specified msbits. * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime, unsigned int cond, unsigned int width, unsigned int msbits) { unsigned long l = (msbits << 16) | width; return snd_pcm_hw_rule_add(runtime, cond, -1, snd_pcm_hw_rule_msbits, (void*) l, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1); } EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits); static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { unsigned long step = (unsigned long) rule->private; return snd_interval_step(hw_param_interval(params, rule->var), step); } /** * snd_pcm_hw_constraint_step - add a hw constraint step rule * @runtime: PCM runtime instance * @cond: condition bits * @var: hw_params variable to apply the step constraint * @step: step size * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, unsigned long step) { return snd_pcm_hw_rule_add(runtime, cond, var, snd_pcm_hw_rule_step, (void *) step, var, -1); } EXPORT_SYMBOL(snd_pcm_hw_constraint_step); static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { static const unsigned int pow2_sizes[] = { 1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7, 1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15, 1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23, 1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30 }; return snd_interval_list(hw_param_interval(params, rule->var), ARRAY_SIZE(pow2_sizes), pow2_sizes, 0); } /** * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule * @runtime: PCM runtime instance * @cond: condition bits * @var: hw_params variable to apply the power-of-2 constraint * * Return: Zero if successful, or a negative error code on failure.
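 *
 * For example (sketch), to keep the period size a power of two:
 *
 *   err = snd_pcm_hw_constraint_pow2(runtime, 0,
 *                                    SNDRV_PCM_HW_PARAM_PERIOD_BYTES);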
*/ int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var) { return snd_pcm_hw_rule_add(runtime, cond, var, snd_pcm_hw_rule_pow2, NULL, var, -1); } EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2); static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { unsigned int base_rate = (unsigned int)(uintptr_t)rule->private; struct snd_interval *rate; rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); return snd_interval_list(rate, 1, &base_rate, 0); } /** * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling * @runtime: PCM runtime instance * @base_rate: the rate at which the hardware does not resample * * Return: Zero if successful, or a negative error code on failure. */ int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime, unsigned int base_rate) { return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE, SNDRV_PCM_HW_PARAM_RATE, snd_pcm_hw_rule_noresample_func, (void *)(uintptr_t)base_rate, SNDRV_PCM_HW_PARAM_RATE, -1); } EXPORT_SYMBOL(snd_pcm_hw_rule_noresample); static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { if (hw_is_mask(var)) { snd_mask_any(hw_param_mask(params, var)); params->cmask |= 1 << var; params->rmask |= 1 << var; return; } if (hw_is_interval(var)) { snd_interval_any(hw_param_interval(params, var)); params->cmask |= 1 << var; params->rmask |= 1 << var; return; } snd_BUG(); } void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params) { unsigned int k; memset(params, 0, sizeof(*params)); for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) _snd_pcm_hw_param_any(params, k); for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) _snd_pcm_hw_param_any(params, k); params->info = ~0U; } EXPORT_SYMBOL(_snd_pcm_hw_params_any); /** * snd_pcm_hw_param_value - return @params field @var value * @params: the hw_params instance * @var: parameter to retrieve * @dir: pointer to the direction (-1,0,1) or %NULL * * Return: The value for field @var if it's fixed in configuration space * defined by @params. -%EINVAL otherwise. 
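 *
 * E.g. (sketch), querying the rate once the configuration space has been
 * narrowed down to a single value:
 *
 *   int rate = snd_pcm_hw_param_value(params, SNDRV_PCM_HW_PARAM_RATE, NULL);
 *
 * A negative return here means the rate is not yet a single value.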
*/ int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { if (hw_is_mask(var)) { const struct snd_mask *mask = hw_param_mask_c(params, var); if (!snd_mask_single(mask)) return -EINVAL; if (dir) *dir = 0; return snd_mask_value(mask); } if (hw_is_interval(var)) { const struct snd_interval *i = hw_param_interval_c(params, var); if (!snd_interval_single(i)) return -EINVAL; if (dir) *dir = i->openmin; return snd_interval_value(i); } return -EINVAL; } EXPORT_SYMBOL(snd_pcm_hw_param_value); void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { if (hw_is_mask(var)) { snd_mask_none(hw_param_mask(params, var)); params->cmask |= 1 << var; params->rmask |= 1 << var; } else if (hw_is_interval(var)) { snd_interval_none(hw_param_interval(params, var)); params->cmask |= 1 << var; params->rmask |= 1 << var; } else { snd_BUG(); } } EXPORT_SYMBOL(_snd_pcm_hw_param_setempty); static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { int changed; if (hw_is_mask(var)) changed = snd_mask_refine_first(hw_param_mask(params, var)); else if (hw_is_interval(var)) changed = snd_interval_refine_first(hw_param_interval(params, var)); else return -EINVAL; if (changed > 0) { params->cmask |= 1 << var; params->rmask |= 1 << var; } return changed; } /** * snd_pcm_hw_param_first - refine config space and return minimum value * @pcm: PCM instance * @params: the hw_params instance * @var: parameter to retrieve * @dir: pointer to the direction (-1,0,1) or %NULL * * Inside configuration space defined by @params remove from @var all * values > minimum. Reduce configuration space accordingly. * * Return: The minimum, or a negative error code on failure. */ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { int changed = _snd_pcm_hw_param_first(params, var); if (changed < 0) return changed; if (params->rmask) { int err = snd_pcm_hw_refine(pcm, params); if (err < 0) return err; } return snd_pcm_hw_param_value(params, var, dir); } EXPORT_SYMBOL(snd_pcm_hw_param_first); static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { int changed; if (hw_is_mask(var)) changed = snd_mask_refine_last(hw_param_mask(params, var)); else if (hw_is_interval(var)) changed = snd_interval_refine_last(hw_param_interval(params, var)); else return -EINVAL; if (changed > 0) { params->cmask |= 1 << var; params->rmask |= 1 << var; } return changed; } /** * snd_pcm_hw_param_last - refine config space and return maximum value * @pcm: PCM instance * @params: the hw_params instance * @var: parameter to retrieve * @dir: pointer to the direction (-1,0,1) or %NULL * * Inside configuration space defined by @params remove from @var all * values < maximum. Reduce configuration space accordingly. * * Return: The maximum, or a negative error code on failure. */ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { int changed = _snd_pcm_hw_param_last(params, var); if (changed < 0) return changed; if (params->rmask) { int err = snd_pcm_hw_refine(pcm, params); if (err < 0) return err; } return snd_pcm_hw_param_value(params, var, dir); } EXPORT_SYMBOL(snd_pcm_hw_param_last); /** * snd_pcm_hw_params_bits - Get the number of bits per sample.
* @p: hardware parameters * * Return: The number of bits per sample based on the format, * subformat, and msbits of the given hw params. */ int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p) { snd_pcm_subformat_t subformat = params_subformat(p); snd_pcm_format_t format = params_format(p); switch (format) { case SNDRV_PCM_FORMAT_S32_LE: case SNDRV_PCM_FORMAT_U32_LE: case SNDRV_PCM_FORMAT_S32_BE: case SNDRV_PCM_FORMAT_U32_BE: switch (subformat) { case SNDRV_PCM_SUBFORMAT_MSBITS_20: return 20; case SNDRV_PCM_SUBFORMAT_MSBITS_24: return 24; case SNDRV_PCM_SUBFORMAT_MSBITS_MAX: case SNDRV_PCM_SUBFORMAT_STD: default: break; } fallthrough; default: return snd_pcm_format_width(format); } } EXPORT_SYMBOL(snd_pcm_hw_params_bits); static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream, void *arg) { struct snd_pcm_runtime *runtime = substream->runtime; guard(pcm_stream_lock_irqsave)(substream); if (snd_pcm_running(substream) && snd_pcm_update_hw_ptr(substream) >= 0) runtime->status->hw_ptr %= runtime->buffer_size; else { runtime->status->hw_ptr = 0; runtime->hw_ptr_wrap = 0; } return 0; } static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream, void *arg) { struct snd_pcm_channel_info *info = arg; struct snd_pcm_runtime *runtime = substream->runtime; int width; if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) { info->offset = -1; return 0; } width = snd_pcm_format_physical_width(runtime->format); if (width < 0) return width; info->offset = 0; switch (runtime->access) { case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED: case SNDRV_PCM_ACCESS_RW_INTERLEAVED: info->first = info->channel * width; info->step = runtime->channels * width; break; case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED: case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED: { size_t size = runtime->dma_bytes / runtime->channels; info->first = info->channel * size * 8; info->step = width; break; } default: snd_BUG(); break; } return 0; } static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream, void *arg) { struct snd_pcm_hw_params *params = arg; snd_pcm_format_t format; int channels; ssize_t frame_size; params->fifo_size = substream->runtime->hw.fifo_size; if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) { format = params_format(params); channels = params_channels(params); frame_size = snd_pcm_format_size(format, channels); if (frame_size > 0) params->fifo_size /= frame_size; } return 0; } static int snd_pcm_lib_ioctl_sync_id(struct snd_pcm_substream *substream, void *arg) { static const unsigned char id[12] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; if (substream->runtime->std_sync_id) snd_pcm_set_sync_per_card(substream, arg, id, sizeof(id)); return 0; } /** * snd_pcm_lib_ioctl - a generic PCM ioctl callback * @substream: the pcm substream instance * @cmd: ioctl command * @arg: ioctl argument * * Processes the generic ioctl commands for PCM. * Can be passed as the ioctl callback for PCM ops. * * Return: Zero if successful, or a negative error code on failure.
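 *
 * Drivers without special ioctl handling can simply point their PCM ops
 * at it (sketch; the ops structure and callback names are hypothetical):
 *
 *   static const struct snd_pcm_ops my_pcm_ops = {
 *           .open  = my_open,
 *           .ioctl = snd_pcm_lib_ioctl,
 *           ...
 *   };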
*/ int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg) { switch (cmd) { case SNDRV_PCM_IOCTL1_RESET: return snd_pcm_lib_ioctl_reset(substream, arg); case SNDRV_PCM_IOCTL1_CHANNEL_INFO: return snd_pcm_lib_ioctl_channel_info(substream, arg); case SNDRV_PCM_IOCTL1_FIFO_SIZE: return snd_pcm_lib_ioctl_fifo_size(substream, arg); case SNDRV_PCM_IOCTL1_SYNC_ID: return snd_pcm_lib_ioctl_sync_id(substream, arg); } return -ENXIO; } EXPORT_SYMBOL(snd_pcm_lib_ioctl); /** * snd_pcm_period_elapsed_under_stream_lock() - update the runtime status for the next period * with the lock of the PCM substream already held. * @substream: the instance of PCM substream. * * This function is called when a batch of audio data frames of the same size as the period of * the buffer has been processed in the audio data transmission. * * The call updates the runtime status with the latest position of the audio data transmission, * checks for overruns and underruns of the buffer, wakes up user processes waiting for available * audio data frames, samples the audio timestamp, and stops or drains the PCM substream according * to the configured thresholds. * * The function is intended for the case that the PCM driver operates on audio data frames with * the lock of the PCM substream already held; e.g. in a callback of any operation of &snd_pcm_ops * in process context. In interrupt context, it's preferable to use ``snd_pcm_period_elapsed()`` * instead, which acquires the lock of the PCM substream by itself. * * Developers should be aware that some callbacks in &snd_pcm_ops may be invoked by this call: * * - .pointer - to retrieve the current position of the audio data transmission as a frame count, or the XRUN state. * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state. * - .get_time_info - to retrieve the audio timestamp if needed. * * Even if more than one period has elapsed since the last call, you have to call this only once. */ void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; if (PCM_RUNTIME_CHECK(substream)) return; runtime = substream->runtime; if (!snd_pcm_running(substream) || snd_pcm_update_hw_ptr0(substream, 1) < 0) goto _end; #ifdef CONFIG_SND_PCM_TIMER if (substream->timer_running) snd_timer_interrupt(substream->timer, 1); #endif _end: snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN); } EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock); /** * snd_pcm_period_elapsed() - update the runtime status for the next period, acquiring the lock of * the PCM substream by itself. * @substream: the instance of PCM substream. * * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except that it * acquires the lock of the PCM substream by itself. * * It's typically called from an IRQ handler, when the hardware IRQ signals that a batch of audio * data frames of the same size as the period of the buffer has been processed in the audio data * transmission. */ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream) { if (snd_BUG_ON(!substream)) return; guard(pcm_stream_lock_irqsave)(substream); snd_pcm_period_elapsed_under_stream_lock(substream); } EXPORT_SYMBOL(snd_pcm_period_elapsed); /* * Wait until avail_min data becomes available * Returns a negative error code if any error occurs during operation. * The available space is stored in availp. When err = 0 and avail = 0 * on the capture stream, it indicates the stream is in DRAINING state.
*/ static int wait_for_avail(struct snd_pcm_substream *substream, snd_pcm_uframes_t *availp) { struct snd_pcm_runtime *runtime = substream->runtime; int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; wait_queue_entry_t wait; int err = 0; snd_pcm_uframes_t avail = 0; long wait_time, tout; init_waitqueue_entry(&wait, current); set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&runtime->tsleep, &wait); if (runtime->no_period_wakeup) wait_time = MAX_SCHEDULE_TIMEOUT; else { /* use wait time from substream if available */ if (substream->wait_time) { wait_time = substream->wait_time; } else { wait_time = 100; if (runtime->rate) { long t = runtime->buffer_size * 1100 / runtime->rate; wait_time = max(t, wait_time); } } wait_time = msecs_to_jiffies(wait_time); } for (;;) { if (signal_pending(current)) { err = -ERESTARTSYS; break; } /* * We need to check if space became available already * (and thus the wakeup happened already) first, to close * the race of space already having become available. * This check must happen after we have been added to the * waitqueue and have set the current state to INTERRUPTIBLE. */ avail = snd_pcm_avail(substream); if (avail >= runtime->twake) break; snd_pcm_stream_unlock_irq(substream); tout = schedule_timeout(wait_time); snd_pcm_stream_lock_irq(substream); set_current_state(TASK_INTERRUPTIBLE); switch (runtime->state) { case SNDRV_PCM_STATE_SUSPENDED: err = -ESTRPIPE; goto _endloop; case SNDRV_PCM_STATE_XRUN: err = -EPIPE; goto _endloop; case SNDRV_PCM_STATE_DRAINING: if (is_playback) err = -EPIPE; else avail = 0; /* indicate draining */ goto _endloop; case SNDRV_PCM_STATE_OPEN: case SNDRV_PCM_STATE_SETUP: case SNDRV_PCM_STATE_DISCONNECTED: err = -EBADFD; goto _endloop; case SNDRV_PCM_STATE_PAUSED: continue; } if (!tout) { pcm_dbg(substream->pcm, "%s timeout (DMA or IRQ trouble?)\n", is_playback ?
"playback write" : "capture read"); err = -EIO; break; } } _endloop: set_current_state(TASK_RUNNING); remove_wait_queue(&runtime->tsleep, &wait); *availp = avail; return err; } typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream, int channel, unsigned long hwoff, struct iov_iter *iter, unsigned long bytes); typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *, snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f, bool); /* calculate the target DMA-buffer position to be written/read */ static void *get_dma_ptr(struct snd_pcm_runtime *runtime, int channel, unsigned long hwoff) { return runtime->dma_area + hwoff + channel * (runtime->dma_bytes / runtime->channels); } /* default copy ops for write; used for both interleaved and non- modes */ static int default_write_copy(struct snd_pcm_substream *substream, int channel, unsigned long hwoff, struct iov_iter *iter, unsigned long bytes) { if (copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff), bytes, iter) != bytes) return -EFAULT; return 0; } /* fill silence instead of copy data; called as a transfer helper * from __snd_pcm_lib_write() or directly from noninterleaved_copy() when * a NULL buffer is passed */ static int fill_silence(struct snd_pcm_substream *substream, int channel, unsigned long hwoff, struct iov_iter *iter, unsigned long bytes) { struct snd_pcm_runtime *runtime = substream->runtime; if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) return 0; if (substream->ops->fill_silence) return substream->ops->fill_silence(substream, channel, hwoff, bytes); snd_pcm_format_set_silence(runtime->format, get_dma_ptr(runtime, channel, hwoff), bytes_to_samples(runtime, bytes)); return 0; } /* default copy ops for read; used for both interleaved and non- modes */ static int default_read_copy(struct snd_pcm_substream *substream, int channel, unsigned long hwoff, struct iov_iter *iter, unsigned long bytes) { if (copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff), bytes, iter) != bytes) return -EFAULT; return 0; } /* call transfer with the filled iov_iter */ static int do_transfer(struct snd_pcm_substream *substream, int c, unsigned long hwoff, void *data, unsigned long bytes, pcm_transfer_f transfer, bool in_kernel) { struct iov_iter iter; int err, type; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) type = ITER_SOURCE; else type = ITER_DEST; if (in_kernel) { struct kvec kvec = { data, bytes }; iov_iter_kvec(&iter, type, &kvec, 1, bytes); return transfer(substream, c, hwoff, &iter, bytes); } err = import_ubuf(type, (__force void __user *)data, bytes, &iter); if (err) return err; return transfer(substream, c, hwoff, &iter, bytes); } /* call transfer function with the converted pointers and sizes; * for interleaved mode, it's one shot for all samples */ static int interleaved_copy(struct snd_pcm_substream *substream, snd_pcm_uframes_t hwoff, void *data, snd_pcm_uframes_t off, snd_pcm_uframes_t frames, pcm_transfer_f transfer, bool in_kernel) { struct snd_pcm_runtime *runtime = substream->runtime; /* convert to bytes */ hwoff = frames_to_bytes(runtime, hwoff); off = frames_to_bytes(runtime, off); frames = frames_to_bytes(runtime, frames); return do_transfer(substream, 0, hwoff, data + off, frames, transfer, in_kernel); } /* call transfer function with the converted pointers and sizes for each * non-interleaved channel; when buffer is NULL, silencing instead of copying */ static int noninterleaved_copy(struct snd_pcm_substream *substream, snd_pcm_uframes_t hwoff, void *data, 
snd_pcm_uframes_t off, snd_pcm_uframes_t frames, pcm_transfer_f transfer, bool in_kernel) { struct snd_pcm_runtime *runtime = substream->runtime; int channels = runtime->channels; void **bufs = data; int c, err; /* convert to bytes; note that it's not frames_to_bytes() here. * in non-interleaved mode, we copy for each channel, thus * each copy is n_samples bytes x channels = whole frames. */ off = samples_to_bytes(runtime, off); frames = samples_to_bytes(runtime, frames); hwoff = samples_to_bytes(runtime, hwoff); for (c = 0; c < channels; ++c, ++bufs) { if (!data || !*bufs) err = fill_silence(substream, c, hwoff, NULL, frames); else err = do_transfer(substream, c, hwoff, *bufs + off, frames, transfer, in_kernel); if (err < 0) return err; } return 0; } /* fill silence on the given buffer position; * called from snd_pcm_playback_silence() */ static int fill_silence_frames(struct snd_pcm_substream *substream, snd_pcm_uframes_t off, snd_pcm_uframes_t frames) { if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED || substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED) return interleaved_copy(substream, off, NULL, 0, frames, fill_silence, true); else return noninterleaved_copy(substream, off, NULL, 0, frames, fill_silence, true); } /* sanity-check for read/write methods */ static int pcm_sanity_check(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area)) return -EINVAL; if (runtime->state == SNDRV_PCM_STATE_OPEN) return -EBADFD; return 0; } static int pcm_accessible_state(struct snd_pcm_runtime *runtime) { switch (runtime->state) { case SNDRV_PCM_STATE_PREPARED: case SNDRV_PCM_STATE_RUNNING: case SNDRV_PCM_STATE_PAUSED: return 0; case SNDRV_PCM_STATE_XRUN: return -EPIPE; case SNDRV_PCM_STATE_SUSPENDED: return -ESTRPIPE; default: return -EBADFD; } } /* update to the given appl_ptr and call ack callback if needed; * when an error is returned, take back to the original value */ int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream, snd_pcm_uframes_t appl_ptr) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr; snd_pcm_sframes_t diff; int ret; if (old_appl_ptr == appl_ptr) return 0; if (appl_ptr >= runtime->boundary) return -EINVAL; /* * check if a rewind is requested by the application */ if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) { diff = appl_ptr - old_appl_ptr; if (diff >= 0) { if (diff > runtime->buffer_size) return -EINVAL; } else { if (runtime->boundary + diff > runtime->buffer_size) return -EINVAL; } } runtime->control->appl_ptr = appl_ptr; if (substream->ops->ack) { ret = substream->ops->ack(substream); if (ret < 0) { runtime->control->appl_ptr = old_appl_ptr; if (ret == -EPIPE) __snd_pcm_xrun(substream); return ret; } } trace_applptr(substream, old_appl_ptr, appl_ptr); return 0; } /* the common loop for read/write data */ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, void *data, bool interleaved, snd_pcm_uframes_t size, bool in_kernel) { struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t xfer = 0; snd_pcm_uframes_t offset = 0; snd_pcm_uframes_t avail; pcm_copy_f writer; pcm_transfer_f transfer; bool nonblock; bool is_playback; int err; err = pcm_sanity_check(substream); if (err < 0) return err; is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; if (interleaved) { 
if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED && runtime->channels > 1) return -EINVAL; writer = interleaved_copy; } else { if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) return -EINVAL; writer = noninterleaved_copy; } if (!data) { if (is_playback) transfer = fill_silence; else return -EINVAL; } else { if (substream->ops->copy) transfer = substream->ops->copy; else transfer = is_playback ? default_write_copy : default_read_copy; } if (size == 0) return 0; nonblock = !!(substream->f_flags & O_NONBLOCK); snd_pcm_stream_lock_irq(substream); err = pcm_accessible_state(runtime); if (err < 0) goto _end_unlock; runtime->twake = runtime->control->avail_min ? : 1; if (runtime->state == SNDRV_PCM_STATE_RUNNING) snd_pcm_update_hw_ptr(substream); /* * If size < start_threshold, wait indefinitely. Another * thread may start capture */ if (!is_playback && runtime->state == SNDRV_PCM_STATE_PREPARED && size >= runtime->start_threshold) { err = snd_pcm_start(substream); if (err < 0) goto _end_unlock; } avail = snd_pcm_avail(substream); while (size > 0) { snd_pcm_uframes_t frames, appl_ptr, appl_ofs; snd_pcm_uframes_t cont; if (!avail) { if (!is_playback && runtime->state == SNDRV_PCM_STATE_DRAINING) { snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); goto _end_unlock; } if (nonblock) { err = -EAGAIN; goto _end_unlock; } runtime->twake = min_t(snd_pcm_uframes_t, size, runtime->control->avail_min ? : 1); err = wait_for_avail(substream, &avail); if (err < 0) goto _end_unlock; if (!avail) continue; /* draining */ } frames = size > avail ? avail : size; appl_ptr = READ_ONCE(runtime->control->appl_ptr); appl_ofs = appl_ptr % runtime->buffer_size; cont = runtime->buffer_size - appl_ofs; if (frames > cont) frames = cont; if (snd_BUG_ON(!frames)) { err = -EINVAL; goto _end_unlock; } if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) { err = -EBUSY; goto _end_unlock; } snd_pcm_stream_unlock_irq(substream); if (!is_playback) snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU); err = writer(substream, appl_ofs, data, offset, frames, transfer, in_kernel); if (is_playback) snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE); snd_pcm_stream_lock_irq(substream); atomic_dec(&runtime->buffer_accessing); if (err < 0) goto _end_unlock; err = pcm_accessible_state(runtime); if (err < 0) goto _end_unlock; appl_ptr += frames; if (appl_ptr >= runtime->boundary) appl_ptr -= runtime->boundary; err = pcm_lib_apply_appl_ptr(substream, appl_ptr); if (err < 0) goto _end_unlock; offset += frames; size -= frames; xfer += frames; avail -= frames; if (is_playback && runtime->state == SNDRV_PCM_STATE_PREPARED && snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) { err = snd_pcm_start(substream); if (err < 0) goto _end_unlock; } } _end_unlock: runtime->twake = 0; if (xfer > 0 && err >= 0) snd_pcm_update_state(substream, runtime); snd_pcm_stream_unlock_irq(substream); return xfer > 0 ? 
(snd_pcm_sframes_t)xfer : err; } EXPORT_SYMBOL(__snd_pcm_lib_xfer); /* * standard channel mapping helpers */ /* default channel maps for multi-channel playbacks, up to 8 channels */ const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = { { .channels = 1, .map = { SNDRV_CHMAP_MONO } }, { .channels = 2, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, { .channels = 4, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, { .channels = 6, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR, SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } }, { .channels = 8, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR, SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE, SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } }, { } }; EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps); /* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */ const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = { { .channels = 1, .map = { SNDRV_CHMAP_MONO } }, { .channels = 2, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, { .channels = 4, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, { .channels = 6, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, { .channels = 8, .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE, SNDRV_CHMAP_RL, SNDRV_CHMAP_RR, SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } }, { } }; EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps); static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch) { if (ch > info->max_channels) return false; return !info->channel_mask || (info->channel_mask & (1U << ch)); } static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = info->max_channels; uinfo->value.integer.min = 0; uinfo->value.integer.max = SNDRV_CHMAP_LAST; return 0; } /* get callback for channel map ctl element * stores the channel position firstly matching with the current channels */ static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); struct snd_pcm_substream *substream; const struct snd_pcm_chmap_elem *map; if (!info->chmap) return -EINVAL; substream = snd_pcm_chmap_substream(info, idx); if (!substream) return -ENODEV; memset(ucontrol->value.integer.value, 0, sizeof(long) * info->max_channels); if (!substream->runtime) return 0; /* no channels set */ for (map = info->chmap; map->channels; map++) { int i; if (map->channels == substream->runtime->channels && valid_chmap_channels(info, map->channels)) { for (i = 0; i < map->channels; i++) ucontrol->value.integer.value[i] = map->map[i]; return 0; } } return -EINVAL; } /* tlv callback for channel map ctl element * expands the pre-defined channel maps in a form of TLV */ static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int size, unsigned int __user *tlv) { struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); const struct snd_pcm_chmap_elem *map; unsigned int __user *dst; int c, count = 0; if (!info->chmap) return -EINVAL; if (size < 8) return -ENOMEM; if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv)) return -EFAULT; size -= 8; dst = tlv + 2; for (map = info->chmap; map->channels; map++) { int chs_bytes = map->channels * 4; if (!valid_chmap_channels(info, map->channels)) continue; if (size < 
8) return -ENOMEM; if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) || put_user(chs_bytes, dst + 1)) return -EFAULT; dst += 2; size -= 8; count += 8; if (size < chs_bytes) return -ENOMEM; size -= chs_bytes; count += chs_bytes; for (c = 0; c < map->channels; c++) { if (put_user(map->map[c], dst)) return -EFAULT; dst++; } } if (put_user(count, tlv + 1)) return -EFAULT; return 0; } static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol) { struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); info->pcm->streams[info->stream].chmap_kctl = NULL; kfree(info); } /** * snd_pcm_add_chmap_ctls - create channel-mapping control elements * @pcm: the assigned PCM instance * @stream: stream direction * @chmap: channel map elements (for query) * @max_channels: the max number of channels for the stream * @private_value: the value passed to each kcontrol's private_value field * @info_ret: store struct snd_pcm_chmap instance if non-NULL * * Create channel-mapping control elements assigned to the given PCM stream(s). * Return: Zero if successful, or a negative error value. */ int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream, const struct snd_pcm_chmap_elem *chmap, int max_channels, unsigned long private_value, struct snd_pcm_chmap **info_ret) { struct snd_pcm_chmap *info; struct snd_kcontrol_new knew = { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE | SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, .info = pcm_chmap_ctl_info, .get = pcm_chmap_ctl_get, .tlv.c = pcm_chmap_ctl_tlv, }; int err; if (WARN_ON(pcm->streams[stream].chmap_kctl)) return -EBUSY; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->pcm = pcm; info->stream = stream; info->chmap = chmap; info->max_channels = max_channels; if (stream == SNDRV_PCM_STREAM_PLAYBACK) knew.name = "Playback Channel Map"; else knew.name = "Capture Channel Map"; knew.device = pcm->device; knew.count = pcm->streams[stream].substream_count; knew.private_value = private_value; info->kctl = snd_ctl_new1(&knew, info); if (!info->kctl) { kfree(info); return -ENOMEM; } info->kctl->private_free = pcm_chmap_ctl_private_free; err = snd_ctl_add(pcm->card, info->kctl); if (err < 0) return err; pcm->streams[stream].chmap_kctl = info->kctl; if (info_ret) *info_ret = info; return 0; } EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
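
/*
 * Usage sketch (hypothetical driver code): attach the standard channel-map
 * controls to the playback stream of a freshly created PCM:
 *
 *   err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *                                snd_pcm_std_chmaps, 8, 0, NULL);
 *   if (err < 0)
 *           return err;
 */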
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2010 Daniel Mack <daniel@caiaq.de> * * This file holds USB constants and structures defined * by the USB Device Class Definition for Audio Devices in version 2.0. * Comments below reference relevant sections of the documents contained * in http://www.usb.org/developers/devclass_docs/Audio2.0_final.zip */ #ifndef __LINUX_USB_AUDIO_V2_H #define __LINUX_USB_AUDIO_V2_H #include <linux/types.h> /* v1.0 and v2.0 of this standard have many things in common. For the rest * of the definitions, please refer to audio.h */ /* * bmControl field decoders * * From the USB Audio spec v2.0: * * bmaControls() is a (ch+1)-element array of 4-byte bitmaps, * each containing a set of bit pairs. If a Control is present, * it must be Host readable. If a certain Control is not * present then the bit pair must be set to 0b00. * If a Control is present but read-only, the bit pair must be * set to 0b01. If a Control is also Host programmable, the bit * pair must be set to 0b11. The value 0b10 is not allowed. * */ static inline bool uac_v2v3_control_is_readable(u32 bmControls, u8 control) { return (bmControls >> ((control - 1) * 2)) & 0x1; } static inline bool uac_v2v3_control_is_writeable(u32 bmControls, u8 control) { return (bmControls >> ((control - 1) * 2)) & 0x2; } /* 4.7.2 Class-Specific AC Interface Descriptor */ struct uac2_ac_header_descriptor { __u8 bLength; /* 9 */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* UAC_MS_HEADER */ __le16 bcdADC; /* 0x0200 */ __u8 bCategory; __le16 wTotalLength; /* includes Unit and Terminal desc.
*/ __u8 bmControls; } __packed; /* 2.3.1.6 Type I Format Type Descriptor (Frmts20 final.pdf)*/ struct uac2_format_type_i_descriptor { __u8 bLength; /* in bytes: 6 */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* FORMAT_TYPE */ __u8 bFormatType; /* FORMAT_TYPE_1 */ __u8 bSubslotSize; /* {1,2,3,4} */ __u8 bBitResolution; } __packed; /* 4.7.2.1 Clock Source Descriptor */ struct uac_clock_source_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bmAttributes; __u8 bmControls; __u8 bAssocTerminal; __u8 iClockSource; } __attribute__((packed)); /* bmAttribute fields */ #define UAC_CLOCK_SOURCE_TYPE_EXT 0x0 #define UAC_CLOCK_SOURCE_TYPE_INT_FIXED 0x1 #define UAC_CLOCK_SOURCE_TYPE_INT_VAR 0x2 #define UAC_CLOCK_SOURCE_TYPE_INT_PROG 0x3 #define UAC_CLOCK_SOURCE_SYNCED_TO_SOF (1 << 2) /* 4.7.2.2 Clock Selector Descriptor */ struct uac_clock_selector_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bNrInPins; __u8 baCSourceID[]; /* bmControls and iClockSelector omitted */ } __attribute__((packed)); /* 4.7.2.3 Clock Multiplier Descriptor */ struct uac_clock_multiplier_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bCSourceID; __u8 bmControls; __u8 iClockMultiplier; } __attribute__((packed)); /* 4.7.2.4 Input terminal descriptor */ struct uac2_input_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bCSourceID; __u8 bNrChannels; __le32 bmChannelConfig; __u8 iChannelNames; __le16 bmControls; __u8 iTerminal; } __attribute__((packed)); /* 4.7.2.5 Output terminal descriptor */ struct uac2_output_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bSourceID; __u8 bCSourceID; __le16 bmControls; __u8 iTerminal; } __attribute__((packed)); /* 4.7.2.8 Feature Unit Descriptor */ struct uac2_feature_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __u8 bSourceID; /* bmaControls is actually u32, * but u8 is needed for the hybrid parser */ __u8 bmaControls[]; /* variable length */ } __attribute__((packed)); #define UAC2_DT_FEATURE_UNIT_SIZE(ch) (6 + ((ch) + 1) * 4) /* As above, but more useful for defining your own descriptors: */ #define DECLARE_UAC2_FEATURE_UNIT_DESCRIPTOR(ch) \ struct uac2_feature_unit_descriptor_##ch { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ __u8 bUnitID; \ __u8 bSourceID; \ __le32 bmaControls[ch + 1]; \ __u8 iFeature; \ } __packed /* 4.7.2.10 Effect Unit Descriptor */ struct uac2_effect_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __le16 wEffectType; __u8 bSourceID; __u8 bmaControls[]; /* variable length */ } __attribute__((packed)); /* 4.9.2 Class-Specific AS Interface Descriptor */ struct uac2_as_header_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalLink; __u8 bmControls; __u8 bFormatType; __le32 bmFormats; __u8 bNrChannels; __le32 bmChannelConfig; __u8 iChannelNames; } __attribute__((packed)); #define UAC2_FORMAT_TYPE_I_RAW_DATA (1 << 31) /* 4.10.1.2 Class-Specific AS Isochronous Audio Data Endpoint Descriptor */ struct uac2_iso_endpoint_descriptor { __u8 bLength; /* in bytes: 8 */ __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */ __u8 bDescriptorSubtype; /* EP_GENERAL */ __u8 
bmAttributes; __u8 bmControls; __u8 bLockDelayUnits; __le16 wLockDelay; } __attribute__((packed)); #define UAC2_CONTROL_PITCH (3 << 0) #define UAC2_CONTROL_DATA_OVERRUN (3 << 2) #define UAC2_CONTROL_DATA_UNDERRUN (3 << 4) /* 5.2.5.4.2 Connector Control Parameter Block */ struct uac2_connectors_ctl_blk { __u8 bNrChannels; __le32 bmChannelConfig; __u8 iChannelNames; } __attribute__((packed)); /* 6.1 Interrupt Data Message */ #define UAC2_INTERRUPT_DATA_MSG_VENDOR (1 << 0) #define UAC2_INTERRUPT_DATA_MSG_EP (1 << 1) struct uac2_interrupt_data_msg { __u8 bInfo; __u8 bAttribute; __le16 wValue; __le16 wIndex; } __attribute__((packed)); /* A.7 Audio Function Category Codes */ #define UAC2_FUNCTION_SUBCLASS_UNDEFINED 0x00 #define UAC2_FUNCTION_DESKTOP_SPEAKER 0x01 #define UAC2_FUNCTION_HOME_THEATER 0x02 #define UAC2_FUNCTION_MICROPHONE 0x03 #define UAC2_FUNCTION_HEADSET 0x04 #define UAC2_FUNCTION_TELEPHONE 0x05 #define UAC2_FUNCTION_CONVERTER 0x06 #define UAC2_FUNCTION_SOUND_RECORDER 0x07 #define UAC2_FUNCTION_IO_BOX 0x08 #define UAC2_FUNCTION_MUSICAL_INSTRUMENT 0x09 #define UAC2_FUNCTION_PRO_AUDIO 0x0a #define UAC2_FUNCTION_AUDIO_VIDEO 0x0b #define UAC2_FUNCTION_CONTROL_PANEL 0x0c #define UAC2_FUNCTION_OTHER 0xff /* A.9 Audio Class-Specific AC Interface Descriptor Subtypes */ /* see audio.h for the rest, which is identical to v1 */ #define UAC2_EFFECT_UNIT 0x07 #define UAC2_PROCESSING_UNIT_V2 0x08 #define UAC2_EXTENSION_UNIT_V2 0x09 #define UAC2_CLOCK_SOURCE 0x0a #define UAC2_CLOCK_SELECTOR 0x0b #define UAC2_CLOCK_MULTIPLIER 0x0c #define UAC2_SAMPLE_RATE_CONVERTER 0x0d /* A.10 Audio Class-Specific AS Interface Descriptor Subtypes */ /* see audio.h for the rest, which is identical to v1 */ #define UAC2_ENCODER 0x03 #define UAC2_DECODER 0x04 /* A.11 Effect Unit Effect Types */ #define UAC2_EFFECT_UNDEFINED 0x00 #define UAC2_EFFECT_PARAM_EQ 0x01 #define UAC2_EFFECT_REVERB 0x02 #define UAC2_EFFECT_MOD_DELAY 0x03 #define UAC2_EFFECT_DYN_RANGE_COMP 0x04 /* A.12 Processing Unit Process Types */ #define UAC2_PROCESS_UNDEFINED 0x00 #define UAC2_PROCESS_UP_DOWNMIX 0x01 #define UAC2_PROCESS_DOLBY_PROLOCIC 0x02 #define UAC2_PROCESS_STEREO_EXTENDER 0x03 /* A.14 Audio Class-Specific Request Codes */ #define UAC2_CS_CUR 0x01 #define UAC2_CS_RANGE 0x02 #define UAC2_CS_MEM 0x03 /* A.15 Encoder Type Codes */ #define UAC2_ENCODER_UNDEFINED 0x00 #define UAC2_ENCODER_OTHER 0x01 #define UAC2_ENCODER_MPEG 0x02 #define UAC2_ENCODER_AC3 0x03 #define UAC2_ENCODER_WMA 0x04 #define UAC2_ENCODER_DTS 0x05 /* A.16 Decoder Type Codes */ #define UAC2_DECODER_UNDEFINED 0x00 #define UAC2_DECODER_OTHER 0x01 #define UAC2_DECODER_MPEG 0x02 #define UAC2_DECODER_AC3 0x03 #define UAC2_DECODER_WMA 0x04 #define UAC2_DECODER_DTS 0x05 /* A.17.1 Clock Source Control Selectors */ #define UAC2_CS_UNDEFINED 0x00 #define UAC2_CS_CONTROL_SAM_FREQ 0x01 #define UAC2_CS_CONTROL_CLOCK_VALID 0x02 /* A.17.2 Clock Selector Control Selectors */ #define UAC2_CX_UNDEFINED 0x00 #define UAC2_CX_CLOCK_SELECTOR 0x01 /* A.17.3 Clock Multiplier Control Selectors */ #define UAC2_CM_UNDEFINED 0x00 #define UAC2_CM_NUMERATOR 0x01 #define UAC2_CM_DENOMINTATOR 0x02 /* A.17.4 Terminal Control Selectors */ #define UAC2_TE_UNDEFINED 0x00 #define UAC2_TE_COPY_PROTECT 0x01 #define UAC2_TE_CONNECTOR 0x02 #define UAC2_TE_OVERLOAD 0x03 #define UAC2_TE_CLUSTER 0x04 #define UAC2_TE_UNDERFLOW 0x05 #define UAC2_TE_OVERFLOW 0x06 #define UAC2_TE_LATENCY 0x07 /* A.17.5 Mixer Control Selectors */ #define UAC2_MU_UNDEFINED 0x00 #define UAC2_MU_MIXER 0x01 #define UAC2_MU_CLUSTER 0x02 
#define UAC2_MU_UNDERFLOW 0x03 #define UAC2_MU_OVERFLOW 0x04 #define UAC2_MU_LATENCY 0x05 /* A.17.6 Selector Control Selectors */ #define UAC2_SU_UNDEFINED 0x00 #define UAC2_SU_SELECTOR 0x01 #define UAC2_SU_LATENCY 0x02 /* A.17.7 Feature Unit Control Selectors */ /* see audio.h for the rest, which is identical to v1 */ #define UAC2_FU_INPUT_GAIN 0x0b #define UAC2_FU_INPUT_GAIN_PAD 0x0c #define UAC2_FU_PHASE_INVERTER 0x0d #define UAC2_FU_UNDERFLOW 0x0e #define UAC2_FU_OVERFLOW 0x0f #define UAC2_FU_LATENCY 0x10 /* A.17.8.1 Parametric Equalizer Section Effect Unit Control Selectors */ #define UAC2_PE_UNDEFINED 0x00 #define UAC2_PE_ENABLE 0x01 #define UAC2_PE_CENTERFREQ 0x02 #define UAC2_PE_QFACTOR 0x03 #define UAC2_PE_GAIN 0x04 #define UAC2_PE_UNDERFLOW 0x05 #define UAC2_PE_OVERFLOW 0x06 #define UAC2_PE_LATENCY 0x07 /* A.17.8.2 Reverberation Effect Unit Control Selectors */ #define UAC2_RV_UNDEFINED 0x00 #define UAC2_RV_ENABLE 0x01 #define UAC2_RV_TYPE 0x02 #define UAC2_RV_LEVEL 0x03 #define UAC2_RV_TIME 0x04 #define UAC2_RV_FEEDBACK 0x05 #define UAC2_RV_PREDELAY 0x06 #define UAC2_RV_DENSITY 0x07 #define UAC2_RV_HIFREQ_ROLLOFF 0x08 #define UAC2_RV_UNDERFLOW 0x09 #define UAC2_RV_OVERFLOW 0x0a #define UAC2_RV_LATENCY 0x0b /* A.17.8.3 Modulation Delay Effect Control Selectors */ #define UAC2_MD_UNDEFINED 0x00 #define UAC2_MD_ENABLE 0x01 #define UAC2_MD_BALANCE 0x02 #define UAC2_MD_RATE 0x03 #define UAC2_MD_DEPTH 0x04 #define UAC2_MD_TIME 0x05 #define UAC2_MD_FEEDBACK 0x06 #define UAC2_MD_UNDERFLOW 0x07 #define UAC2_MD_OVERFLOW 0x08 #define UAC2_MD_LATENCY 0x09 /* A.17.8.4 Dynamic Range Compressor Effect Unit Control Selectors */ #define UAC2_DR_UNDEFINED 0x00 #define UAC2_DR_ENABLE 0x01 #define UAC2_DR_COMPRESSION_RATE 0x02 #define UAC2_DR_MAXAMPL 0x03 #define UAC2_DR_THRESHOLD 0x04 #define UAC2_DR_ATTACK_TIME 0x05 #define UAC2_DR_RELEASE_TIME 0x06 #define UAC2_DR_UNDEFLOW 0x07 #define UAC2_DR_OVERFLOW 0x08 #define UAC2_DR_LATENCY 0x09 /* A.17.9.1 Up/Down-mix Processing Unit Control Selectors */ #define UAC2_UD_UNDEFINED 0x00 #define UAC2_UD_ENABLE 0x01 #define UAC2_UD_MODE_SELECT 0x02 #define UAC2_UD_CLUSTER 0x03 #define UAC2_UD_UNDERFLOW 0x04 #define UAC2_UD_OVERFLOW 0x05 #define UAC2_UD_LATENCY 0x06 /* A.17.9.2 Dolby Prologic[tm] Processing Unit Control Selectors */ #define UAC2_DP_UNDEFINED 0x00 #define UAC2_DP_ENABLE 0x01 #define UAC2_DP_MODE_SELECT 0x02 #define UAC2_DP_CLUSTER 0x03 #define UAC2_DP_UNDERFFLOW 0x04 #define UAC2_DP_OVERFLOW 0x05 #define UAC2_DP_LATENCY 0x06 /* A.17.9.3 Stereo Expander Processing Unit Control Selectors */ #define UAC2_ST_EXT_UNDEFINED 0x00 #define UAC2_ST_EXT_ENABLE 0x01 #define UAC2_ST_EXT_WIDTH 0x02 #define UAC2_ST_EXT_UNDEFLOW 0x03 #define UAC2_ST_EXT_OVERFLOW 0x04 #define UAC2_ST_EXT_LATENCY 0x05 /* A.17.10 Extension Unit Control Selectors */ #define UAC2_XU_UNDEFINED 0x00 #define UAC2_XU_ENABLE 0x01 #define UAC2_XU_CLUSTER 0x02 #define UAC2_XU_UNDERFLOW 0x03 #define UAC2_XU_OVERFLOW 0x04 #define UAC2_XU_LATENCY 0x05 /* A.17.11 AudioStreaming Interface Control Selectors */ #define UAC2_AS_UNDEFINED 0x00 #define UAC2_AS_ACT_ALT_SETTING 0x01 #define UAC2_AS_VAL_ALT_SETTINGS 0x02 #define UAC2_AS_AUDIO_DATA_FORMAT 0x03 /* A.17.12 Encoder Control Selectors */ #define UAC2_EN_UNDEFINED 0x00 #define UAC2_EN_BIT_RATE 0x01 #define UAC2_EN_QUALITY 0x02 #define UAC2_EN_VBR 0x03 #define UAC2_EN_TYPE 0x04 #define UAC2_EN_UNDERFLOW 0x05 #define UAC2_EN_OVERFLOW 0x06 #define UAC2_EN_ENCODER_ERROR 0x07 #define UAC2_EN_PARAM1 0x08 #define UAC2_EN_PARAM2 0x09 #define 
UAC2_EN_PARAM3 0x0a #define UAC2_EN_PARAM4 0x0b #define UAC2_EN_PARAM5 0x0c #define UAC2_EN_PARAM6 0x0d #define UAC2_EN_PARAM7 0x0e #define UAC2_EN_PARAM8 0x0f /* A.17.13.1 MPEG Decoder Control Selectors */ #define UAC2_MPEG_UNDEFINED 0x00 #define UAC2_MPEG_DUAL_CHANNEL 0x01 #define UAC2_MPEG_SECOND_STEREO 0x02 #define UAC2_MPEG_MULTILINGUAL 0x03 #define UAC2_MPEG_DYN_RANGE 0x04 #define UAC2_MPEG_SCALING 0x05 #define UAC2_MPEG_HILO_SCALING 0x06 #define UAC2_MPEG_UNDERFLOW 0x07 #define UAC2_MPEG_OVERFLOW 0x08 #define UAC2_MPEG_DECODER_ERROR 0x09 /* A17.13.2 AC3 Decoder Control Selectors */ #define UAC2_AC3_UNDEFINED 0x00 #define UAC2_AC3_MODE 0x01 #define UAC2_AC3_DYN_RANGE 0x02 #define UAC2_AC3_SCALING 0x03 #define UAC2_AC3_HILO_SCALING 0x04 #define UAC2_AC3_UNDERFLOW 0x05 #define UAC2_AC3_OVERFLOW 0x06 #define UAC2_AC3_DECODER_ERROR 0x07 /* A17.13.3 WMA Decoder Control Selectors */ #define UAC2_WMA_UNDEFINED 0x00 #define UAC2_WMA_UNDERFLOW 0x01 #define UAC2_WMA_OVERFLOW 0x02 #define UAC2_WMA_DECODER_ERROR 0x03 /* A17.13.4 DTS Decoder Control Selectors */ #define UAC2_DTS_UNDEFINED 0x00 #define UAC2_DTS_UNDERFLOW 0x01 #define UAC2_DTS_OVERFLOW 0x02 #define UAC2_DTS_DECODER_ERROR 0x03 /* A17.14 Endpoint Control Selectors */ #define UAC2_EP_CS_UNDEFINED 0x00 #define UAC2_EP_CS_PITCH 0x01 #define UAC2_EP_CS_DATA_OVERRUN 0x02 #define UAC2_EP_CS_DATA_UNDERRUN 0x03 #endif /* __LINUX_USB_AUDIO_V2_H */
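
/*
 * Usage sketch (hypothetical): decoding the bit pairs of a Clock Source
 * bmControls field with the helpers defined at the top of this header.
 * The descriptor pointer "cs" is assumed to come from descriptor parsing.
 *
 *   struct uac_clock_source_descriptor *cs = ...;
 *
 *   if (uac_v2v3_control_is_readable(cs->bmControls,
 *                                    UAC2_CS_CONTROL_SAM_FREQ))
 *           ... the sampling frequency control can be read ...
 *   if (uac_v2v3_control_is_writeable(cs->bmControls,
 *                                     UAC2_CS_CONTROL_SAM_FREQ))
 *           ... and can also be set by the host ...
 */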
// SPDX-License-Identifier: GPL-2.0-only
/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/pkeys.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/coredump.h>

#include <asm/fpu/api.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/xcr.h>

#include <asm/cpuid.h>
#include <asm/tlbflush.h>
#include <asm/prctl.h>
#include <asm/elf.h>

#include <uapi/asm/elf.h>

#include "context.h"
#include "internal.h"
#include "legacy.h"
#include "xstate.h"

#define for_each_extended_xfeature(bit, mask)				\
	(bit) = FIRST_EXTENDED_XFEATURE;				\
	for_each_set_bit_from(bit, (unsigned long *)&(mask), 8 * sizeof(mask))

/*
 * Although we spell it out in here, the Processor Trace
 * xfeature is completely unused. We use other mechanisms
 * to save/restore PT state in Linux.
*/ static const char *xfeature_names[] = { "x87 floating point registers", "SSE registers", "AVX registers", "MPX bounds registers", "MPX CSR", "AVX-512 opmask", "AVX-512 Hi256", "AVX-512 ZMM_Hi256", "Processor Trace (unused)", "Protection Keys User registers", "PASID state", "Control-flow User registers", "Control-flow Kernel registers (unused)", "unknown xstate feature", "unknown xstate feature", "unknown xstate feature", "unknown xstate feature", "AMX Tile config", "AMX Tile data", "unknown xstate feature", }; static unsigned short xsave_cpuid_features[] __initdata = { [XFEATURE_FP] = X86_FEATURE_FPU, [XFEATURE_SSE] = X86_FEATURE_XMM, [XFEATURE_YMM] = X86_FEATURE_AVX, [XFEATURE_BNDREGS] = X86_FEATURE_MPX, [XFEATURE_BNDCSR] = X86_FEATURE_MPX, [XFEATURE_OPMASK] = X86_FEATURE_AVX512F, [XFEATURE_ZMM_Hi256] = X86_FEATURE_AVX512F, [XFEATURE_Hi16_ZMM] = X86_FEATURE_AVX512F, [XFEATURE_PT_UNIMPLEMENTED_SO_FAR] = X86_FEATURE_INTEL_PT, [XFEATURE_PKRU] = X86_FEATURE_OSPKE, [XFEATURE_PASID] = X86_FEATURE_ENQCMD, [XFEATURE_CET_USER] = X86_FEATURE_SHSTK, [XFEATURE_XTILE_CFG] = X86_FEATURE_AMX_TILE, [XFEATURE_XTILE_DATA] = X86_FEATURE_AMX_TILE, }; static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init = { [ 0 ... XFEATURE_MAX - 1] = -1}; static unsigned int xstate_sizes[XFEATURE_MAX] __ro_after_init = { [ 0 ... XFEATURE_MAX - 1] = -1}; static unsigned int xstate_flags[XFEATURE_MAX] __ro_after_init; #define XSTATE_FLAG_SUPERVISOR BIT(0) #define XSTATE_FLAG_ALIGNED64 BIT(1) /* * Return whether the system supports a given xfeature. * * Also return the name of the (most advanced) feature that the caller requested: */ int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name) { u64 xfeatures_missing = xfeatures_needed & ~fpu_kernel_cfg.max_features; if (unlikely(feature_name)) { long xfeature_idx, max_idx; u64 xfeatures_print; /* * So we use FLS here to be able to print the most advanced * feature that was requested but is missing. So if a driver * asks about "XFEATURE_MASK_SSE | XFEATURE_MASK_YMM" we'll print the * missing AVX feature - this is the most informative message * to users: */ if (xfeatures_missing) xfeatures_print = xfeatures_missing; else xfeatures_print = xfeatures_needed; xfeature_idx = fls64(xfeatures_print)-1; max_idx = ARRAY_SIZE(xfeature_names)-1; xfeature_idx = min(xfeature_idx, max_idx); *feature_name = xfeature_names[xfeature_idx]; } if (xfeatures_missing) return 0; return 1; } EXPORT_SYMBOL_GPL(cpu_has_xfeatures); static bool xfeature_is_aligned64(int xfeature_nr) { return xstate_flags[xfeature_nr] & XSTATE_FLAG_ALIGNED64; } static bool xfeature_is_supervisor(int xfeature_nr) { return xstate_flags[xfeature_nr] & XSTATE_FLAG_SUPERVISOR; } static unsigned int xfeature_get_offset(u64 xcomp_bv, int xfeature) { unsigned int offs, i; /* * Non-compacted format and legacy features use the cached fixed * offsets. */ if (!cpu_feature_enabled(X86_FEATURE_XCOMPACTED) || xfeature <= XFEATURE_SSE) return xstate_offsets[xfeature]; /* * Compacted format offsets depend on the actual content of the * compacted xsave area which is determined by the xcomp_bv header * field. */ offs = FXSAVE_SIZE + XSAVE_HDR_SIZE; for_each_extended_xfeature(i, xcomp_bv) { if (xfeature_is_aligned64(i)) offs = ALIGN(offs, 64); if (i == xfeature) break; offs += xstate_sizes[i]; } return offs; } /* * Enable the extended processor state save/restore feature. * Called once per CPU onlining. 
*/ void fpu__init_cpu_xstate(void) { if (!boot_cpu_has(X86_FEATURE_XSAVE) || !fpu_kernel_cfg.max_features) return; cr4_set_bits(X86_CR4_OSXSAVE); /* * Must happen after CR4 setup and before xsetbv() to allow KVM * lazy passthrough. Write independent of the dynamic state static * key as that does not work on the boot CPU. This also ensures * that any stale state is wiped out from XFD. Reset the per CPU * xfd cache too. */ if (cpu_feature_enabled(X86_FEATURE_XFD)) xfd_set_state(init_fpstate.xfd); /* * XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features * managed by XSAVE{C, OPT, S} and XRSTOR{S}. Only XSAVE user * states can be set here. */ xsetbv(XCR_XFEATURE_ENABLED_MASK, fpu_user_cfg.max_features); /* * MSR_IA32_XSS sets supervisor states managed by XSAVES. */ if (boot_cpu_has(X86_FEATURE_XSAVES)) { wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | xfeatures_mask_independent()); } } static bool xfeature_enabled(enum xfeature xfeature) { return fpu_kernel_cfg.max_features & BIT_ULL(xfeature); } /* * Record the offsets and sizes of various xstates contained * in the XSAVE state memory layout. */ static void __init setup_xstate_cache(void) { u32 eax, ebx, ecx, edx, i; /* start at the beginning of the "extended state" */ unsigned int last_good_offset = offsetof(struct xregs_state, extended_state_area); /* * The FP xstates and SSE xstates are legacy states. They are always * in the fixed offsets in the xsave area in either compacted form * or standard form. */ xstate_offsets[XFEATURE_FP] = 0; xstate_sizes[XFEATURE_FP] = offsetof(struct fxregs_state, xmm_space); xstate_offsets[XFEATURE_SSE] = xstate_sizes[XFEATURE_FP]; xstate_sizes[XFEATURE_SSE] = sizeof_field(struct fxregs_state, xmm_space); for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) { cpuid_count(CPUID_LEAF_XSTATE, i, &eax, &ebx, &ecx, &edx); xstate_sizes[i] = eax; xstate_flags[i] = ecx; /* * If an xfeature is supervisor state, the offset in EBX is * invalid, leave it to -1. */ if (xfeature_is_supervisor(i)) continue; xstate_offsets[i] = ebx; /* * In our xstate size checks, we assume that the highest-numbered * xstate feature has the highest offset in the buffer. Ensure * it does. */ WARN_ONCE(last_good_offset > xstate_offsets[i], "x86/fpu: misordered xstate at %d\n", last_good_offset); last_good_offset = xstate_offsets[i]; } } /* * Print out all the supported xstate features: */ static void __init print_xstate_features(void) { int i; for (i = 0; i < XFEATURE_MAX; i++) { u64 mask = BIT_ULL(i); const char *name; if (cpu_has_xfeatures(mask, &name)) pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", mask, name); } } /* * This check is important because it is easy to get XSTATE_* * confused with XSTATE_BIT_*. */ #define CHECK_XFEATURE(nr) do { \ WARN_ON(nr < FIRST_EXTENDED_XFEATURE); \ WARN_ON(nr >= XFEATURE_MAX); \ } while (0) /* * Print out xstate component offsets and sizes */ static void __init print_xstate_offset_size(void) { int i; for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) { pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n", i, xfeature_get_offset(fpu_kernel_cfg.max_features, i), i, xstate_sizes[i]); } } /* * This function is called only during boot time when x86 caps are not set * up and alternative can not be used yet. 
*/ static __init void os_xrstor_booting(struct xregs_state *xstate) { u64 mask = fpu_kernel_cfg.max_features & XFEATURE_MASK_FPSTATE; u32 lmask = mask; u32 hmask = mask >> 32; int err; if (cpu_feature_enabled(X86_FEATURE_XSAVES)) XSTATE_OP(XRSTORS, xstate, lmask, hmask, err); else XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); /* * We should never fault when copying from a kernel buffer, and the FPU * state we set at boot time should be valid. */ WARN_ON_FPU(err); } /* * All supported features have either init state all zeros or are * handled in setup_init_fpu() individually. This is an explicit * feature list and does not use XFEATURE_MASK*SUPPORTED to catch * newly added supported features at build time and make people * actually look at the init state for the new feature. */ #define XFEATURES_INIT_FPSTATE_HANDLED \ (XFEATURE_MASK_FP | \ XFEATURE_MASK_SSE | \ XFEATURE_MASK_YMM | \ XFEATURE_MASK_OPMASK | \ XFEATURE_MASK_ZMM_Hi256 | \ XFEATURE_MASK_Hi16_ZMM | \ XFEATURE_MASK_PKRU | \ XFEATURE_MASK_BNDREGS | \ XFEATURE_MASK_BNDCSR | \ XFEATURE_MASK_PASID | \ XFEATURE_MASK_CET_USER | \ XFEATURE_MASK_XTILE) /* * setup the xstate image representing the init state */ static void __init setup_init_fpu_buf(void) { BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED | XFEATURE_MASK_SUPERVISOR_SUPPORTED) != XFEATURES_INIT_FPSTATE_HANDLED); if (!boot_cpu_has(X86_FEATURE_XSAVE)) return; print_xstate_features(); xstate_init_xcomp_bv(&init_fpstate.regs.xsave, init_fpstate.xfeatures); /* * Init all the features state with header.xfeatures being 0x0 */ os_xrstor_booting(&init_fpstate.regs.xsave); /* * All components are now in init state. Read the state back so * that init_fpstate contains all non-zero init state. This only * works with XSAVE, but not with XSAVEOPT and XSAVEC/S because * those use the init optimization which skips writing data for * components in init state. * * XSAVE could be used, but that would require to reshuffle the * data when XSAVEC/S is available because XSAVEC/S uses xstate * compaction. But doing so is a pointless exercise because most * components have an all zeros init state except for the legacy * ones (FP and SSE). Those can be saved with FXSAVE into the * legacy area. Adding new features requires to ensure that init * state is all zeroes or if not to add the necessary handling * here. */ fxsave(&init_fpstate.regs.fxsave); } int xfeature_size(int xfeature_nr) { u32 eax, ebx, ecx, edx; CHECK_XFEATURE(xfeature_nr); cpuid_count(CPUID_LEAF_XSTATE, xfeature_nr, &eax, &ebx, &ecx, &edx); return eax; } /* Validate an xstate header supplied by userspace (ptrace or sigreturn) */ static int validate_user_xstate_header(const struct xstate_header *hdr, struct fpstate *fpstate) { /* No unknown or supervisor features may be set */ if (hdr->xfeatures & ~fpstate->user_xfeatures) return -EINVAL; /* Userspace must use the uncompacted format */ if (hdr->xcomp_bv) return -EINVAL; /* * If 'reserved' is shrunken to add a new field, make sure to validate * that new field here! 
*/ BUILD_BUG_ON(sizeof(hdr->reserved) != 48); /* No reserved bits may be set */ if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved))) return -EINVAL; return 0; } static void __init __xstate_dump_leaves(void) { int i; u32 eax, ebx, ecx, edx; static int should_dump = 1; if (!should_dump) return; should_dump = 0; /* * Dump out a few leaves past the ones that we support * just in case there are some goodies up there */ for (i = 0; i < XFEATURE_MAX + 10; i++) { cpuid_count(CPUID_LEAF_XSTATE, i, &eax, &ebx, &ecx, &edx); pr_warn("CPUID[%02x, %02x]: eax=%08x ebx=%08x ecx=%08x edx=%08x\n", CPUID_LEAF_XSTATE, i, eax, ebx, ecx, edx); } } #define XSTATE_WARN_ON(x, fmt, ...) do { \ if (WARN_ONCE(x, "XSAVE consistency problem: " fmt, ##__VA_ARGS__)) { \ __xstate_dump_leaves(); \ } \ } while (0) #define XCHECK_SZ(sz, nr, __struct) ({ \ if (WARN_ONCE(sz != sizeof(__struct), \ "[%s]: struct is %zu bytes, cpu state %d bytes\n", \ xfeature_names[nr], sizeof(__struct), sz)) { \ __xstate_dump_leaves(); \ } \ true; \ }) /** * check_xtile_data_against_struct - Check tile data state size. * * Calculate the state size by multiplying the single tile size which is * recorded in a C struct, and the number of tiles that the CPU informs. * Compare the provided size with the calculation. * * @size: The tile data state size * * Returns: 0 on success, -EINVAL on mismatch. */ static int __init check_xtile_data_against_struct(int size) { u32 max_palid, palid, state_size; u32 eax, ebx, ecx, edx; u16 max_tile; /* * Check the maximum palette id: * eax: the highest numbered palette subleaf. */ cpuid_count(CPUID_LEAF_TILE, 0, &max_palid, &ebx, &ecx, &edx); /* * Cross-check each tile size and find the maximum number of * supported tiles. */ for (palid = 1, max_tile = 0; palid <= max_palid; palid++) { u16 tile_size, max; /* * Check the tile size info: * eax[31:16]: bytes per tile * ebx[31:16]: the max names (or max number of tiles) */ cpuid_count(CPUID_LEAF_TILE, palid, &eax, &ebx, &ecx, &edx); tile_size = eax >> 16; max = ebx >> 16; if (tile_size != sizeof(struct xtile_data)) { pr_err("%s: struct is %zu bytes, cpu xtile %d bytes\n", __stringify(XFEATURE_XTILE_DATA), sizeof(struct xtile_data), tile_size); __xstate_dump_leaves(); return -EINVAL; } if (max > max_tile) max_tile = max; } state_size = sizeof(struct xtile_data) * max_tile; if (size != state_size) { pr_err("%s: calculated size is %u bytes, cpu state %d bytes\n", __stringify(XFEATURE_XTILE_DATA), state_size, size); __xstate_dump_leaves(); return -EINVAL; } return 0; } /* * We have a C struct for each 'xstate'. We need to ensure * that our software representation matches what the CPU * tells us about the state's size. */ static bool __init check_xstate_against_struct(int nr) { /* * Ask the CPU for the size of the state. */ int sz = xfeature_size(nr); /* * Match each CPU state with the corresponding software * structure.
*/ switch (nr) { case XFEATURE_YMM: return XCHECK_SZ(sz, nr, struct ymmh_struct); case XFEATURE_BNDREGS: return XCHECK_SZ(sz, nr, struct mpx_bndreg_state); case XFEATURE_BNDCSR: return XCHECK_SZ(sz, nr, struct mpx_bndcsr_state); case XFEATURE_OPMASK: return XCHECK_SZ(sz, nr, struct avx_512_opmask_state); case XFEATURE_ZMM_Hi256: return XCHECK_SZ(sz, nr, struct avx_512_zmm_uppers_state); case XFEATURE_Hi16_ZMM: return XCHECK_SZ(sz, nr, struct avx_512_hi16_state); case XFEATURE_PKRU: return XCHECK_SZ(sz, nr, struct pkru_state); case XFEATURE_PASID: return XCHECK_SZ(sz, nr, struct ia32_pasid_state); case XFEATURE_XTILE_CFG: return XCHECK_SZ(sz, nr, struct xtile_cfg); case XFEATURE_CET_USER: return XCHECK_SZ(sz, nr, struct cet_user_state); case XFEATURE_XTILE_DATA: check_xtile_data_against_struct(sz); return true; default: XSTATE_WARN_ON(1, "No structure for xstate: %d\n", nr); return false; } return true; } static unsigned int xstate_calculate_size(u64 xfeatures, bool compacted) { unsigned int topmost = fls64(xfeatures) - 1; unsigned int offset = xstate_offsets[topmost]; if (topmost <= XFEATURE_SSE) return sizeof(struct xregs_state); if (compacted) offset = xfeature_get_offset(xfeatures, topmost); return offset + xstate_sizes[topmost]; } /* * This essentially double-checks what the cpu told us about * how large the XSAVE buffer needs to be. We are recalculating * it to be safe. * * Independent XSAVE features allocate their own buffers and are not * covered by these checks. Only the size of the buffer for task->fpu * is checked here. */ static bool __init paranoid_xstate_size_valid(unsigned int kernel_size) { bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED); bool xsaves = cpu_feature_enabled(X86_FEATURE_XSAVES); unsigned int size = FXSAVE_SIZE + XSAVE_HDR_SIZE; int i; for_each_extended_xfeature(i, fpu_kernel_cfg.max_features) { if (!check_xstate_against_struct(i)) return false; /* * Supervisor state components can be managed only by * XSAVES. */ if (!xsaves && xfeature_is_supervisor(i)) { XSTATE_WARN_ON(1, "Got supervisor feature %d, but XSAVES not advertised\n", i); return false; } } size = xstate_calculate_size(fpu_kernel_cfg.max_features, compacted); XSTATE_WARN_ON(size != kernel_size, "size %u != kernel_size %u\n", size, kernel_size); return size == kernel_size; } /* * Get total size of enabled xstates in XCR0 | IA32_XSS. * * Note the SDM's wording here. "sub-function 0" only enumerates * the size of the *user* states. If we use it to size a buffer * that we use 'XSAVES' on, we could potentially overflow the * buffer because 'XSAVES' saves system states too. * * This also takes compaction into account. So this works for * XSAVEC as well. */ static unsigned int __init get_compacted_size(void) { unsigned int eax, ebx, ecx, edx; /* * - CPUID function 0DH, sub-function 1: * EBX enumerates the size (in bytes) required by * the XSAVES instruction for an XSAVE area * containing all the state components * corresponding to bits currently set in * XCR0 | IA32_XSS. * * When XSAVES is not available but XSAVEC is (virt), then there * are no supervisor states, but XSAVEC still uses compacted * format. */ cpuid_count(CPUID_LEAF_XSTATE, 1, &eax, &ebx, &ecx, &edx); return ebx; } /* * Get the total size of the enabled xstates without the independent supervisor * features. */ static unsigned int __init get_xsave_compacted_size(void) { u64 mask = xfeatures_mask_independent(); unsigned int size; if (!mask) return get_compacted_size(); /* Disable independent features. 
*/ wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor()); /* * Ask the hardware what size is required of the buffer. * This is the size required for the task->fpu buffer. */ size = get_compacted_size(); /* Re-enable independent features so XSAVES will work on them again. */ wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask); return size; } static unsigned int __init get_xsave_size_user(void) { unsigned int eax, ebx, ecx, edx; /* * - CPUID function 0DH, sub-function 0: * EBX enumerates the size (in bytes) required by * the XSAVE instruction for an XSAVE area * containing all the *user* state components * corresponding to bits currently set in XCR0. */ cpuid_count(CPUID_LEAF_XSTATE, 0, &eax, &ebx, &ecx, &edx); return ebx; } static int __init init_xstate_size(void) { /* Recompute the context size for enabled features: */ unsigned int user_size, kernel_size, kernel_default_size; bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED); /* Uncompacted user space size */ user_size = get_xsave_size_user(); /* * XSAVES kernel size includes supervisor states and uses compacted * format. XSAVEC uses compacted format, but does not save * supervisor states. * * XSAVE[OPT] do not support supervisor states so kernel and user * size is identical. */ if (compacted) kernel_size = get_xsave_compacted_size(); else kernel_size = user_size; kernel_default_size = xstate_calculate_size(fpu_kernel_cfg.default_features, compacted); if (!paranoid_xstate_size_valid(kernel_size)) return -EINVAL; fpu_kernel_cfg.max_size = kernel_size; fpu_user_cfg.max_size = user_size; fpu_kernel_cfg.default_size = kernel_default_size; fpu_user_cfg.default_size = xstate_calculate_size(fpu_user_cfg.default_features, false); return 0; } /* * We enabled the XSAVE hardware, but something went wrong and * we can not use it. Disable it. */ static void __init fpu__init_disable_system_xstate(unsigned int legacy_size) { fpu_kernel_cfg.max_features = 0; cr4_clear_bits(X86_CR4_OSXSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVE); /* Restore the legacy size.*/ fpu_kernel_cfg.max_size = legacy_size; fpu_kernel_cfg.default_size = legacy_size; fpu_user_cfg.max_size = legacy_size; fpu_user_cfg.default_size = legacy_size; /* * Prevent enabling the static branch which enables writes to the * XFD MSR. */ init_fpstate.xfd = 0; fpstate_reset(&current->thread.fpu); } /* * Enable and initialize the xsave feature. * Called once per system bootup. */ void __init fpu__init_system_xstate(unsigned int legacy_size) { unsigned int eax, ebx, ecx, edx; u64 xfeatures; int err; int i; if (!boot_cpu_has(X86_FEATURE_FPU)) { pr_info("x86/fpu: No FPU detected\n"); return; } if (!boot_cpu_has(X86_FEATURE_XSAVE)) { pr_info("x86/fpu: x87 FPU will use %s\n", boot_cpu_has(X86_FEATURE_FXSR) ? "FXSAVE" : "FSAVE"); return; } /* * Find user xstates supported by the processor. */ cpuid_count(CPUID_LEAF_XSTATE, 0, &eax, &ebx, &ecx, &edx); fpu_kernel_cfg.max_features = eax + ((u64)edx << 32); /* * Find supervisor xstates supported by the processor. */ cpuid_count(CPUID_LEAF_XSTATE, 1, &eax, &ebx, &ecx, &edx); fpu_kernel_cfg.max_features |= ecx + ((u64)edx << 32); if ((fpu_kernel_cfg.max_features & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) { /* * This indicates that something really unexpected happened * with the enumeration. Disable XSAVE and try to continue * booting without it. This is too early to BUG(). 
*/ pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", fpu_kernel_cfg.max_features); goto out_disable; } fpu_kernel_cfg.independent_features = fpu_kernel_cfg.max_features & XFEATURE_MASK_INDEPENDENT; /* * Clear XSAVE features that are disabled in the normal CPUID. */ for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) { unsigned short cid = xsave_cpuid_features[i]; /* Careful: X86_FEATURE_FPU is 0! */ if ((i != XFEATURE_FP && !cid) || !boot_cpu_has(cid)) fpu_kernel_cfg.max_features &= ~BIT_ULL(i); } if (!cpu_feature_enabled(X86_FEATURE_XFD)) fpu_kernel_cfg.max_features &= ~XFEATURE_MASK_USER_DYNAMIC; if (!cpu_feature_enabled(X86_FEATURE_XSAVES)) fpu_kernel_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED; else fpu_kernel_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED | XFEATURE_MASK_SUPERVISOR_SUPPORTED; fpu_user_cfg.max_features = fpu_kernel_cfg.max_features; fpu_user_cfg.max_features &= XFEATURE_MASK_USER_SUPPORTED; /* Clean out dynamic features from default */ fpu_kernel_cfg.default_features = fpu_kernel_cfg.max_features; fpu_kernel_cfg.default_features &= ~XFEATURE_MASK_USER_DYNAMIC; fpu_user_cfg.default_features = fpu_user_cfg.max_features; fpu_user_cfg.default_features &= ~XFEATURE_MASK_USER_DYNAMIC; /* Store it for paranoia check at the end */ xfeatures = fpu_kernel_cfg.max_features; /* * Initialize the default XFD state in initfp_state and enable the * dynamic sizing mechanism if dynamic states are available. The * static key cannot be enabled here because this runs before * jump_label_init(). This is delayed to an initcall. */ init_fpstate.xfd = fpu_user_cfg.max_features & XFEATURE_MASK_USER_DYNAMIC; /* Set up compaction feature bit */ if (cpu_feature_enabled(X86_FEATURE_XSAVEC) || cpu_feature_enabled(X86_FEATURE_XSAVES)) setup_force_cpu_cap(X86_FEATURE_XCOMPACTED); /* Enable xstate instructions to be able to continue with initialization: */ fpu__init_cpu_xstate(); /* Cache size, offset and flags for initialization */ setup_xstate_cache(); err = init_xstate_size(); if (err) goto out_disable; /* Reset the state for the current task */ fpstate_reset(&current->thread.fpu); /* * Update info used for ptrace frames; use standard-format size and no * supervisor xstates: */ update_regset_xstate_info(fpu_user_cfg.max_size, fpu_user_cfg.max_features); /* * init_fpstate excludes dynamic states as they are large but init * state is zero. */ init_fpstate.size = fpu_kernel_cfg.default_size; init_fpstate.xfeatures = fpu_kernel_cfg.default_features; if (init_fpstate.size > sizeof(init_fpstate.regs)) { pr_warn("x86/fpu: init_fpstate buffer too small (%zu < %d), disabling XSAVE\n", sizeof(init_fpstate.regs), init_fpstate.size); goto out_disable; } setup_init_fpu_buf(); /* * Paranoia check whether something in the setup modified the * xfeatures mask. */ if (xfeatures != fpu_kernel_cfg.max_features) { pr_err("x86/fpu: xfeatures modified from 0x%016llx to 0x%016llx during init, disabling XSAVE\n", xfeatures, fpu_kernel_cfg.max_features); goto out_disable; } /* * CPU capabilities initialization runs before FPU init. So * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely * functional, set the feature bit so depending code works. */ setup_force_cpu_cap(X86_FEATURE_OSXSAVE); print_xstate_offset_size(); pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n", fpu_kernel_cfg.max_features, fpu_kernel_cfg.max_size, boot_cpu_has(X86_FEATURE_XCOMPACTED) ? 
"compacted" : "standard"); return; out_disable: /* something went wrong, try to boot without any XSAVE support */ fpu__init_disable_system_xstate(legacy_size); } /* * Restore minimal FPU state after suspend: */ void fpu__resume_cpu(void) { /* * Restore XCR0 on xsave capable CPUs: */ if (cpu_feature_enabled(X86_FEATURE_XSAVE)) xsetbv(XCR_XFEATURE_ENABLED_MASK, fpu_user_cfg.max_features); /* * Restore IA32_XSS. The same CPUID bit enumerates support * of XSAVES and MSR_IA32_XSS. */ if (cpu_feature_enabled(X86_FEATURE_XSAVES)) { wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | xfeatures_mask_independent()); } if (fpu_state_size_dynamic()) wrmsrl(MSR_IA32_XFD, current->thread.fpu.fpstate->xfd); } /* * Given an xstate feature nr, calculate where in the xsave * buffer the state is. Callers should ensure that the buffer * is valid. */ static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr) { u64 xcomp_bv = xsave->header.xcomp_bv; if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr))) return NULL; if (cpu_feature_enabled(X86_FEATURE_XCOMPACTED)) { if (WARN_ON_ONCE(!(xcomp_bv & BIT_ULL(xfeature_nr)))) return NULL; } return (void *)xsave + xfeature_get_offset(xcomp_bv, xfeature_nr); } /* * Given the xsave area and a state inside, this function returns the * address of the state. * * This is the API that is called to get xstate address in either * standard format or compacted format of xsave area. * * Note that if there is no data for the field in the xsave buffer * this will return NULL. * * Inputs: * xstate: the thread's storage area for all FPU data * xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP, * XFEATURE_SSE, etc...) * Output: * address of the state in the xsave area, or NULL if the * field is not present in the xsave buffer. */ void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr) { /* * Do we even *have* xsave state? */ if (!boot_cpu_has(X86_FEATURE_XSAVE)) return NULL; /* * We should not ever be requesting features that we * have not enabled. */ if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr))) return NULL; /* * This assumes the last 'xsave*' instruction to * have requested that 'xfeature_nr' be saved. * If it did not, we might be seeing and old value * of the field in the buffer. * * This can happen because the last 'xsave' did not * request that this feature be saved (unlikely) * or because the "init optimization" caused it * to not be saved. */ if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr))) return NULL; return __raw_xsave_addr(xsave, xfeature_nr); } EXPORT_SYMBOL_GPL(get_xsave_addr); /* * Given an xstate feature nr, calculate where in the xsave buffer the state is. * The xsave buffer should be in standard format, not compacted (e.g. user mode * signal frames). */ void __user *get_xsave_addr_user(struct xregs_state __user *xsave, int xfeature_nr) { if (WARN_ON_ONCE(!xfeature_enabled(xfeature_nr))) return NULL; return (void __user *)xsave + xstate_offsets[xfeature_nr]; } #ifdef CONFIG_ARCH_HAS_PKEYS /* * This will go out and modify PKRU register to set the access * rights for @pkey to @init_val. */ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val) { u32 old_pkru, new_pkru_bits = 0; int pkey_shift; /* * This check implies XSAVE support. OSPKE only gets * set if we enable XSAVE and we enable PKU in XCR0. */ if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) return -EINVAL; /* * This code should only be called with valid 'pkey' * values originating from in-kernel users. Complain * if a bad value is observed. 
*/ if (WARN_ON_ONCE(pkey >= arch_max_pkey())) return -EINVAL; /* Set the bits we need in PKRU: */ if (init_val & PKEY_DISABLE_ACCESS) new_pkru_bits |= PKRU_AD_BIT; if (init_val & PKEY_DISABLE_WRITE) new_pkru_bits |= PKRU_WD_BIT; /* Shift the bits in to the correct place in PKRU for pkey: */ pkey_shift = pkey * PKRU_BITS_PER_PKEY; new_pkru_bits <<= pkey_shift; /* Get old PKRU and mask off any old bits in place: */ old_pkru = read_pkru(); old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift); /* Write old part along with new part: */ write_pkru(old_pkru | new_pkru_bits); return 0; } #endif /* ! CONFIG_ARCH_HAS_PKEYS */ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate, void *init_xstate, unsigned int size) { membuf_write(to, from_xstate ? xstate : init_xstate, size); } /** * __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer * @to: membuf descriptor * @fpstate: The fpstate buffer from which to copy * @xfeatures: The mask of xfeatures to save (XSAVE mode only) * @pkru_val: The PKRU value to store in the PKRU component * @copy_mode: The requested copy mode * * Converts from kernel XSAVE or XSAVES compacted format to UABI conforming * format, i.e. from the kernel internal hardware dependent storage format * to the requested @mode. UABI XSTATE is always uncompacted! * * It supports partial copy but @to.pos always starts from zero. */ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate, u64 xfeatures, u32 pkru_val, enum xstate_copy_mode copy_mode) { const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr); struct xregs_state *xinit = &init_fpstate.regs.xsave; struct xregs_state *xsave = &fpstate->regs.xsave; struct xstate_header header; unsigned int zerofrom; u64 mask; int i; memset(&header, 0, sizeof(header)); header.xfeatures = xsave->header.xfeatures; /* Mask out the feature bits depending on copy mode */ switch (copy_mode) { case XSTATE_COPY_FP: header.xfeatures &= XFEATURE_MASK_FP; break; case XSTATE_COPY_FX: header.xfeatures &= XFEATURE_MASK_FP | XFEATURE_MASK_SSE; break; case XSTATE_COPY_XSAVE: header.xfeatures &= fpstate->user_xfeatures & xfeatures; break; } /* Copy FP state up to MXCSR */ copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387, &xinit->i387, off_mxcsr); /* Copy MXCSR when SSE or YMM are set in the feature mask */ copy_feature(header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM), &to, &xsave->i387.mxcsr, &xinit->i387.mxcsr, MXCSR_AND_FLAGS_SIZE); /* Copy the remaining FP state */ copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387.st_space, &xinit->i387.st_space, sizeof(xsave->i387.st_space)); /* Copy the SSE state - shared with YMM, but independently managed */ copy_feature(header.xfeatures & XFEATURE_MASK_SSE, &to, &xsave->i387.xmm_space, &xinit->i387.xmm_space, sizeof(xsave->i387.xmm_space)); if (copy_mode != XSTATE_COPY_XSAVE) goto out; /* Zero the padding area */ membuf_zero(&to, sizeof(xsave->i387.padding)); /* Copy xsave->i387.sw_reserved */ membuf_write(&to, xstate_fx_sw_bytes, sizeof(xsave->i387.sw_reserved)); /* Copy the user space relevant state of @xsave->header */ membuf_write(&to, &header, sizeof(header)); zerofrom = offsetof(struct xregs_state, extended_state_area); /* * This 'mask' indicates which states to copy from fpstate. 
* Those extended states that are not present in fpstate are * either disabled or initialized: * * In non-compacted format, disabled features still occupy * state space but there is no state to copy from in the * compacted init_fpstate. The gap tracking will zero these * states. * * The extended features have an all zeroes init state. Thus, * remove them from 'mask' to zero those features in the user * buffer instead of retrieving them from init_fpstate. */ mask = header.xfeatures; for_each_extended_xfeature(i, mask) { /* * If there was a feature or alignment gap, zero the space * in the destination buffer. */ if (zerofrom < xstate_offsets[i]) membuf_zero(&to, xstate_offsets[i] - zerofrom); if (i == XFEATURE_PKRU) { struct pkru_state pkru = {0}; /* * PKRU is not necessarily up to date in the * XSAVE buffer. Use the provided value. */ pkru.pkru = pkru_val; membuf_write(&to, &pkru, sizeof(pkru)); } else { membuf_write(&to, __raw_xsave_addr(xsave, i), xstate_sizes[i]); } /* * Keep track of the last copied state in the non-compacted * target buffer for gap zeroing. */ zerofrom = xstate_offsets[i] + xstate_sizes[i]; } out: if (to.left) membuf_zero(&to, to.left); } /** * copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer * @to: membuf descriptor * @tsk: The task from which to copy the saved xstate * @copy_mode: The requested copy mode * * Converts from kernel XSAVE or XSAVES compacted format to UABI conforming * format, i.e. from the kernel internal hardware dependent storage format * to the requested @mode. UABI XSTATE is always uncompacted! * * It supports partial copy but @to.pos always starts from zero. */ void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk, enum xstate_copy_mode copy_mode) { __copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate, tsk->thread.fpu.fpstate->user_xfeatures, tsk->thread.pkru, copy_mode); } static int copy_from_buffer(void *dst, unsigned int offset, unsigned int size, const void *kbuf, const void __user *ubuf) { if (kbuf) { memcpy(dst, kbuf + offset, size); } else { if (copy_from_user(dst, ubuf + offset, size)) return -EFAULT; } return 0; } /** * copy_uabi_to_xstate - Copy a UABI format buffer to the kernel xstate * @fpstate: The fpstate buffer to copy to * @kbuf: The UABI format buffer, if it comes from the kernel * @ubuf: The UABI format buffer, if it comes from userspace * @pkru: The location to write the PKRU value to * * Converts from the UABI format into the kernel internal hardware * dependent format. * * This function ultimately has three different callers with distinct PKRU * behavior. * 1. When called from sigreturn the PKRU register will be restored from * @fpstate via an XRSTOR. Correctly copying the UABI format buffer to * @fpstate is sufficient to cover this case, but the caller will also * pass a pointer to the thread_struct's pkru field in @pkru and updating * it is harmless. * 2. When called from ptrace the PKRU register will be restored from the * thread_struct's pkru field. A pointer to that is passed in @pkru. * The kernel will restore it manually, so the XRSTOR behavior that resets * the PKRU register to the hardware init value (0) if the corresponding * xfeatures bit is not set is emulated here. * 3. When called from KVM the PKRU register will be restored from the vcpu's * pkru field. A pointer to that is passed in @pkru. KVM hasn't used * XRSTOR and hasn't had the PKRU resetting behavior described above. To * preserve that KVM behavior, it passes NULL for @pkru if the xfeatures * bit is not set. 
*/ static int copy_uabi_to_xstate(struct fpstate *fpstate, const void *kbuf, const void __user *ubuf, u32 *pkru) { struct xregs_state *xsave = &fpstate->regs.xsave; unsigned int offset, size; struct xstate_header hdr; u64 mask; int i; offset = offsetof(struct xregs_state, header); if (copy_from_buffer(&hdr, offset, sizeof(hdr), kbuf, ubuf)) return -EFAULT; if (validate_user_xstate_header(&hdr, fpstate)) return -EINVAL; /* Validate MXCSR when any of the related features is in use */ mask = XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM; if (hdr.xfeatures & mask) { u32 mxcsr[2]; offset = offsetof(struct fxregs_state, mxcsr); if (copy_from_buffer(mxcsr, offset, sizeof(mxcsr), kbuf, ubuf)) return -EFAULT; /* Reserved bits in MXCSR must be zero. */ if (mxcsr[0] & ~mxcsr_feature_mask) return -EINVAL; /* SSE and YMM require MXCSR even when FP is not in use. */ if (!(hdr.xfeatures & XFEATURE_MASK_FP)) { xsave->i387.mxcsr = mxcsr[0]; xsave->i387.mxcsr_mask = mxcsr[1]; } } for (i = 0; i < XFEATURE_MAX; i++) { mask = BIT_ULL(i); if (hdr.xfeatures & mask) { void *dst = __raw_xsave_addr(xsave, i); offset = xstate_offsets[i]; size = xstate_sizes[i]; if (copy_from_buffer(dst, offset, size, kbuf, ubuf)) return -EFAULT; } } if (hdr.xfeatures & XFEATURE_MASK_PKRU) { struct pkru_state *xpkru; xpkru = __raw_xsave_addr(xsave, XFEATURE_PKRU); *pkru = xpkru->pkru; } else { /* * KVM may pass NULL here to indicate that it does not need * PKRU updated. */ if (pkru) *pkru = 0; } /* * The state that came in from userspace was user-state only. * Mask all the user states out of 'xfeatures': */ xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL; /* * Add back in the features that came in from userspace: */ xsave->header.xfeatures |= hdr.xfeatures; return 0; } /* * Convert from a ptrace standard-format kernel buffer to kernel XSAVE[S] * format and copy to the target thread. Used by ptrace and KVM. */ int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru) { return copy_uabi_to_xstate(fpstate, kbuf, NULL, pkru); } /* * Convert from a sigreturn standard-format user-space buffer to kernel * XSAVE[S] format and copy to the target thread. This is called from the * sigreturn() and rt_sigreturn() system calls. */ int copy_sigframe_from_user_to_xstate(struct task_struct *tsk, const void __user *ubuf) { return copy_uabi_to_xstate(tsk->thread.fpu.fpstate, NULL, ubuf, &tsk->thread.pkru); } static bool validate_independent_components(u64 mask) { u64 xchk; if (WARN_ON_FPU(!cpu_feature_enabled(X86_FEATURE_XSAVES))) return false; xchk = ~xfeatures_mask_independent(); if (WARN_ON_ONCE(!mask || mask & xchk)) return false; return true; } /** * xsaves - Save selected components to a kernel xstate buffer * @xstate: Pointer to the buffer * @mask: Feature mask to select the components to save * * The @xstate buffer must be 64 byte aligned and correctly initialized as * XSAVES does not write the full xstate header. Before first use the * buffer should be zeroed otherwise a consecutive XRSTORS from that buffer * can #GP. * * The feature mask must be a subset of the independent features. 
*/ void xsaves(struct xregs_state *xstate, u64 mask) { int err; if (!validate_independent_components(mask)) return; XSTATE_OP(XSAVES, xstate, (u32)mask, (u32)(mask >> 32), err); WARN_ON_ONCE(err); } /** * xrstors - Restore selected components from a kernel xstate buffer * @xstate: Pointer to the buffer * @mask: Feature mask to select the components to restore * * The @xstate buffer must be 64 byte aligned and correctly initialized * otherwise XRSTORS from that buffer can #GP. * * Proper usage is to restore the state which was saved with * xsaves() into @xstate. * * The feature mask must be a subset of the independent features. */ void xrstors(struct xregs_state *xstate, u64 mask) { int err; if (!validate_independent_components(mask)) return; XSTATE_OP(XRSTORS, xstate, (u32)mask, (u32)(mask >> 32), err); WARN_ON_ONCE(err); } #if IS_ENABLED(CONFIG_KVM) void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature) { void *addr = get_xsave_addr(&fps->regs.xsave, xfeature); if (addr) memset(addr, 0, xstate_sizes[xfeature]); } EXPORT_SYMBOL_GPL(fpstate_clear_xstate_component); #endif #ifdef CONFIG_X86_64 #ifdef CONFIG_X86_DEBUG_FPU /* * Ensure that a subsequent XSAVE* or XRSTOR* instruction with RFBM=@mask * can safely operate on the @fpstate buffer. */ static bool xstate_op_valid(struct fpstate *fpstate, u64 mask, bool rstor) { u64 xfd = __this_cpu_read(xfd_state); if (fpstate->xfd == xfd) return true; /* * The XFD MSR does not match fpstate->xfd. That's invalid when * the passed in fpstate is current's fpstate. */ if (fpstate->xfd == current->thread.fpu.fpstate->xfd) return false; /* * XRSTOR(S) from init_fpstate are always correct as it will just * bring all components into init state and not read from the * buffer. XSAVE(S) raises #PF after init. */ if (fpstate == &init_fpstate) return rstor; /* * XSAVE(S): clone(), fpu_swap_kvm_fpstate() * XRSTORS(S): fpu_swap_kvm_fpstate() */ /* * No XSAVE/XRSTOR instructions (except XSAVE itself) touch * the buffer area for XFD-disabled state components. */ mask &= ~xfd; /* * Remove features which are valid in fpstate. They * have space allocated in fpstate. */ mask &= ~fpstate->xfeatures; /* * Any remaining state components in 'mask' might be written * by XSAVE/XRSTOR. Fail validation it found. */ return !mask; } void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rstor) { WARN_ON_ONCE(!xstate_op_valid(fpstate, mask, rstor)); } #endif /* CONFIG_X86_DEBUG_FPU */ static int __init xfd_update_static_branch(void) { /* * If init_fpstate.xfd has bits set then dynamic features are * available and the dynamic sizing must be enabled. */ if (init_fpstate.xfd) static_branch_enable(&__fpu_state_size_dynamic); return 0; } arch_initcall(xfd_update_static_branch) void fpstate_free(struct fpu *fpu) { if (fpu->fpstate && fpu->fpstate != &fpu->__fpstate) vfree(fpu->fpstate); } /** * fpstate_realloc - Reallocate struct fpstate for the requested new features * * @xfeatures: A bitmap of xstate features which extend the enabled features * of that task * @ksize: The required size for the kernel buffer * @usize: The required size for user space buffers * @guest_fpu: Pointer to a guest FPU container. NULL for host allocations * * Note vs. vmalloc(): If the task with a vzalloc()-allocated buffer * terminates quickly, vfree()-induced IPIs may be a concern, but tasks * with large states are likely to live longer. * * Returns: 0 on success, -ENOMEM on allocation error. 
*/ static int fpstate_realloc(u64 xfeatures, unsigned int ksize, unsigned int usize, struct fpu_guest *guest_fpu) { struct fpu *fpu = &current->thread.fpu; struct fpstate *curfps, *newfps = NULL; unsigned int fpsize; bool in_use; fpsize = ksize + ALIGN(offsetof(struct fpstate, regs), 64); newfps = vzalloc(fpsize); if (!newfps) return -ENOMEM; newfps->size = ksize; newfps->user_size = usize; newfps->is_valloc = true; /* * When a guest FPU is supplied, use @guest_fpu->fpstate * as reference independent whether it is in use or not. */ curfps = guest_fpu ? guest_fpu->fpstate : fpu->fpstate; /* Determine whether @curfps is the active fpstate */ in_use = fpu->fpstate == curfps; if (guest_fpu) { newfps->is_guest = true; newfps->is_confidential = curfps->is_confidential; newfps->in_use = curfps->in_use; guest_fpu->xfeatures |= xfeatures; guest_fpu->uabi_size = usize; } fpregs_lock(); /* * If @curfps is in use, ensure that the current state is in the * registers before swapping fpstate as that might invalidate it * due to layout changes. */ if (in_use && test_thread_flag(TIF_NEED_FPU_LOAD)) fpregs_restore_userregs(); newfps->xfeatures = curfps->xfeatures | xfeatures; newfps->user_xfeatures = curfps->user_xfeatures | xfeatures; newfps->xfd = curfps->xfd & ~xfeatures; /* Do the final updates within the locked region */ xstate_init_xcomp_bv(&newfps->regs.xsave, newfps->xfeatures); if (guest_fpu) { guest_fpu->fpstate = newfps; /* If curfps is active, update the FPU fpstate pointer */ if (in_use) fpu->fpstate = newfps; } else { fpu->fpstate = newfps; } if (in_use) xfd_update_state(fpu->fpstate); fpregs_unlock(); /* Only free valloc'ed state */ if (curfps && curfps->is_valloc) vfree(curfps); return 0; } static int validate_sigaltstack(unsigned int usize) { struct task_struct *thread, *leader = current->group_leader; unsigned long framesize = get_sigframe_size(); lockdep_assert_held(&current->sighand->siglock); /* get_sigframe_size() is based on fpu_user_cfg.max_size */ framesize -= fpu_user_cfg.max_size; framesize += usize; for_each_thread(leader, thread) { if (thread->sas_ss_size && thread->sas_ss_size < framesize) return -ENOSPC; } return 0; } static int __xstate_request_perm(u64 permitted, u64 requested, bool guest) { /* * This deliberately does not exclude !XSAVES as we still might * decide to optionally context switch XCR0 or talk the silicon * vendors into extending XFD for the pre AMX states, especially * AVX512. */ bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED); struct fpu *fpu = &current->group_leader->thread.fpu; struct fpu_state_perm *perm; unsigned int ksize, usize; u64 mask; int ret = 0; /* Check whether fully enabled */ if ((permitted & requested) == requested) return 0; /* Calculate the resulting kernel state size */ mask = permitted | requested; /* Take supervisor states into account on the host */ if (!guest) mask |= xfeatures_mask_supervisor(); ksize = xstate_calculate_size(mask, compacted); /* Calculate the resulting user state size */ mask &= XFEATURE_MASK_USER_SUPPORTED; usize = xstate_calculate_size(mask, false); if (!guest) { ret = validate_sigaltstack(usize); if (ret) return ret; } perm = guest ? 
&fpu->guest_perm : &fpu->perm; /* Pairs with the READ_ONCE() in xstate_get_group_perm() */ WRITE_ONCE(perm->__state_perm, mask); /* Protected by sighand lock */ perm->__state_size = ksize; perm->__user_state_size = usize; return ret; } /* * Permissions array to map facilities with more than one component */ static const u64 xstate_prctl_req[XFEATURE_MAX] = { [XFEATURE_XTILE_DATA] = XFEATURE_MASK_XTILE_DATA, }; static int xstate_request_perm(unsigned long idx, bool guest) { u64 permitted, requested; int ret; if (idx >= XFEATURE_MAX) return -EINVAL; /* * Look up the facility mask which can require more than * one xstate component. */ idx = array_index_nospec(idx, ARRAY_SIZE(xstate_prctl_req)); requested = xstate_prctl_req[idx]; if (!requested) return -EOPNOTSUPP; if ((fpu_user_cfg.max_features & requested) != requested) return -EOPNOTSUPP; /* Lockless quick check */ permitted = xstate_get_group_perm(guest); if ((permitted & requested) == requested) return 0; /* Protect against concurrent modifications */ spin_lock_irq(&current->sighand->siglock); permitted = xstate_get_group_perm(guest); /* First vCPU allocation locks the permissions. */ if (guest && (permitted & FPU_GUEST_PERM_LOCKED)) ret = -EBUSY; else ret = __xstate_request_perm(permitted, requested, guest); spin_unlock_irq(&current->sighand->siglock); return ret; } int __xfd_enable_feature(u64 xfd_err, struct fpu_guest *guest_fpu) { u64 xfd_event = xfd_err & XFEATURE_MASK_USER_DYNAMIC; struct fpu_state_perm *perm; unsigned int ksize, usize; struct fpu *fpu; if (!xfd_event) { if (!guest_fpu) pr_err_once("XFD: Invalid xfd error: %016llx\n", xfd_err); return 0; } /* Protect against concurrent modifications */ spin_lock_irq(&current->sighand->siglock); /* If not permitted let it die */ if ((xstate_get_group_perm(!!guest_fpu) & xfd_event) != xfd_event) { spin_unlock_irq(&current->sighand->siglock); return -EPERM; } fpu = &current->group_leader->thread.fpu; perm = guest_fpu ? &fpu->guest_perm : &fpu->perm; ksize = perm->__state_size; usize = perm->__user_state_size; /* * The feature is permitted. State size is sufficient. Dropping * the lock is safe here even if more features are added from * another task, the retrieved buffer sizes are valid for the * currently requested feature(s). */ spin_unlock_irq(&current->sighand->siglock); /* * Try to allocate a new fpstate. If that fails there is no way * out. */ if (fpstate_realloc(xfd_event, ksize, usize, guest_fpu)) return -EFAULT; return 0; } int xfd_enable_feature(u64 xfd_err) { return __xfd_enable_feature(xfd_err, NULL); } #else /* CONFIG_X86_64 */ static inline int xstate_request_perm(unsigned long idx, bool guest) { return -EPERM; } #endif /* !CONFIG_X86_64 */ u64 xstate_get_guest_group_perm(void) { return xstate_get_group_perm(true); } EXPORT_SYMBOL_GPL(xstate_get_guest_group_perm); /** * fpu_xstate_prctl - xstate permission operations * @option: A subfunction of arch_prctl() * @arg2: option argument * Return: 0 if successful; otherwise, an error code * * Option arguments: * * ARCH_GET_XCOMP_SUPP: Pointer to user space u64 to store the info * ARCH_GET_XCOMP_PERM: Pointer to user space u64 to store the info * ARCH_REQ_XCOMP_PERM: Facility number requested * * For facilities which require more than one XSTATE component, the request * must be the highest state component number related to that facility, * e.g. for AMX which requires XFEATURE_XTILE_CFG(17) and * XFEATURE_XTILE_DATA(18) this would be XFEATURE_XTILE_DATA(18). 
*/ long fpu_xstate_prctl(int option, unsigned long arg2) { u64 __user *uptr = (u64 __user *)arg2; u64 permitted, supported; unsigned long idx = arg2; bool guest = false; switch (option) { case ARCH_GET_XCOMP_SUPP: supported = fpu_user_cfg.max_features | fpu_user_cfg.legacy_features; return put_user(supported, uptr); case ARCH_GET_XCOMP_PERM: /* * Lockless snapshot as it can also change right after the * dropping the lock. */ permitted = xstate_get_host_group_perm(); permitted &= XFEATURE_MASK_USER_SUPPORTED; return put_user(permitted, uptr); case ARCH_GET_XCOMP_GUEST_PERM: permitted = xstate_get_guest_group_perm(); permitted &= XFEATURE_MASK_USER_SUPPORTED; return put_user(permitted, uptr); case ARCH_REQ_XCOMP_GUEST_PERM: guest = true; fallthrough; case ARCH_REQ_XCOMP_PERM: if (!IS_ENABLED(CONFIG_X86_64)) return -EOPNOTSUPP; return xstate_request_perm(idx, guest); default: return -EINVAL; } } #ifdef CONFIG_PROC_PID_ARCH_STATUS /* * Report the amount of time elapsed in millisecond since last AVX512 * use in the task. */ static void avx512_status(struct seq_file *m, struct task_struct *task) { unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp); long delta; if (!timestamp) { /* * Report -1 if no AVX512 usage */ delta = -1; } else { delta = (long)(jiffies - timestamp); /* * Cap to LONG_MAX if time difference > LONG_MAX */ if (delta < 0) delta = LONG_MAX; delta = jiffies_to_msecs(delta); } seq_put_decimal_ll(m, "AVX512_elapsed_ms:\t", delta); seq_putc(m, '\n'); } /* * Report architecture specific information */ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { /* * Report AVX512 state if the processor and build option supported. */ if (cpu_feature_enabled(X86_FEATURE_AVX512F)) avx512_status(m, task); return 0; } #endif /* CONFIG_PROC_PID_ARCH_STATUS */ #ifdef CONFIG_COREDUMP static const char owner_name[] = "LINUX"; /* * Dump type, size, offset and flag values for every xfeature that is present. */ static int dump_xsave_layout_desc(struct coredump_params *cprm) { int num_records = 0; int i; for_each_extended_xfeature(i, fpu_user_cfg.max_features) { struct x86_xfeat_component xc = { .type = i, .size = xstate_sizes[i], .offset = xstate_offsets[i], /* reserved for future use */ .flags = 0, }; if (!dump_emit(cprm, &xc, sizeof(xc))) return 0; num_records++; } return num_records; } static u32 get_xsave_desc_size(void) { u32 cnt = 0; u32 i; for_each_extended_xfeature(i, fpu_user_cfg.max_features) cnt++; return cnt * (sizeof(struct x86_xfeat_component)); } int elf_coredump_extra_notes_write(struct coredump_params *cprm) { int num_records = 0; struct elf_note en; if (!fpu_user_cfg.max_features) return 0; en.n_namesz = sizeof(owner_name); en.n_descsz = get_xsave_desc_size(); en.n_type = NT_X86_XSAVE_LAYOUT; if (!dump_emit(cprm, &en, sizeof(en))) return 1; if (!dump_emit(cprm, owner_name, en.n_namesz)) return 1; if (!dump_align(cprm, 4)) return 1; num_records = dump_xsave_layout_desc(cprm); if (!num_records) return 1; /* Total size should be equal to the number of records */ if ((sizeof(struct x86_xfeat_component) * num_records) != en.n_descsz) return 1; return 0; } int elf_coredump_extra_notes_size(void) { int size; if (!fpu_user_cfg.max_features) return 0; /* .note header */ size = sizeof(struct elf_note); /* Name plus alignment to 4 bytes */ size += roundup(sizeof(owner_name), 4); size += get_xsave_desc_size(); return size; } #endif /* CONFIG_COREDUMP */
// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * dvbdev.c
 *
 * Copyright (C) 2000 Ralph Metzler <ralph@convergence.de>
 *                  & Marcus Metzler <marcus@convergence.de>
 * for convergence integrated media GmbH
 */

#define pr_fmt(fmt) "dvbdev: " fmt

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <media/dvbdev.h>

/* Due to enum tuner_pad_index */
#include <media/tuner.h>

static DEFINE_MUTEX(dvbdev_mutex);
static LIST_HEAD(dvbdevfops_list);
static int dvbdev_debug;

module_param(dvbdev_debug, int, 0644);
MODULE_PARM_DESC(dvbdev_debug, "Turn on/off device debugging (default:off).");

#define dprintk(fmt, arg...)						\
	do {								\
		if (dvbdev_debug)					\
			printk(KERN_DEBUG pr_fmt("%s: " fmt),		\
			       __func__, ##arg);			\
	} while (0)

static LIST_HEAD(dvb_adapter_list);
static DEFINE_MUTEX(dvbdev_register_lock);

static const char * const dnames[] = {
	[DVB_DEVICE_VIDEO] =		"video",
	[DVB_DEVICE_AUDIO] =		"audio",
	[DVB_DEVICE_SEC] =		"sec",
	[DVB_DEVICE_FRONTEND] =		"frontend",
	[DVB_DEVICE_DEMUX] =		"demux",
	[DVB_DEVICE_DVR] =		"dvr",
	[DVB_DEVICE_CA] =		"ca",
	[DVB_DEVICE_NET] =		"net",
	[DVB_DEVICE_OSD] =		"osd"
};

#ifdef CONFIG_DVB_DYNAMIC_MINORS
#define MAX_DVB_MINORS		256
#define DVB_MAX_IDS		MAX_DVB_MINORS
#else
#define DVB_MAX_IDS		4

static const u8 minor_type[] = {
	[DVB_DEVICE_VIDEO]	= 0,
	[DVB_DEVICE_AUDIO]	= 1,
	[DVB_DEVICE_SEC]	= 2,
	[DVB_DEVICE_FRONTEND]	= 3,
	[DVB_DEVICE_DEMUX]	= 4,
	[DVB_DEVICE_DVR]	= 5,
	[DVB_DEVICE_CA]		= 6,
	[DVB_DEVICE_NET]	= 7,
	[DVB_DEVICE_OSD]	= 8,
};

#define nums2minor(num, type, id) \
	(((num) << 6) | ((id) << 4) | minor_type[type])

#define MAX_DVB_MINORS	(DVB_MAX_ADAPTERS * 64)
#endif

static struct class *dvb_class;

static struct dvb_device *dvb_minors[MAX_DVB_MINORS];
static DECLARE_RWSEM(minor_rwsem);

static int dvb_device_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev;
	unsigned int minor = iminor(inode);

	if (minor >= MAX_DVB_MINORS)
		return -ENODEV;

	mutex_lock(&dvbdev_mutex);
	down_read(&minor_rwsem);

	dvbdev = dvb_minors[minor];

	if (dvbdev && dvbdev->fops) {
		int err = 0;
		const struct file_operations *new_fops;

		new_fops = fops_get(dvbdev->fops);
		if (!new_fops)
			goto fail;
		file->private_data = dvb_device_get(dvbdev);
		replace_fops(file, new_fops);
		if (file->f_op->open)
			err = file->f_op->open(inode, file);
		up_read(&minor_rwsem);
		mutex_unlock(&dvbdev_mutex);
		if (err)
			dvb_device_put(dvbdev);
		return err;
	}
fail:
	up_read(&minor_rwsem);
	mutex_unlock(&dvbdev_mutex);
	return -ENODEV;
}

static const struct file_operations dvb_device_fops = {
	.owner =	THIS_MODULE,
	.open =		dvb_device_open,
	.llseek =	noop_llseek,
};

static struct cdev dvb_device_cdev;

int dvb_generic_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;

	if (!dvbdev)
		return -ENODEV;

	if (!dvbdev->users)
		return -EBUSY;

	if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
		if (!dvbdev->readers)
			return -EBUSY;
		dvbdev->readers--;
	} else {
		if (!dvbdev->writers)
			return -EBUSY;
		dvbdev->writers--;
	}

	dvbdev->users--;
	return 0;
}
EXPORT_SYMBOL(dvb_generic_open);

int dvb_generic_release(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;

	if (!dvbdev)
		return -ENODEV;

	if ((file->f_flags & O_ACCMODE) == O_RDONLY)
		dvbdev->readers++;
	else
		dvbdev->writers++;

	dvbdev->users++;

	dvb_device_put(dvbdev);

	return 0;
}
EXPORT_SYMBOL(dvb_generic_release);

long dvb_generic_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct dvb_device *dvbdev = file->private_data;

	if (!dvbdev)
		return -ENODEV;

	if (!dvbdev->kernel_ioctl)
		return -EINVAL;

	return dvb_usercopy(file, cmd, arg, dvbdev->kernel_ioctl);
}
EXPORT_SYMBOL(dvb_generic_ioctl);

static int dvbdev_get_free_id(struct dvb_adapter *adap, int type)
{
	u32 id = 0;

	while (id < DVB_MAX_IDS) {
		struct dvb_device *dev;

		list_for_each_entry(dev, &adap->device_list, list_head)
			if (dev->type == type && dev->id == id)
				goto skip;
		return id;
skip:
		id++;
	}
	return -ENFILE;
}

static void dvb_media_device_free(struct dvb_device *dvbdev)
{
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
	if (dvbdev->entity) {
		media_device_unregister_entity(dvbdev->entity);
		kfree(dvbdev->entity);
		kfree(dvbdev->pads);
		dvbdev->entity = NULL;
		dvbdev->pads = NULL;
	}

	if (dvbdev->tsout_entity) {
		int i;

		for (i = 0; i < dvbdev->tsout_num_entities; i++) {
			media_device_unregister_entity(&dvbdev->tsout_entity[i]);
			kfree(dvbdev->tsout_entity[i].name);
		}
		kfree(dvbdev->tsout_entity);
		kfree(dvbdev->tsout_pads);
		dvbdev->tsout_entity = NULL;
		dvbdev->tsout_pads = NULL;
		dvbdev->tsout_num_entities = 0;
	}

	if (dvbdev->intf_devnode) {
		media_devnode_remove(dvbdev->intf_devnode);
		dvbdev->intf_devnode = NULL;
	}

	if (dvbdev->adapter->conn) {
		media_device_unregister_entity(dvbdev->adapter->conn);
		kfree(dvbdev->adapter->conn);
		dvbdev->adapter->conn = NULL;
		kfree(dvbdev->adapter->conn_pads);
		dvbdev->adapter->conn_pads = NULL;
	}
#endif
}

#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
static int dvb_create_tsout_entity(struct dvb_device *dvbdev,
				   const char *name, int npads)
{
	int i;

	dvbdev->tsout_pads = kcalloc(npads, sizeof(*dvbdev->tsout_pads),
				     GFP_KERNEL);
	if (!dvbdev->tsout_pads)
		return -ENOMEM;

	dvbdev->tsout_entity = kcalloc(npads, sizeof(*dvbdev->tsout_entity),
				       GFP_KERNEL);
	if (!dvbdev->tsout_entity)
		return -ENOMEM;

	dvbdev->tsout_num_entities = npads;

	for (i = 0; i < npads; i++) {
		struct media_pad *pads = &dvbdev->tsout_pads[i];
		struct media_entity *entity = &dvbdev->tsout_entity[i];
		int ret;

		entity->name = kasprintf(GFP_KERNEL, "%s #%d", name, i);
		if (!entity->name)
			return -ENOMEM;

		entity->function = MEDIA_ENT_F_IO_DTV;
		pads->flags = MEDIA_PAD_FL_SINK;

		ret = media_entity_pads_init(entity, 1, pads);
		if (ret < 0)
			return ret;

		ret = media_device_register_entity(dvbdev->adapter->mdev,
						   entity);
		if (ret < 0)
			return ret;
	}
	return 0;
}

#define DEMUX_TSOUT	"demux-tsout"
#define DVR_TSOUT	"dvr-tsout"

static int dvb_create_media_entity(struct dvb_device *dvbdev,
				   int type, int demux_sink_pads)
{
	int i, ret, npads;

	switch (type) {
	case DVB_DEVICE_FRONTEND:
		npads = 2;
		break;
	case DVB_DEVICE_DVR:
		ret = dvb_create_tsout_entity(dvbdev, DVR_TSOUT,
					      demux_sink_pads);
		return ret;
	case DVB_DEVICE_DEMUX:
		npads = 1 + demux_sink_pads;
		ret = dvb_create_tsout_entity(dvbdev, DEMUX_TSOUT,
					      demux_sink_pads);
		if (ret < 0)
			return ret;
		break;
	case DVB_DEVICE_CA:
		npads = 2;
		break;
	case DVB_DEVICE_NET:
		/*
		 * We should be creating entities for the MPE/ULE
		 * decapsulation hardware (or software implementation).
		 *
		 * However, the number of MPE/ULE decaps may not be
		 * fixed. As we don't yet have dynamic support for PADs at
		 * the Media Controller, let's not create the decap
		 * entities yet.
		 */
		return 0;
	default:
		return 0;
	}

	dvbdev->entity = kzalloc(sizeof(*dvbdev->entity), GFP_KERNEL);
	if (!dvbdev->entity)
		return -ENOMEM;

	dvbdev->entity->name = dvbdev->name;

	if (npads) {
		dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads),
				       GFP_KERNEL);
		if (!dvbdev->pads) {
			kfree(dvbdev->entity);
			dvbdev->entity = NULL;
			return -ENOMEM;
		}
	}

	switch (type) {
	case DVB_DEVICE_FRONTEND:
		dvbdev->entity->function = MEDIA_ENT_F_DTV_DEMOD;
		dvbdev->pads[0].flags = MEDIA_PAD_FL_SINK;
		dvbdev->pads[1].flags = MEDIA_PAD_FL_SOURCE;
		break;
	case DVB_DEVICE_DEMUX:
		dvbdev->entity->function = MEDIA_ENT_F_TS_DEMUX;
		dvbdev->pads[0].flags = MEDIA_PAD_FL_SINK;
		for (i = 1; i < npads; i++)
			dvbdev->pads[i].flags = MEDIA_PAD_FL_SOURCE;
		break;
	case DVB_DEVICE_CA:
		dvbdev->entity->function = MEDIA_ENT_F_DTV_CA;
		dvbdev->pads[0].flags = MEDIA_PAD_FL_SINK;
		dvbdev->pads[1].flags = MEDIA_PAD_FL_SOURCE;
		break;
	default:
		/* Should never happen, as the first switch prevents it */
		kfree(dvbdev->entity);
		kfree(dvbdev->pads);
		dvbdev->entity = NULL;
		dvbdev->pads = NULL;
		return 0;
	}

	if (npads) {
		ret = media_entity_pads_init(dvbdev->entity, npads,
					     dvbdev->pads);
		if (ret)
			return ret;
	}
	ret = media_device_register_entity(dvbdev->adapter->mdev,
					   dvbdev->entity);
	if (ret)
		return ret;

	pr_info("%s: media entity '%s' registered.\n",
		__func__, dvbdev->entity->name);

	return 0;
}
#endif

static int dvb_register_media_device(struct dvb_device *dvbdev,
				     int type, int minor,
				     unsigned int demux_sink_pads)
{
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
	struct media_link *link;
	u32 intf_type;
	int ret;

	if (!dvbdev->adapter->mdev)
		return 0;

	ret = dvb_create_media_entity(dvbdev, type, demux_sink_pads);
	if (ret)
		return ret;

	switch (type) {
	case DVB_DEVICE_FRONTEND:
		intf_type = MEDIA_INTF_T_DVB_FE;
		break;
	case DVB_DEVICE_DEMUX:
		intf_type = MEDIA_INTF_T_DVB_DEMUX;
		break;
	case DVB_DEVICE_DVR:
		intf_type = MEDIA_INTF_T_DVB_DVR;
		break;
	case DVB_DEVICE_CA:
		intf_type = MEDIA_INTF_T_DVB_CA;
		break;
	case DVB_DEVICE_NET:
		intf_type = MEDIA_INTF_T_DVB_NET;
		break;
	default:
		return 0;
	}

	dvbdev->intf_devnode = media_devnode_create(dvbdev->adapter->mdev,
						    intf_type, 0,
						    DVB_MAJOR, minor);
	if (!dvbdev->intf_devnode)
		return -ENOMEM;

	/*
	 * Create the "obvious" link, e.g. the ones that represent
	 * a direct association between an interface and an entity.
	 * Other links should be created elsewhere, like:
	 *		DVB FE intf    -> tuner
	 *		DVB demux intf -> dvr
	 */
	if (!dvbdev->entity)
		return 0;

	link = media_create_intf_link(dvbdev->entity,
				      &dvbdev->intf_devnode->intf,
				      MEDIA_LNK_FL_ENABLED |
				      MEDIA_LNK_FL_IMMUTABLE);
	if (!link)
		return -ENOMEM;
#endif
	return 0;
}

int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
			const struct dvb_device *template, void *priv,
			enum dvb_device_type type, int demux_sink_pads)
{
	struct dvb_device *dvbdev;
	struct file_operations *dvbdevfops = NULL;
	struct dvbdevfops_node *node = NULL, *new_node = NULL;
	struct device *clsdev;
	int minor;
	int id, ret;

	mutex_lock(&dvbdev_register_lock);

	id = dvbdev_get_free_id(adap, type);
	if (id < 0) {
		mutex_unlock(&dvbdev_register_lock);
		*pdvbdev = NULL;
		pr_err("%s: couldn't find free device id\n", __func__);
		return -ENFILE;
	}

	*pdvbdev = dvbdev = kzalloc(sizeof(*dvbdev), GFP_KERNEL);
	if (!dvbdev) {
		mutex_unlock(&dvbdev_register_lock);
		return -ENOMEM;
	}

	/*
	 * When a device of the same type is probe()d more than once,
	 * the first allocated fops are used. This prevents memory leaks
	 * that can occur when the same device is probe()d repeatedly.
	 */
	list_for_each_entry(node, &dvbdevfops_list, list_head) {
		if (node->fops->owner == adap->module &&
		    node->type == type && node->template == template) {
			dvbdevfops = node->fops;
			break;
		}
	}

	if (!dvbdevfops) {
		dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops),
				     GFP_KERNEL);
		if (!dvbdevfops) {
			kfree(dvbdev);
			*pdvbdev = NULL;
			mutex_unlock(&dvbdev_register_lock);
			return -ENOMEM;
		}

		new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
		if (!new_node) {
			kfree(dvbdevfops);
			kfree(dvbdev);
			*pdvbdev = NULL;
			mutex_unlock(&dvbdev_register_lock);
			return -ENOMEM;
		}

		new_node->fops = dvbdevfops;
		new_node->type = type;
		new_node->template = template;
		list_add_tail(&new_node->list_head, &dvbdevfops_list);
	}

	memcpy(dvbdev, template, sizeof(struct dvb_device));
	kref_init(&dvbdev->ref);
	dvbdev->type = type;
	dvbdev->id = id;
	dvbdev->adapter = adap;
	dvbdev->priv = priv;
	dvbdev->fops = dvbdevfops;
	init_waitqueue_head(&dvbdev->wait_queue);
	dvbdevfops->owner = adap->module;
	list_add_tail(&dvbdev->list_head, &adap->device_list);

	down_write(&minor_rwsem);
#ifdef CONFIG_DVB_DYNAMIC_MINORS
	for (minor = 0; minor < MAX_DVB_MINORS; minor++)
		if (!dvb_minors[minor])
			break;
#else
	minor = nums2minor(adap->num, type, id);
#endif
	if (minor >= MAX_DVB_MINORS) {
		if (new_node) {
			list_del(&new_node->list_head);
			kfree(dvbdevfops);
			kfree(new_node);
		}
		list_del(&dvbdev->list_head);
		kfree(dvbdev);
		*pdvbdev = NULL;
		up_write(&minor_rwsem);
		mutex_unlock(&dvbdev_register_lock);
		return -EINVAL;
	}

	dvbdev->minor = minor;
	dvb_minors[minor] = dvb_device_get(dvbdev);
	up_write(&minor_rwsem);

	ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
	if (ret) {
		pr_err("%s: dvb_register_media_device failed to create the mediagraph\n",
		       __func__);
		if (new_node) {
			list_del(&new_node->list_head);
			kfree(dvbdevfops);
			kfree(new_node);
		}
		dvb_media_device_free(dvbdev);
		list_del(&dvbdev->list_head);
		kfree(dvbdev);
		*pdvbdev = NULL;
		mutex_unlock(&dvbdev_register_lock);
		return ret;
	}

	clsdev = device_create(dvb_class, adap->device,
			       MKDEV(DVB_MAJOR, minor),
			       dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
	if (IS_ERR(clsdev)) {
		pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
		       __func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
		if (new_node) {
			list_del(&new_node->list_head);
			kfree(dvbdevfops);
			kfree(new_node);
		}
		dvb_media_device_free(dvbdev);
		list_del(&dvbdev->list_head);
		kfree(dvbdev);
		*pdvbdev = NULL;
		mutex_unlock(&dvbdev_register_lock);
		return PTR_ERR(clsdev);
	}

	dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
		adap->num, dnames[type], id, minor, minor);
	mutex_unlock(&dvbdev_register_lock);
	return 0;
}
EXPORT_SYMBOL(dvb_register_device);

void dvb_remove_device(struct dvb_device *dvbdev)
{
	if (!dvbdev)
		return;

	down_write(&minor_rwsem);
	dvb_minors[dvbdev->minor] = NULL;
	dvb_device_put(dvbdev);
	up_write(&minor_rwsem);

	dvb_media_device_free(dvbdev);

	device_destroy(dvb_class, MKDEV(DVB_MAJOR, dvbdev->minor));

	list_del(&dvbdev->list_head);
}
EXPORT_SYMBOL(dvb_remove_device);

static void dvb_free_device(struct kref *ref)
{
	struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref);

	kfree(dvbdev);
}

struct dvb_device *dvb_device_get(struct dvb_device *dvbdev)
{
	kref_get(&dvbdev->ref);
	return dvbdev;
}
EXPORT_SYMBOL(dvb_device_get);

void dvb_device_put(struct dvb_device *dvbdev)
{
	if (dvbdev)
		kref_put(&dvbdev->ref, dvb_free_device);
}

void dvb_unregister_device(struct dvb_device *dvbdev)
{
	dvb_remove_device(dvbdev);
	dvb_device_put(dvbdev);
}
EXPORT_SYMBOL(dvb_unregister_device);

#ifdef CONFIG_MEDIA_CONTROLLER_DVB
static int dvb_create_io_intf_links(struct dvb_adapter *adap,
				    struct media_interface *intf,
				    char *name)
{
	struct media_device *mdev = adap->mdev;
	struct media_entity *entity;
	struct media_link *link;

	media_device_for_each_entity(entity, mdev) {
		if (entity->function == MEDIA_ENT_F_IO_DTV) {
			if (strncmp(entity->name, name, strlen(name)))
				continue;
			link = media_create_intf_link(entity, intf,
						      MEDIA_LNK_FL_ENABLED |
						      MEDIA_LNK_FL_IMMUTABLE);
			if (!link)
				return -ENOMEM;
		}
	}
	return 0;
}

int dvb_create_media_graph(struct dvb_adapter *adap,
			   bool create_rf_connector)
{
	struct media_device *mdev = adap->mdev;
	struct media_entity *entity, *tuner = NULL, *demod = NULL, *conn;
	struct media_entity *demux = NULL, *ca = NULL;
	struct media_link *link;
	struct media_interface *intf;
	unsigned int demux_pad = 0;
	unsigned int dvr_pad = 0;
	unsigned int ntuner = 0, ndemod = 0;
	int ret, pad_source, pad_sink;
	static const char *connector_name = "Television";

	if (!mdev)
		return 0;

	media_device_for_each_entity(entity, mdev) {
		switch (entity->function) {
		case MEDIA_ENT_F_TUNER:
			tuner = entity;
			ntuner++;
			break;
		case MEDIA_ENT_F_DTV_DEMOD:
			demod = entity;
			ndemod++;
			break;
		case MEDIA_ENT_F_TS_DEMUX:
			demux = entity;
			break;
		case MEDIA_ENT_F_DTV_CA:
			ca = entity;
			break;
		}
	}

	/*
	 * Prepare to signal to media_create_pad_links() that multiple
	 * entities of the same type exist and 1:n or n:1 links need to be
	 * created.
	 * NOTE: if both tuner and demod have multiple instances, it is up
	 * to the caller driver to create such links.
	 */
	if (ntuner > 1)
		tuner = NULL;
	if (ndemod > 1)
		demod = NULL;

	if (create_rf_connector) {
		conn = kzalloc(sizeof(*conn), GFP_KERNEL);
		if (!conn)
			return -ENOMEM;
		adap->conn = conn;

		adap->conn_pads = kzalloc(sizeof(*adap->conn_pads), GFP_KERNEL);
		if (!adap->conn_pads)
			return -ENOMEM;

		conn->flags = MEDIA_ENT_FL_CONNECTOR;
		conn->function = MEDIA_ENT_F_CONN_RF;
		conn->name = connector_name;
		adap->conn_pads->flags = MEDIA_PAD_FL_SOURCE;

		ret = media_entity_pads_init(conn, 1, adap->conn_pads);
		if (ret)
			return ret;

		ret = media_device_register_entity(mdev, conn);
		if (ret)
			return ret;

		if (!ntuner) {
			ret = media_create_pad_links(mdev,
						     MEDIA_ENT_F_CONN_RF,
						     conn, 0,
						     MEDIA_ENT_F_DTV_DEMOD,
						     demod, 0,
						     MEDIA_LNK_FL_ENABLED,
						     false);
		} else {
			pad_sink = media_get_pad_index(tuner,
						       MEDIA_PAD_FL_SINK,
						       PAD_SIGNAL_ANALOG);
			if (pad_sink < 0)
				return -EINVAL;
			ret = media_create_pad_links(mdev,
						     MEDIA_ENT_F_CONN_RF,
						     conn, 0,
						     MEDIA_ENT_F_TUNER,
						     tuner, pad_sink,
						     MEDIA_LNK_FL_ENABLED,
						     false);
		}
		if (ret)
			return ret;
	}

	if (ntuner && ndemod) {
		/* NOTE: first found tuner source pad presumed correct */
		pad_source = media_get_pad_index(tuner, MEDIA_PAD_FL_SOURCE,
						 PAD_SIGNAL_ANALOG);
		if (pad_source < 0)
			return -EINVAL;
		ret = media_create_pad_links(mdev,
					     MEDIA_ENT_F_TUNER,
					     tuner, pad_source,
					     MEDIA_ENT_F_DTV_DEMOD,
					     demod, 0,
					     MEDIA_LNK_FL_ENABLED,
					     false);
		if (ret)
			return ret;
	}

	if (ndemod && demux) {
		ret = media_create_pad_links(mdev,
					     MEDIA_ENT_F_DTV_DEMOD,
					     demod, 1,
					     MEDIA_ENT_F_TS_DEMUX,
					     demux, 0,
					     MEDIA_LNK_FL_ENABLED, false);
		if (ret)
			return ret;
	}
	if (demux && ca) {
		ret = media_create_pad_link(demux, 1, ca,
					    0, MEDIA_LNK_FL_ENABLED);
		if (ret)
			return ret;
	}

	/* Create demux links for each ringbuffer/pad */
	if (demux) {
		media_device_for_each_entity(entity, mdev) {
			if (entity->function == MEDIA_ENT_F_IO_DTV) {
				if (!strncmp(entity->name, DVR_TSOUT,
					     strlen(DVR_TSOUT))) {
					ret = media_create_pad_link(demux,
								    ++dvr_pad,
								    entity, 0, 0);
					if (ret)
						return ret;
				}
				if (!strncmp(entity->name, DEMUX_TSOUT,
					     strlen(DEMUX_TSOUT))) {
					ret = media_create_pad_link(demux,
								    ++demux_pad,
								    entity, 0, 0);
					if (ret)
						return ret;
				}
			}
		}
	}

	/* Create interface links for FE->tuner, DVR->demux and CA->ca */
	media_device_for_each_intf(intf, mdev) {
		if (intf->type == MEDIA_INTF_T_DVB_CA && ca) {
			link = media_create_intf_link(ca, intf,
						      MEDIA_LNK_FL_ENABLED |
						      MEDIA_LNK_FL_IMMUTABLE);
			if (!link)
				return -ENOMEM;
		}

		if (intf->type == MEDIA_INTF_T_DVB_FE && tuner) {
			link = media_create_intf_link(tuner, intf,
						      MEDIA_LNK_FL_ENABLED |
						      MEDIA_LNK_FL_IMMUTABLE);
			if (!link)
				return -ENOMEM;
		}
#if 0
		/*
		 * Indirect link - let's not create yet, as we don't know how
		 * to handle indirect links, nor if this will
		 * actually be needed.
		 */
		if (intf->type == MEDIA_INTF_T_DVB_DVR && demux) {
			link = media_create_intf_link(demux, intf,
						      MEDIA_LNK_FL_ENABLED |
						      MEDIA_LNK_FL_IMMUTABLE);
			if (!link)
				return -ENOMEM;
		}
#endif
		if (intf->type == MEDIA_INTF_T_DVB_DVR) {
			ret = dvb_create_io_intf_links(adap, intf, DVR_TSOUT);
			if (ret)
				return ret;
		}
		if (intf->type == MEDIA_INTF_T_DVB_DEMUX) {
			ret = dvb_create_io_intf_links(adap, intf, DEMUX_TSOUT);
			if (ret)
				return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(dvb_create_media_graph);
#endif

static int dvbdev_check_free_adapter_num(int num)
{
	struct list_head *entry;

	list_for_each(entry, &dvb_adapter_list) {
		struct dvb_adapter *adap;

		adap = list_entry(entry, struct dvb_adapter, list_head);
		if (adap->num == num)
			return 0;
	}
	return 1;
}

static int dvbdev_get_free_adapter_num(void)
{
	int num = 0;

	while (num < DVB_MAX_ADAPTERS) {
		if (dvbdev_check_free_adapter_num(num))
			return num;
		num++;
	}
	return -ENFILE;
}

int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
			 struct module *module, struct device *device,
			 short *adapter_nums)
{
	int i, num;

	mutex_lock(&dvbdev_register_lock);

	for (i = 0; i < DVB_MAX_ADAPTERS; ++i) {
		num = adapter_nums[i];
		if (num >= 0 && num < DVB_MAX_ADAPTERS) {
			/* use the one the driver asked for */
			if (dvbdev_check_free_adapter_num(num))
				break;
		} else {
			num = dvbdev_get_free_adapter_num();
			break;
		}
		num = -1;
	}

	if (num < 0) {
		mutex_unlock(&dvbdev_register_lock);
		return -ENFILE;
	}

	memset(adap, 0, sizeof(struct dvb_adapter));
	INIT_LIST_HEAD(&adap->device_list);

	pr_info("DVB: registering new adapter (%s)\n", name);

	adap->num = num;
	adap->name = name;
	adap->module = module;
	adap->device = device;
	adap->mfe_shared = 0;
	adap->mfe_dvbdev = NULL;
	mutex_init(&adap->mfe_lock);

#ifdef CONFIG_MEDIA_CONTROLLER_DVB
	mutex_init(&adap->mdev_lock);
#endif

	list_add_tail(&adap->list_head, &dvb_adapter_list);

	mutex_unlock(&dvbdev_register_lock);

	return num;
}
EXPORT_SYMBOL(dvb_register_adapter);

int dvb_unregister_adapter(struct dvb_adapter *adap)
{
	mutex_lock(&dvbdev_register_lock);
	list_del(&adap->list_head);
	mutex_unlock(&dvbdev_register_lock);
	return 0;
}
EXPORT_SYMBOL(dvb_unregister_adapter);

/*
 * if the miracle happens and "generic_usercopy()" is included into
 * the kernel, then this can vanish. please don't make the mistake and
 * define this as video_usercopy(). this will introduce a dependency
 * to the v4l "videodev.o" module, which is unnecessary for some
 * cards (i.e. the budget dvb-cards don't need the v4l module...)
 */
int dvb_usercopy(struct file *file,
		 unsigned int cmd, unsigned long arg,
		 int (*func)(struct file *file,
			     unsigned int cmd, void *arg))
{
	char    sbuf[128] = {};
	void    *mbuf = NULL;
	void    *parg = NULL;
	int     err  = -EINVAL;

	/*  Copy arguments into temp kernel buffer  */
	switch (_IOC_DIR(cmd)) {
	case _IOC_NONE:
		/*
		 * For this command, the pointer is actually an integer
		 * argument.
		 */
		parg = (void *)arg;
		break;
	case _IOC_READ: /* some v4l ioctls are marked wrong ... */
	case _IOC_WRITE:
	case (_IOC_WRITE | _IOC_READ):
		if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
			parg = sbuf;
		} else {
			/* too big to allocate from stack */
			mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
			if (!mbuf)
				return -ENOMEM;
			parg = mbuf;
		}

		err = -EFAULT;
		if (copy_from_user(parg, (void __user *)arg, _IOC_SIZE(cmd)))
			goto out;
		break;
	}

	/* call driver */
	err = func(file, cmd, parg);
	if (err == -ENOIOCTLCMD)
		err = -ENOTTY;

	if (err < 0)
		goto out;

	/*  Copy results into user buffer  */
	switch (_IOC_DIR(cmd)) {
	case _IOC_READ:
	case (_IOC_WRITE | _IOC_READ):
		if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
			err = -EFAULT;
		break;
	}

out:
	kfree(mbuf);
	return err;
}

#if IS_ENABLED(CONFIG_I2C)
struct i2c_client *dvb_module_probe(const char *module_name,
				    const char *name,
				    struct i2c_adapter *adap,
				    unsigned char addr,
				    void *platform_data)
{
	struct i2c_client *client;
	struct i2c_board_info *board_info;

	board_info = kzalloc(sizeof(*board_info), GFP_KERNEL);
	if (!board_info)
		return NULL;

	if (name)
		strscpy(board_info->type, name, I2C_NAME_SIZE);
	else
		strscpy(board_info->type, module_name, I2C_NAME_SIZE);

	board_info->addr = addr;
	board_info->platform_data = platform_data;
	request_module(module_name);

	client = i2c_new_client_device(adap, board_info);
	if (!i2c_client_has_driver(client)) {
		kfree(board_info);
		return NULL;
	}

	if (!try_module_get(client->dev.driver->owner)) {
		i2c_unregister_device(client);
		client = NULL;
	}

	kfree(board_info);
	return client;
}
EXPORT_SYMBOL_GPL(dvb_module_probe);

void dvb_module_release(struct i2c_client *client)
{
	if (!client)
		return;

	module_put(client->dev.driver->owner);
	i2c_unregister_device(client);
}
EXPORT_SYMBOL_GPL(dvb_module_release);
#endif

static int dvb_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct dvb_device *dvbdev = dev_get_drvdata(dev);

	add_uevent_var(env, "DVB_ADAPTER_NUM=%d", dvbdev->adapter->num);
	add_uevent_var(env, "DVB_DEVICE_TYPE=%s", dnames[dvbdev->type]);
	add_uevent_var(env, "DVB_DEVICE_NUM=%d", dvbdev->id);
	return 0;
}

static char *dvb_devnode(const struct device *dev, umode_t *mode)
{
	const struct dvb_device *dvbdev = dev_get_drvdata(dev);

	return kasprintf(GFP_KERNEL, "dvb/adapter%d/%s%d",
			 dvbdev->adapter->num, dnames[dvbdev->type],
			 dvbdev->id);
}

static int __init init_dvbdev(void)
{
	int retval;
	dev_t dev = MKDEV(DVB_MAJOR, 0);

	retval = register_chrdev_region(dev, MAX_DVB_MINORS, "DVB");
	if (retval != 0) {
		pr_err("dvb-core: unable to get major %d\n", DVB_MAJOR);
		return retval;
	}

	cdev_init(&dvb_device_cdev, &dvb_device_fops);
	retval = cdev_add(&dvb_device_cdev, dev, MAX_DVB_MINORS);
	if (retval != 0) {
		pr_err("dvb-core: unable to register character device\n");
		goto error;
	}

	dvb_class = class_create("dvb");
	if (IS_ERR(dvb_class)) {
		retval = PTR_ERR(dvb_class);
		goto error;
	}
	dvb_class->dev_uevent = dvb_uevent;
	dvb_class->devnode = dvb_devnode;
	return 0;

error:
	cdev_del(&dvb_device_cdev);
	unregister_chrdev_region(dev, MAX_DVB_MINORS);
	return retval;
}

static void __exit exit_dvbdev(void)
{
	struct dvbdevfops_node *node, *next;

	class_destroy(dvb_class);
	cdev_del(&dvb_device_cdev);
	unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS);

	list_for_each_entry_safe(node, next, &dvbdevfops_list, list_head) {
		list_del(&node->list_head);
		kfree(node->fops);
		kfree(node);
	}
}

subsys_initcall(init_dvbdev);
module_exit(exit_dvbdev);

MODULE_DESCRIPTION("DVB Core Driver");
MODULE_AUTHOR("Marcus Metzler, Ralph Metzler, Holger Waechtler");
MODULE_LICENSE("GPL");
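
/*
 * Editor's sketch, not part of dvbdev.c: a minimal illustration of how a
 * bridge driver might use the registration API above. All "demo_*" names
 * are hypothetical; a real driver would call this from its probe() routine,
 * pick the device type it actually implements, and check every error path.
 */
#include <media/dvbdev.h>

static short demo_adapter_nr[DVB_MAX_ADAPTERS] = {
	[0 ... (DVB_MAX_ADAPTERS - 1)] = -1	/* -1 = "first free number" */
};

static struct dvb_adapter demo_adapter;
static struct dvb_device *demo_ca_dev;

/* Called by dvb_generic_ioctl() via dvb_usercopy() with a kernel copy of arg. */
static int demo_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
{
	return -ENOTTY;		/* no commands implemented in this sketch */
}

static const struct file_operations demo_ca_fops = {
	.owner		= THIS_MODULE,
	.open		= dvb_generic_open,
	.release	= dvb_generic_release,
	.unlocked_ioctl	= dvb_generic_ioctl,
};

static const struct dvb_device demo_ca_template = {
	.users		= 1,	/* budgets consumed by dvb_generic_open() */
	.readers	= 1,
	.writers	= 1,
	.fops		= &demo_ca_fops,
	.kernel_ioctl	= demo_ca_ioctl,
};

static int demo_probe(struct device *dev)
{
	int ret;

	ret = dvb_register_adapter(&demo_adapter, "demo", THIS_MODULE,
				   dev, demo_adapter_nr);
	if (ret < 0)
		return ret;

	/* Creates /dev/dvb/adapterN/ca0 backed by demo_ca_fops. */
	ret = dvb_register_device(&demo_adapter, &demo_ca_dev,
				  &demo_ca_template, NULL, DVB_DEVICE_CA, 0);
	if (ret < 0)
		dvb_unregister_adapter(&demo_adapter);
	return ret;
}

static void demo_remove(void)
{
	dvb_unregister_device(demo_ca_dev);
	dvb_unregister_adapter(&demo_adapter);
}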
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/cleanup.h>

struct idr {
	struct radix_tree_root	idr_rt;
	unsigned int		idr_base;
	unsigned int		idr_next;
};

/*
 * The IDR API does not expose the tagging functionality of the radix tree
 * to users.  Use tag 0 to track whether a node has free space below it.
 */
#define IDR_FREE	0

/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER	(ROOT_IS_IDR | (__force gfp_t)			\
					(1 << (ROOT_TAG_SHIFT + IDR_FREE)))

#define IDR_INIT_BASE(name, base) {					\
	.idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER),			\
	.idr_base = (base),						\
	.idr_next = 0,							\
}

/**
 * IDR_INIT() - Initialise an IDR.
 * @name: Name of IDR.
 *
 * A freshly-initialised IDR contains no IDs.
 */
#define IDR_INIT(name)	IDR_INIT_BASE(name, 0)

/**
 * DEFINE_IDR() - Define a statically-allocated IDR.
 * @name: Name of IDR.
 *
 * An IDR defined using this macro is ready for use with no additional
 * initialisation required.  It contains no IDs.
 */
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)

/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * The value returned is the value that will be next returned from
 * idr_alloc_cyclic() if it is free (otherwise the search will start from
 * this position).
 */
static inline unsigned int idr_get_cursor(const struct idr *idr)
{
	return READ_ONCE(idr->idr_next);
}

/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * The next call to idr_alloc_cyclic() will return @val if it is free
 * (otherwise the search will start from this position).
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
	WRITE_ONCE(idr->idr_next, val);
}

/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() is able to be called locklessly, using RCU.
 * The caller must ensure calls to this function are made within
 * rcu_read_lock() regions.  Other readers (lock-free or otherwise) and
 * modifications may be running concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items.  So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */

#define idr_lock(idr)		xa_lock(&(idr)->idr_rt)
#define idr_unlock(idr)		xa_unlock(&(idr)->idr_rt)
#define idr_lock_bh(idr)	xa_lock_bh(&(idr)->idr_rt)
#define idr_unlock_bh(idr)	xa_unlock_bh(&(idr)->idr_rt)
#define idr_lock_irq(idr)	xa_lock_irq(&(idr)->idr_rt)
#define idr_unlock_irq(idr)	xa_unlock_irq(&(idr)->idr_rt)
#define idr_lock_irqsave(idr, flags) \
				xa_lock_irqsave(&(idr)->idr_rt, flags)
#define idr_unlock_irqrestore(idr, flags) \
				xa_unlock_irqrestore(&(idr)->idr_rt, flags)

void idr_preload(gfp_t gfp_mask);

int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
				unsigned long max, gfp_t);
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
void *idr_remove(struct idr *, unsigned long id);
void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);

struct __class_idr {
	struct idr *idr;
	int id;
};

#define idr_null ((struct __class_idr){ NULL, -1 })
#define take_idr_id(id) __get_and_null(id, idr_null)

DEFINE_CLASS(idr_alloc, struct __class_idr,
	     if (_T.id >= 0) idr_remove(_T.idr, _T.id),
	     ((struct __class_idr){
		.idr = idr,
		.id = idr_alloc(idr, ptr, start, end, gfp),
	     }),
	     struct idr *idr, void *ptr, int start, int end, gfp_t gfp);

/**
 * idr_init_base() - Initialise an IDR.
 * @idr: IDR handle.
 * @base: The base value for the IDR.
 *
 * This variation of idr_init() creates an IDR which will allocate IDs
 * starting at %base.
 */
static inline void idr_init_base(struct idr *idr, int base)
{
	INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
	idr->idr_base = base;
	idr->idr_next = 0;
}

/**
 * idr_init() - Initialise an IDR.
 * @idr: IDR handle.
 *
 * Initialise a dynamically allocated IDR.  To initialise a
 * statically allocated IDR, use DEFINE_IDR().
 */
static inline void idr_init(struct idr *idr)
{
	idr_init_base(idr, 0);
}

/**
 * idr_is_empty() - Are there any IDs allocated?
 * @idr: IDR handle.
 *
 * Return: %true if no IDs are currently allocated in this IDR.
 */
static inline bool idr_is_empty(const struct idr *idr)
{
	return radix_tree_empty(&idr->idr_rt) &&
		radix_tree_tagged(&idr->idr_rt, IDR_FREE);
}

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	local_unlock(&radix_tree_preloads.lock);
}

/**
 * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.
 * This is convenient for a "not found" value.
 */
#define idr_for_each_entry(idr, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)

/**
 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @tmp: A temporary placeholder for ID.
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry_ul(idr, entry, tmp, id)			\
	for (tmp = 0, id = 0;						\
	     ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
	     tmp = id, ++id)

/**
 * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 */
#define idr_for_each_entry_continue(idr, entry, id)			\
	for ((entry) = idr_get_next((idr), &(id));			\
	     entry;							\
	     ++id, (entry) = idr_get_next((idr), &(id)))

/**
 * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @tmp: A temporary placeholder for ID.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 * After normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id)		\
	for (tmp = id;							\
	     ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
	     tmp = id, ++id)

/*
 * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS 	(IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

struct ida {
	struct xarray xa;
};

#define IDA_INIT_FLAGS	(XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)

#define IDA_INIT(name)	{						\
	.xa = XARRAY_INIT(name, IDA_INIT_FLAGS)				\
}
#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)

int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);
int ida_find_first_range(struct ida *ida, unsigned int min, unsigned int max);

/**
 * ida_alloc() - Allocate an unused ID.
 * @ida: IDA handle.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and %INT_MAX, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc(struct ida *ida, gfp_t gfp)
{
	return ida_alloc_range(ida, 0, ~0, gfp);
}

/**
 * ida_alloc_min() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and %INT_MAX, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
	return ida_alloc_range(ida, min, ~0, gfp);
}

/**
 * ida_alloc_max() - Allocate an unused ID.
 * @ida: IDA handle.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and @max, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
	return ida_alloc_range(ida, 0, max, gfp);
}

static inline void ida_init(struct ida *ida)
{
	xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}

/*
 * ida_simple_get() and ida_simple_remove() are deprecated. Use
 * ida_alloc() and ida_free() instead respectively.
 */
#define ida_simple_get(ida, start, end, gfp)	\
			ida_alloc_range(ida, start, (end) - 1, gfp)
#define ida_simple_remove(ida, id)	ida_free(ida, id)

static inline bool ida_is_empty(const struct ida *ida)
{
	return xa_empty(&ida->xa);
}

static inline bool ida_exists(struct ida *ida, unsigned int id)
{
	return ida_find_first_range(ida, id, id) == id;
}

static inline int ida_find_first(struct ida *ida)
{
	return ida_find_first_range(ida, 0, ~0);
}
#endif /* __IDR_H__ */
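
/*
 * Editor's sketch, not part of idr.h: a minimal illustration of the two
 * allocators declared above. "session" and "port" are hypothetical names;
 * callers needing lockless lookups would add their own locking/RCU rules
 * as described in the "idr sync" DOC block.
 */
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDR(session_idr);		/* id -> pointer map */
static DEFINE_IDA(port_ida);		/* ids only, no pointers */

struct session {
	int id;
};

static int session_create(void)
{
	struct session *s = kzalloc(sizeof(*s), GFP_KERNEL);
	int id;

	if (!s)
		return -ENOMEM;

	/* Allocate an unused ID in [1, 1024) and map it to @s. */
	id = idr_alloc(&session_idr, s, 1, 1024, GFP_KERNEL);
	if (id < 0) {
		kfree(s);
		return id;
	}
	s->id = id;
	return id;
}

static void session_destroy(int id)
{
	/* idr_remove() returns the pointer that was stored under @id. */
	struct session *s = idr_remove(&session_idr, id);

	kfree(s);
}

/* IDA: use when only the integer matters, e.g. a minor-number pool. */
static int grab_port(void)
{
	return ida_alloc_range(&port_ida, 0, 255, GFP_KERNEL);
}

static void release_port(int port)
{
	ida_free(&port_ida, port);
}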
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BH_H
#define _LINUX_BH_H

#include <linux/instruction_pointer.h>
#include <linux/preempt.h>

#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS)
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	preempt_count_add(cnt);
	barrier();
}
#endif

static inline void local_bh_disable(void)
{
	__local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}

extern void _local_bh_enable(void);
extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);

static inline void local_bh_enable_ip(unsigned long ip)
{
	__local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET);
}

static inline void local_bh_enable(void)
{
	__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}

#ifdef CONFIG_PREEMPT_RT
extern bool local_bh_blocked(void);
#else
static inline bool local_bh_blocked(void) { return false; }
#endif

#endif /* _LINUX_BH_H */
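
/*
 * Editor's sketch, not part of bh.h: the canonical pairing of the helpers
 * above. The per-CPU counter and function names are hypothetical.
 */
#include <linux/bh.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_update(void)
{
	/*
	 * A softirq on this CPU might also touch demo_counter; disabling
	 * bottom halves keeps this read-modify-write atomic with respect
	 * to softirq context. Calls must nest in disable/enable pairs.
	 */
	local_bh_disable();
	__this_cpu_inc(demo_counter);
	local_bh_enable();
}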
/*
 * include/linux/ktime.h
 *
 * ktime_t - nanosecond-resolution time format.
 *
 *  Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 * data type definitions, declarations, prototypes and macros.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Credits:
 *
 *	Roman Zippel provided the ideas and primary code snippets of
 *	the ktime_t union and further simplifications of the original
 *	code.
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H

#include <asm/bug.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/types.h>

/**
 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
 * @secs:	seconds to set
 * @nsecs:	nanoseconds to set
 *
 * Return: The ktime_t representation of the value.
 */
static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
	if (unlikely(secs >= KTIME_SEC_MAX))
		return KTIME_MAX;

	return secs * NSEC_PER_SEC + (s64)nsecs;
}

/* Subtract two ktime_t variables. rem = lhs - rhs: */
#define ktime_sub(lhs, rhs)	((lhs) - (rhs))

/* Add two ktime_t variables. res = lhs + rhs: */
#define ktime_add(lhs, rhs)	((lhs) + (rhs))

/*
 * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
 * this means that you must check the result for overflow yourself.
 */
#define ktime_add_unsafe(lhs, rhs)	((u64) (lhs) + (rhs))

/*
 * Add a ktime_t variable and a scalar nanosecond value.
 * res = kt + nsval:
 */
#define ktime_add_ns(kt, nsval)		((kt) + (nsval))

/*
 * Subtract a scalar nanosecond value from a ktime_t variable
 * res = kt - nsval:
 */
#define ktime_sub_ns(kt, nsval)		((kt) - (nsval))

/* convert a timespec64 to ktime_t format: */
static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
{
	return ktime_set(ts.tv_sec, ts.tv_nsec);
}

/* Map the ktime_t to timespec conversion to ns_to_timespec function */
#define ktime_to_timespec64(kt)		ns_to_timespec64((kt))

/* Convert ktime_t to nanoseconds */
static inline s64 ktime_to_ns(const ktime_t kt)
{
	return kt;
}

/**
 * ktime_compare - Compares two ktime_t variables for less, greater or equal
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: ...
 *   cmp1  < cmp2: return <0
 *   cmp1 == cmp2: return 0
 *   cmp1  > cmp2: return >0
 */
static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
{
	if (cmp1 < cmp2)
		return -1;
	if (cmp1 > cmp2)
		return 1;
	return 0;
}

/**
 * ktime_after - Compare if a ktime_t value is bigger than another one.
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: true if cmp1 happened after cmp2.
 */
static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2)
{
	return ktime_compare(cmp1, cmp2) > 0;
}

/**
 * ktime_before - Compare if a ktime_t value is smaller than another one.
 * @cmp1:	comparable1
 * @cmp2:	comparable2
 *
 * Return: true if cmp1 happened before cmp2.
 */
static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
{
	return ktime_compare(cmp1, cmp2) < 0;
}

#if BITS_PER_LONG < 64
extern s64 __ktime_divns(const ktime_t kt, s64 div);
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
	/*
	 * Negative divisors could cause an infinite loop,
	 * so bug out here.
	 */
	BUG_ON(div < 0);
	if (__builtin_constant_p(div) && !(div >> 32)) {
		s64 ns = kt;
		u64 tmp = ns < 0 ? -ns : ns;

		do_div(tmp, div);
		return ns < 0 ? -tmp : tmp;
	} else {
		return __ktime_divns(kt, div);
	}
}
#else /* BITS_PER_LONG < 64 */
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
	/*
	 * The 32-bit implementation cannot handle negative divisors,
	 * so catch them on 64-bit as well.
	 */
	WARN_ON(div < 0);
	return kt / div;
}
#endif

static inline s64 ktime_to_us(const ktime_t kt)
{
	return ktime_divns(kt, NSEC_PER_USEC);
}

static inline s64 ktime_to_ms(const ktime_t kt)
{
	return ktime_divns(kt, NSEC_PER_MSEC);
}

static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
{
	return ktime_to_us(ktime_sub(later, earlier));
}

static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
{
	return ktime_to_ms(ktime_sub(later, earlier));
}

static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
	return ktime_add_ns(kt, usec * NSEC_PER_USEC);
}

static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec)
{
	return ktime_add_ns(kt, msec * NSEC_PER_MSEC);
}

static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
{
	return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
}

static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
{
	return ktime_sub_ns(kt, msec * NSEC_PER_MSEC);
}

extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);

/**
 * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
 *			      format only if the variable contains data
 * @kt:		the ktime_t variable to convert
 * @ts:		the timespec variable to store the result in
 *
 * Return: %true if there was a successful conversion, %false if kt was 0.
 */
static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
						       struct timespec64 *ts)
{
	if (kt) {
		*ts = ktime_to_timespec64(kt);
		return true;
	} else {
		return false;
	}
}

#include <vdso/ktime.h>

static inline ktime_t ns_to_ktime(u64 ns)
{
	return ns;
}

static inline ktime_t us_to_ktime(u64 us)
{
	return us * NSEC_PER_USEC;
}

static inline ktime_t ms_to_ktime(u64 ms)
{
	return ms * NSEC_PER_MSEC;
}

# include <linux/timekeeping.h>

#endif
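
/*
 * Editor's sketch, not part of ktime.h: measuring elapsed time with the
 * helpers above. demo_work() is a hypothetical stand-in for real work;
 * ktime_get() comes from <linux/timekeeping.h>, which ktime.h includes.
 */
#include <linux/ktime.h>
#include <linux/printk.h>

static void demo_work(void)
{
	/* placeholder for the operation being timed */
}

static void demo_measure(void)
{
	ktime_t start = ktime_get();
	ktime_t deadline = ktime_add_ms(start, 500);	/* start + 500 ms */

	demo_work();

	/* ktime_t is a plain s64 of nanoseconds; deltas are signed. */
	pr_info("demo_work took %lld us\n",
		ktime_us_delta(ktime_get(), start));

	if (ktime_after(ktime_get(), deadline))
		pr_warn("demo_work overran its 500 ms budget\n");
}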
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_DEVICE_H
#define _SCSI_SCSI_DEVICE_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h>
#include <linux/atomic.h>
#include <linux/sbitmap.h>

struct bsg_device;
struct device;
struct request_queue;
struct scsi_cmnd;
struct scsi_lun;
struct scsi_sense_hdr;

typedef __u64 __bitwise blist_flags_t;

#define SCSI_SENSE_BUFFERSIZE	96

struct scsi_mode_data {
	__u32	length;
	__u16	block_descriptor_length;
	__u8	medium_type;
	__u8	device_specific;
	__u8	header_length;
	__u8	longlba:1;
};

/*
 * sdev state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_lib:scsi_device_set_state().
 */
enum scsi_device_state {
	SDEV_CREATED = 1,	/* device created but not added to sysfs
				 * Only internal commands allowed (for inq) */
	SDEV_RUNNING,		/* device properly configured
				 * All commands allowed */
	SDEV_CANCEL,		/* beginning to delete device
				 * Only error handler commands allowed */
	SDEV_DEL,		/* device deleted
				 * no commands allowed */
	SDEV_QUIESCE,		/* Device quiescent.  No block commands
				 * will be accepted, only specials (which
				 * originate in the mid-layer) */
	SDEV_OFFLINE,		/* Device offlined (by error handling or
				 * user request) */
	SDEV_TRANSPORT_OFFLINE,	/* Offlined by transport class error handler */
	SDEV_BLOCK,		/* Device blocked by scsi lld.  No
				 * scsi commands from user or midlayer
				 * should be issued to the scsi
				 * lld. */
	SDEV_CREATED_BLOCK,	/* same as above but for created devices */
};

enum scsi_scan_mode {
	SCSI_SCAN_INITIAL = 0,
	SCSI_SCAN_RESCAN,
	SCSI_SCAN_MANUAL,
};

enum scsi_device_event {
	SDEV_EVT_MEDIA_CHANGE	= 1,	/* media has changed */
	SDEV_EVT_INQUIRY_CHANGE_REPORTED,		/* 3F 03  UA reported */
	SDEV_EVT_CAPACITY_CHANGE_REPORTED,		/* 2A 09  UA reported */
	SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED,	/* 38 07  UA reported */
	SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED,	/* 2A 01  UA reported */
	SDEV_EVT_LUN_CHANGE_REPORTED,			/* 3F 0E  UA reported */
	SDEV_EVT_ALUA_STATE_CHANGE_REPORTED,		/* 2A 06  UA reported */
	SDEV_EVT_POWER_ON_RESET_OCCURRED,		/* 29 00  UA reported */

	SDEV_EVT_FIRST		= SDEV_EVT_MEDIA_CHANGE,
	SDEV_EVT_LAST		= SDEV_EVT_POWER_ON_RESET_OCCURRED,

	SDEV_EVT_MAXBITS	= SDEV_EVT_LAST + 1
};

struct scsi_event {
	enum scsi_device_event	evt_type;
	struct list_head	node;

	/* put union of data structures, for non-simple event types,
	 * here
	 */
};

/**
 * struct scsi_vpd - SCSI Vital Product Data
 * @rcu: For kfree_rcu().
 * @len: Length in bytes of @data.
 * @data: VPD data as defined in various T10 SCSI standard documents.
 */
struct scsi_vpd {
	struct rcu_head	rcu;
	int		len;
	unsigned char	data[];
};

struct scsi_device {
	struct Scsi_Host *host;
	struct request_queue *request_queue;

	/* the next two are protected by the host->host_lock */
	struct list_head    siblings;   /* list of all devices on this host */
	struct list_head    same_target_siblings; /* just the devices sharing same target id */

	struct sbitmap budget_map;
	atomic_t device_blocked;	/* Device returned QUEUE_FULL. */

	atomic_t restarts;
	spinlock_t list_lock;
	struct list_head starved_entry;
	unsigned short queue_depth;	/* How deep of a queue we want */
	unsigned short max_queue_depth;	/* max queue depth */
	unsigned short last_queue_full_depth; /* These two are used by */
	unsigned short last_queue_full_count; /* scsi_track_queue_full() */
	unsigned long last_queue_full_time;	/* last queue full time */
	unsigned long queue_ramp_up_period;	/* ramp up period in jiffies */
#define SCSI_DEFAULT_RAMP_UP_PERIOD	(120 * HZ)

	unsigned long last_queue_ramp_up;	/* last queue ramp up time */

	unsigned int id, channel;
	u64 lun;
	unsigned int manufacturer;	/* Manufacturer of device, for using
					 * vendor-specific cmd's */
	unsigned sector_size;	/* size in bytes */

	void *hostdata;		/* available to low-level driver */
	unsigned char type;
	char scsi_level;
	char inq_periph_qual;	/* PQ from INQUIRY data */
	struct mutex inquiry_mutex;
	unsigned char inquiry_len;	/* valid bytes in 'inquiry' */
	unsigned char *inquiry;		/* INQUIRY response data */
	const char *vendor;	/* [back_compat] point into 'inquiry' ... */
	const char *model;	/* ... after scan; point to static string */
	const char *rev;	/* ...
"nullnullnullnull" before scan */ #define SCSI_DEFAULT_VPD_LEN 255 /* default SCSI VPD page size (max) */ struct scsi_vpd __rcu *vpd_pg0; struct scsi_vpd __rcu *vpd_pg83; struct scsi_vpd __rcu *vpd_pg80; struct scsi_vpd __rcu *vpd_pg89; struct scsi_vpd __rcu *vpd_pgb0; struct scsi_vpd __rcu *vpd_pgb1; struct scsi_vpd __rcu *vpd_pgb2; struct scsi_vpd __rcu *vpd_pgb7; struct scsi_target *sdev_target; blist_flags_t sdev_bflags; /* black/white flags as also found in * scsi_devinfo.[hc]. For now used only to * pass settings from sdev_init to scsi * core. */ unsigned int eh_timeout; /* Error handling timeout */ /* * If true, let the high-level device driver (sd) manage the device * power state for system suspend/resume (suspend to RAM and * hibernation) operations. */ unsigned manage_system_start_stop:1; /* * If true, let the high-level device driver (sd) manage the device * power state for runtime device suspand and resume operations. */ unsigned manage_runtime_start_stop:1; /* * If true, let the high-level device driver (sd) manage the device * power state for system shutdown (power off) operations. */ unsigned manage_shutdown:1; /* * If set and if the device is runtime suspended, ask the high-level * device driver (sd) to force a runtime resume of the device. */ unsigned force_runtime_start_on_system_start:1; unsigned removable:1; unsigned changed:1; /* Data invalid due to media change */ unsigned busy:1; /* Used to prevent races */ unsigned lockable:1; /* Able to prevent media removal */ unsigned locked:1; /* Media removal disabled */ unsigned borken:1; /* Tell the Seagate driver to be * painfully slow on this device */ unsigned disconnect:1; /* can disconnect */ unsigned soft_reset:1; /* Uses soft reset option */ unsigned sdtr:1; /* Device supports SDTR messages */ unsigned wdtr:1; /* Device supports WDTR messages */ unsigned ppr:1; /* Device supports PPR messages */ unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */ unsigned simple_tags:1; /* simple queue tag messages are enabled */ unsigned was_reset:1; /* There was a bus reset on the bus for * this device */ unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN * because we did a bus reset. */ unsigned use_10_for_rw:1; /* first try 10-byte read / write */ unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */ unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */ unsigned read_before_ms:1; /* perform a READ before MODE SENSE */ unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */ unsigned no_write_same:1; /* no WRITE SAME command */ unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */ unsigned use_16_for_sync:1; /* Use sync (16) over sync (10) */ unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */ unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */ unsigned skip_vpd_pages:1; /* do not read VPD pages */ unsigned try_vpd_pages:1; /* attempt to read VPD pages */ unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */ unsigned no_start_on_add:1; /* do not issue start on add */ unsigned allow_restart:1; /* issue START_UNIT in error handler */ unsigned start_stop_pwr_cond:1; /* Set power cond. 
					 * in START_STOP_UNIT */
	unsigned no_uld_attach:1; /* disable connecting to upper level drivers */
	unsigned select_no_atn:1;
	unsigned fix_capacity:1;	/* READ_CAPACITY is too high by 1 */
	unsigned guess_capacity:1;	/* READ_CAPACITY might be too high by 1 */
	unsigned retry_hwerror:1;	/* Retry HARDWARE_ERROR */
	unsigned last_sector_bug:1;	/* do not use multisector accesses on
					   SD_LAST_BUGGY_SECTORS */
	unsigned no_read_disc_info:1;	/* Avoid READ_DISC_INFO cmds */
	unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */
	unsigned try_rc_10_first:1;	/* Try READ_CAPACITY_10 first */
	unsigned security_supported:1;	/* Supports Security Protocols */
	unsigned is_visible:1;	/* is the device visible in sysfs */
	unsigned wce_default_on:1;	/* Cache is ON by default */
	unsigned no_dif:1;	/* T10 PI (DIF) should be disabled */
	unsigned broken_fua:1;		/* Don't set FUA bit */
	unsigned lun_in_cdb:1;		/* Store LUN bits in CDB[1] */
	unsigned unmap_limit_for_ws:1;	/* Use the UNMAP limit for WRITE SAME */
	unsigned rpm_autosuspend:1;	/* Enable runtime autosuspend at device
					 * creation time */
	unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */
	unsigned silence_suspend:1;	/* Do not print runtime PM related messages */
	unsigned no_vpd_size:1;		/* No VPD size reported in header */

	unsigned cdl_supported:1;	/* Command duration limits supported */
	unsigned cdl_enable:1;		/* Enable/disable Command duration limits */

	unsigned int queue_stopped;	/* request queue is quiesced */
	bool offline_already;		/* Device offline message logged */

	unsigned int ua_new_media_ctr;	/* Counter for New Media UNIT ATTENTIONs */
	unsigned int ua_por_ctr;	/* Counter for Power On / Reset UAs */

	atomic_t disk_events_disable_depth; /* disable depth for disk events */

	DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
	DECLARE_BITMAP(pending_events, SDEV_EVT_MAXBITS); /* pending events */
	struct list_head event_list;	/* asserted events */
	struct work_struct event_work;

	unsigned int max_device_blocked; /* what device_blocked counts down from */
#define SCSI_DEFAULT_DEVICE_BLOCKED	3

	atomic_t iorequest_cnt;
	atomic_t iodone_cnt;
	atomic_t ioerr_cnt;
	atomic_t iotmo_cnt;

	struct device		sdev_gendev,
				sdev_dev;

	struct work_struct	requeue_work;

	struct scsi_device_handler *handler;
	void			*handler_data;

	size_t			dma_drain_len;
	void			*dma_drain_buf;

	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;

	struct bsg_device	*bsg_dev;
	unsigned char		access_state;
	struct mutex		state_mutex;
	enum scsi_device_state sdev_state;
	struct task_struct	*quiesced_by;
	unsigned long		sdev_data[];
} __attribute__((aligned(sizeof(unsigned long))));

#define	to_scsi_device(d)	\
	container_of(d, struct scsi_device, sdev_gendev)
#define	class_to_sdev(d)	\
	container_of(d, struct scsi_device, sdev_dev)
#define transport_class_to_sdev(class_dev) \
	to_scsi_device(class_dev->parent)

#define sdev_dbg(sdev, fmt, a...) \
	dev_dbg(&(sdev)->sdev_gendev, fmt, ##a)

/*
 * like scmd_printk, but the device name is passed in
 * as a string pointer
 */
__printf(4, 5) void
sdev_prefix_printk(const char *, const struct scsi_device *, const char *,
		const char *, ...);

#define sdev_printk(l, sdev, fmt, a...)				\
	sdev_prefix_printk(l, sdev, NULL, fmt, ##a)

__printf(3, 4) void
scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);

#define scmd_dbg(scmd, fmt, a...)					\
\ do { \ struct request *__rq = scsi_cmd_to_rq((scmd)); \ \ if (__rq->q->disk) \ sdev_dbg((scmd)->device, "[%s] " fmt, \ __rq->q->disk->disk_name, ##a); \ else \ sdev_dbg((scmd)->device, fmt, ##a); \ } while (0) enum scsi_target_state { STARGET_CREATED = 1, STARGET_RUNNING, STARGET_REMOVE, STARGET_CREATED_REMOVE, STARGET_DEL, }; /* * scsi_target: representation of a scsi target, for now, this is only * used for single_lun devices. If no one has active IO to the target, * starget_sdev_user is NULL, else it points to the active sdev. */ struct scsi_target { struct scsi_device *starget_sdev_user; struct list_head siblings; struct list_head devices; struct device dev; struct kref reap_ref; /* last put renders target invisible */ unsigned int channel; unsigned int id; /* target id ... replace * scsi_device.id eventually */ unsigned int create:1; /* signal that it needs to be added */ unsigned int single_lun:1; /* Indicates we should only * allow I/O to one of the luns * for the device at a time. */ unsigned int pdt_1f_for_no_lun:1; /* PDT = 0x1f * means no lun present. */ unsigned int no_report_luns:1; /* Don't use * REPORT LUNS for scanning. */ unsigned int expecting_lun_change:1; /* A device has reported * a 3F/0E UA, other devices on * the same target will also. */ /* commands actually active on LLD. */ atomic_t target_busy; atomic_t target_blocked; /* * LLDs should set this in the sdev_init host template callout. * If set to zero then there is no limit. */ unsigned int can_queue; unsigned int max_target_blocked; #define SCSI_DEFAULT_TARGET_BLOCKED 3 char scsi_level; enum scsi_target_state state; void *hostdata; /* available to low-level driver */ unsigned long starget_data[]; /* for the transport */ /* starget_data must be the last element!!!! */ } __attribute__((aligned(sizeof(unsigned long)))); #define to_scsi_target(d) container_of(d, struct scsi_target, dev) static inline struct scsi_target *scsi_target(struct scsi_device *sdev) { return to_scsi_target(sdev->sdev_gendev.parent); } #define transport_class_to_starget(class_dev) \ to_scsi_target(class_dev->parent) #define starget_printk(prefix, starget, fmt, a...)
\ dev_printk(prefix, &(starget)->dev, fmt, ##a) extern struct scsi_device *__scsi_add_device(struct Scsi_Host *, uint, uint, u64, void *hostdata); extern int scsi_add_device(struct Scsi_Host *host, uint channel, uint target, u64 lun); extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh); extern void scsi_remove_device(struct scsi_device *); extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh); void scsi_attach_vpd(struct scsi_device *sdev); void scsi_cdl_check(struct scsi_device *sdev); int scsi_cdl_enable(struct scsi_device *sdev, bool enable); extern struct scsi_device *scsi_device_from_queue(struct request_queue *q); extern int __must_check scsi_device_get(struct scsi_device *); extern void scsi_device_put(struct scsi_device *); extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *, uint, uint, u64); extern struct scsi_device *__scsi_device_lookup(struct Scsi_Host *, uint, uint, u64); extern struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *, u64); extern struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *, u64); extern void starget_for_each_device(struct scsi_target *, void *, void (*fn)(struct scsi_device *, void *)); extern void __starget_for_each_device(struct scsi_target *, void *, void (*fn)(struct scsi_device *, void *)); /* only exposed to implement shost_for_each_device */ extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *, struct scsi_device *); /** * shost_for_each_device - iterate over all devices of a host * @sdev: the &struct scsi_device to use as a cursor * @shost: the &struct scsi_host to iterate over * * Iterator that returns each device attached to @shost. This loop * takes a reference on each device and releases it at the end. If * you break out of the loop, you must call scsi_device_put(sdev). */ #define shost_for_each_device(sdev, shost) \ for ((sdev) = __scsi_iterate_devices((shost), NULL); \ (sdev); \ (sdev) = __scsi_iterate_devices((shost), (sdev))) /** * __shost_for_each_device - iterate over all devices of a host (UNLOCKED) * @sdev: the &struct scsi_device to use as a cursor * @shost: the &struct scsi_host to iterate over * * Iterator that returns each device attached to @shost. It does _not_ * take a reference on the scsi_device, so the whole loop must be * protected by shost->host_lock. * * Note: The only reason to use this is because you need to access the * device list in interrupt context. Otherwise you really want to use * shost_for_each_device instead. 
*/ #define __shost_for_each_device(sdev, shost) \ list_for_each_entry((sdev), &((shost)->__devices), siblings) extern int scsi_change_queue_depth(struct scsi_device *, int); extern int scsi_track_queue_full(struct scsi_device *, int); extern int scsi_set_medium_removal(struct scsi_device *, char); int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage, unsigned char *buffer, int len, int timeout, int retries, struct scsi_mode_data *data, struct scsi_sense_hdr *); extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, unsigned char *buffer, int len, int timeout, int retries, struct scsi_mode_data *data, struct scsi_sense_hdr *); extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, struct scsi_sense_hdr *sshdr); extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf, int buf_len); int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, unsigned int len, unsigned char opcode, unsigned short sa); extern int scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state); extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, gfp_t gfpflags); extern void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt); extern void sdev_evt_send_simple(struct scsi_device *sdev, enum scsi_device_event evt_type, gfp_t gfpflags); extern int scsi_device_quiesce(struct scsi_device *sdev); extern void scsi_device_resume(struct scsi_device *sdev); extern void scsi_target_quiesce(struct scsi_target *); extern void scsi_target_resume(struct scsi_target *); extern void scsi_scan_target(struct device *parent, unsigned int channel, unsigned int id, u64 lun, enum scsi_scan_mode rescan); extern void scsi_target_reap(struct scsi_target *); void scsi_block_targets(struct Scsi_Host *shost, struct device *dev); extern void scsi_target_unblock(struct device *, enum scsi_device_state); extern void scsi_remove_target(struct device *); extern const char *scsi_device_state_name(enum scsi_device_state); extern int scsi_is_sdev_device(const struct device *); extern int scsi_is_target_device(const struct device *); extern void scsi_sanitize_inquiry_string(unsigned char *s, int len); /* * scsi_execute_cmd users can set scsi_failure.result to have * scsi_check_passthrough fail/retry a command. scsi_failure.result can be a * specific host byte or message code, or SCMD_FAILURE_RESULT_ANY can be used * to match any host or message code. */ #define SCMD_FAILURE_RESULT_ANY 0x7fffffff /* * Set scsi_failure.result to SCMD_FAILURE_STAT_ANY to fail/retry any failure * scsi_status_is_good returns false for. */ #define SCMD_FAILURE_STAT_ANY 0xff /* * The following can be set to the scsi_failure sense, asc and ascq fields to * match on any sense, ASC, or ASCQ value. */ #define SCMD_FAILURE_SENSE_ANY 0xff #define SCMD_FAILURE_ASC_ANY 0xff #define SCMD_FAILURE_ASCQ_ANY 0xff /* Always retry a matching failure. */ #define SCMD_FAILURE_NO_LIMIT -1 struct scsi_failure { int result; u8 sense; u8 asc; u8 ascq; /* * Number of times scsi_execute_cmd will retry the failure. It does * not count toward total_allowed. */ s8 allowed; /* Number of times the failure has been retried. */ s8 retries; }; struct scsi_failures { /* * If a scsi_failure does not have a retry limit set up, this limit * will be used.
*/ int total_allowed; int total_retries; struct scsi_failure *failure_definitions; }; /* Optional arguments to scsi_execute_cmd */ struct scsi_exec_args { unsigned char *sense; /* sense buffer */ unsigned int sense_len; /* sense buffer len */ struct scsi_sense_hdr *sshdr; /* decoded sense header */ blk_mq_req_flags_t req_flags; /* BLK_MQ_REQ flags */ int scmd_flags; /* SCMD flags */ int *resid; /* residual length */ struct scsi_failures *failures; /* failures to retry */ }; int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd, blk_opf_t opf, void *buffer, unsigned int bufflen, int timeout, int retries, const struct scsi_exec_args *args); void scsi_failures_reset_retries(struct scsi_failures *failures); extern void sdev_disable_disk_events(struct scsi_device *sdev); extern void sdev_enable_disk_events(struct scsi_device *sdev); extern int scsi_vpd_lun_id(struct scsi_device *, char *, size_t); extern int scsi_vpd_tpg_id(struct scsi_device *, int *); #ifdef CONFIG_PM extern int scsi_autopm_get_device(struct scsi_device *); extern void scsi_autopm_put_device(struct scsi_device *); #else static inline int scsi_autopm_get_device(struct scsi_device *d) { return 0; } static inline void scsi_autopm_put_device(struct scsi_device *d) {} #endif /* CONFIG_PM */ static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev) { return device_reprobe(&sdev->sdev_gendev); } static inline unsigned int sdev_channel(struct scsi_device *sdev) { return sdev->channel; } static inline unsigned int sdev_id(struct scsi_device *sdev) { return sdev->id; } #define scmd_id(scmd) sdev_id((scmd)->device) #define scmd_channel(scmd) sdev_channel((scmd)->device) /* * checks for positions of the SCSI state machine */ static inline int scsi_device_online(struct scsi_device *sdev) { return (sdev->sdev_state != SDEV_OFFLINE && sdev->sdev_state != SDEV_TRANSPORT_OFFLINE && sdev->sdev_state != SDEV_DEL); } static inline int scsi_device_blocked(struct scsi_device *sdev) { return sdev->sdev_state == SDEV_BLOCK || sdev->sdev_state == SDEV_CREATED_BLOCK; } static inline int scsi_device_created(struct scsi_device *sdev) { return sdev->sdev_state == SDEV_CREATED || sdev->sdev_state == SDEV_CREATED_BLOCK; } int scsi_internal_device_block_nowait(struct scsi_device *sdev); int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, enum scsi_device_state new_state); /* accessor functions for the SCSI parameters */ static inline int scsi_device_sync(struct scsi_device *sdev) { return sdev->sdtr; } static inline int scsi_device_wide(struct scsi_device *sdev) { return sdev->wdtr; } static inline int scsi_device_dt(struct scsi_device *sdev) { return sdev->ppr; } static inline int scsi_device_dt_only(struct scsi_device *sdev) { if (sdev->inquiry_len < 57) return 0; return (sdev->inquiry[56] & 0x0c) == 0x04; } static inline int scsi_device_ius(struct scsi_device *sdev) { if (sdev->inquiry_len < 57) return 0; return sdev->inquiry[56] & 0x01; } static inline int scsi_device_qas(struct scsi_device *sdev) { if (sdev->inquiry_len < 57) return 0; return sdev->inquiry[56] & 0x02; } static inline int scsi_device_enclosure(struct scsi_device *sdev) { return sdev->inquiry ? (sdev->inquiry[6] & (1<<6)) : 1; } static inline int scsi_device_protection(struct scsi_device *sdev) { if (sdev->no_dif) return 0; return sdev->scsi_level > SCSI_2 && sdev->inquiry[5] & (1<<0); } static inline int scsi_device_tpgs(struct scsi_device *sdev) { return sdev->inquiry ? 
(sdev->inquiry[5] >> 4) & 0x3 : 0; } /** * scsi_device_supports_vpd - test if a device supports VPD pages * @sdev: the &struct scsi_device to test * * If the 'try_vpd_pages' flag is set it takes precedence. * Otherwise we will assume VPD pages are supported if the * SCSI level is at least SPC-3 and 'skip_vpd_pages' is not set. */ static inline int scsi_device_supports_vpd(struct scsi_device *sdev) { /* Attempt VPD inquiry if the device blacklist explicitly calls * for it. */ if (sdev->try_vpd_pages) return 1; /* * Although VPD inquiries can go to SCSI-2 type devices, * some USB ones crash on receiving them, and the pages * we currently ask for are mandatory for SPC-2 and beyond */ if (sdev->scsi_level >= SCSI_SPC_2 && !sdev->skip_vpd_pages) return 1; return 0; } static inline int scsi_device_busy(struct scsi_device *sdev) { return sbitmap_weight(&sdev->budget_map); } /* Macros to access the UNIT ATTENTION counters */ #define scsi_get_ua_new_media_ctr(sdev) \ ((const unsigned int)(sdev->ua_new_media_ctr)) #define scsi_get_ua_por_ctr(sdev) \ ((const unsigned int)(sdev->ua_por_ctr)) #define MODULE_ALIAS_SCSI_DEVICE(type) \ MODULE_ALIAS("scsi:t-" __stringify(type) "*") #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x" #endif /* _SCSI_SCSI_DEVICE_H */
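/*
 * Usage illustration (ours, not part of the header above): a minimal sketch
 * combining shost_for_each_device(), scsi_device_supports_vpd() and
 * scsi_get_vpd_page() to read the Unit Serial Number VPD page (0x80) from
 * every device on a host. The function name example_dump_vpd_pg80 is
 * hypothetical; it assumes a valid, registered Scsi_Host.
 */
static void example_dump_vpd_pg80(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned char buf[SCSI_DEFAULT_VPD_LEN];

	shost_for_each_device(sdev, shost) {
		if (!scsi_device_supports_vpd(sdev))
			continue;
		/* scsi_get_vpd_page() returns 0 on success */
		if (!scsi_get_vpd_page(sdev, 0x80, buf, sizeof(buf)))
			sdev_printk(KERN_INFO, sdev,
				    "read unit serial number VPD page\n");
	}
	/* The iterator drops its device reference at the end of each pass;
	 * only an early break would require scsi_device_put(sdev), per the
	 * kerneldoc of shost_for_each_device() above. */
}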
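/*
 * The scsi_failures plumbing declared above is easiest to read next to an
 * example. A hedged sketch (ours; the retry budget of 3 is arbitrary):
 * issue TEST UNIT READY and let scsi_check_passthrough() retry UNIT
 * ATTENTION check conditions instead of open-coding the retry loop.
 */
static int example_tur(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };
	struct scsi_failure failure_defs[] = {
		{
			/* Match any UNIT ATTENTION check condition */
			.sense = UNIT_ATTENTION,
			.asc = SCMD_FAILURE_ASC_ANY,
			.ascq = SCMD_FAILURE_ASCQ_ANY,
			.allowed = 3,	/* retry this match up to 3 times */
			.result = SAM_STAT_CHECK_CONDITION,
		},
		{}	/* terminating entry */
	};
	struct scsi_failures failures = {
		.failure_definitions = failure_defs,
	};
	const struct scsi_exec_args exec_args = {
		.failures = &failures,
	};

	/* No data transfer; 10 second timeout, no extra retries. */
	return scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
				10 * HZ, 0, &exec_args);
}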
// SPDX-License-Identifier: GPL-2.0-only #include <linux/netdevice.h> #include <linux/notifier.h> #include <linux/rtnetlink.h> #include <net/busy_poll.h> #include <net/net_namespace.h> #include <net/netdev_queues.h> #include <net/netdev_rx_queue.h> #include <net/sock.h> #include <net/xdp.h> #include <net/xdp_sock.h> #include <net/page_pool/memory_provider.h> #include "dev.h" #include "devmem.h" #include "netdev-genl-gen.h" struct netdev_nl_dump_ctx { unsigned long ifindex; unsigned int rxq_idx; unsigned int txq_idx; unsigned int napi_id; }; static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb) { NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx); return (struct netdev_nl_dump_ctx *)cb->ctx; } static int netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info) { u64 xsk_features = 0; u64 xdp_rx_meta = 0; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; #define XDP_METADATA_KFUNC(_, flag, __, xmo) \ if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \ xdp_rx_meta |= flag; XDP_METADATA_KFUNC_xxx #undef XDP_METADATA_KFUNC if (netdev->xsk_tx_metadata_ops) { if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp) xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP; if (netdev->xsk_tx_metadata_ops->tmo_request_checksum) xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM; if (netdev->xsk_tx_metadata_ops->tmo_request_launch_time) xsk_features |= NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO; } if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) || nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES, netdev->xdp_features, NETDEV_A_DEV_PAD) || nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES, xdp_rx_meta, NETDEV_A_DEV_PAD) || nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES, xsk_features, NETDEV_A_DEV_PAD)) goto err_cancel_msg; if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) { if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS, netdev->xdp_zc_max_segs)) goto err_cancel_msg; } genlmsg_end(rsp, hdr); return 0; err_cancel_msg: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static void netdev_genl_dev_notify(struct net_device *netdev, int cmd) { struct genl_info info; struct sk_buff *ntf; if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev), NETDEV_NLGRP_MGMT)) return; genl_info_init_ntf(&info, &netdev_nl_family, cmd); ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!ntf) return; if (netdev_nl_dev_fill(netdev, ntf, &info)) { nlmsg_free(ntf); return; } genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf, 0, NETDEV_NLGRP_MGMT, GFP_KERNEL); } int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info) { struct net_device *netdev; struct sk_buff *rsp; u32 ifindex; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX)) return -EINVAL; ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; rtnl_lock(); netdev = __dev_get_by_index(genl_info_net(info), ifindex); if (netdev) err = netdev_nl_dev_fill(netdev, rsp, info); else err = -ENODEV; rtnl_unlock(); if (err) goto err_free_msg; return genlmsg_reply(rsp, info);
err_free_msg: nlmsg_free(rsp); return err; } int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; int err = 0; rtnl_lock(); for_each_netdev_dump(net, netdev, ctx->ifindex) { err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb)); if (err < 0) break; } rtnl_unlock(); return err; } static int netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi, const struct genl_info *info) { unsigned long irq_suspend_timeout; unsigned long gro_flush_timeout; u32 napi_defer_hard_irqs; void *hdr; pid_t pid; if (!napi->dev->up) return 0; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id)) goto nla_put_failure; if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex)) goto nla_put_failure; if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq)) goto nla_put_failure; if (napi->thread) { pid = task_pid_nr(napi->thread); if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid)) goto nla_put_failure; } napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi); if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS, napi_defer_hard_irqs)) goto nla_put_failure; irq_suspend_timeout = napi_get_irq_suspend_timeout(napi); if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT, irq_suspend_timeout)) goto nla_put_failure; gro_flush_timeout = napi_get_gro_flush_timeout(napi); if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, gro_flush_timeout)) goto nla_put_failure; genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info) { struct napi_struct *napi; struct sk_buff *rsp; u32 napi_id; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID)) return -EINVAL; napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id); if (napi) { err = netdev_nl_napi_fill_one(rsp, napi, info); netdev_unlock(napi->dev); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]); err = -ENOENT; } if (err) { goto err_free_msg; } else if (!rsp->len) { err = -ENOENT; goto err_free_msg; } return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); return err; } static int netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { struct napi_struct *napi; unsigned int prev_id; int err = 0; if (!netdev->up) return err; prev_id = UINT_MAX; list_for_each_entry(napi, &netdev->napi_list, dev_list) { if (!napi_id_valid(napi->napi_id)) continue; /* Dump continuation below depends on the list being sorted */ WARN_ON_ONCE(napi->napi_id >= prev_id); prev_id = napi->napi_id; if (ctx->napi_id && napi->napi_id >= ctx->napi_id) continue; err = netdev_nl_napi_fill_one(rsp, napi, info); if (err) return err; ctx->napi_id = napi->napi_id; } return err; } int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; u32 ifindex = 0; int err = 0; if (info->attrs[NETDEV_A_NAPI_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]); if (ifindex) { netdev = netdev_get_by_index_lock(net, ifindex); if (netdev) { 
err = netdev_nl_napi_dump_one(netdev, skb, info, ctx); netdev_unlock(netdev); } else { err = -ENODEV; } } else { for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_napi_dump_one(netdev, skb, info, ctx); if (err < 0) break; ctx->napi_id = 0; } } return err; } static int netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info) { u64 irq_suspend_timeout = 0; u64 gro_flush_timeout = 0; u32 defer = 0; if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) { defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]); napi_set_defer_hard_irqs(napi, defer); } if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) { irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]); napi_set_irq_suspend_timeout(napi, irq_suspend_timeout); } if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) { gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]); napi_set_gro_flush_timeout(napi, gro_flush_timeout); } return 0; } int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info) { struct napi_struct *napi; unsigned int napi_id; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID)) return -EINVAL; napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]); napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id); if (napi) { err = netdev_nl_napi_set_config(napi, info); netdev_unlock(napi->dev); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]); err = -ENOENT; } return err; } static int nla_put_napi_id(struct sk_buff *skb, const struct napi_struct *napi) { if (napi && napi_id_valid(napi->napi_id)) return nla_put_u32(skb, NETDEV_A_QUEUE_NAPI_ID, napi->napi_id); return 0; } static int netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { struct pp_memory_provider_params *params; struct netdev_rx_queue *rxq; struct netdev_queue *txq; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) || nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) || nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex)) goto nla_put_failure; switch (q_type) { case NETDEV_QUEUE_TYPE_RX: rxq = __netif_get_rx_queue(netdev, q_idx); if (nla_put_napi_id(rsp, rxq->napi)) goto nla_put_failure; params = &rxq->mp_params; if (params->mp_ops && params->mp_ops->nl_fill(params->mp_priv, rsp, rxq)) goto nla_put_failure; #ifdef CONFIG_XDP_SOCKETS if (rxq->pool) if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK)) goto nla_put_failure; #endif break; case NETDEV_QUEUE_TYPE_TX: txq = netdev_get_tx_queue(netdev, q_idx); if (nla_put_napi_id(rsp, txq->napi)) goto nla_put_failure; #ifdef CONFIG_XDP_SOCKETS if (txq->pool) if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK)) goto nla_put_failure; #endif break; } genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id, u32 q_type) { switch (q_type) { case NETDEV_QUEUE_TYPE_RX: if (q_id >= netdev->real_num_rx_queues) return -EINVAL; return 0; case NETDEV_QUEUE_TYPE_TX: if (q_id >= netdev->real_num_tx_queues) return -EINVAL; } return 0; } static int netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { int err; if (!netdev->up) return -ENOENT; err = netdev_nl_queue_validate(netdev, q_idx, q_type); if (err) return err; return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info); } int 
netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info) { u32 q_id, q_type, ifindex; struct net_device *netdev; struct sk_buff *rsp; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX)) return -EINVAL; q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]); q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]); ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; rtnl_lock(); netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); if (netdev) { err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info); netdev_unlock(netdev); } else { err = -ENODEV; } rtnl_unlock(); if (err) goto err_free_msg; return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); return err; } static int netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { int err = 0; if (!netdev->up) return err; for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) { err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx, NETDEV_QUEUE_TYPE_RX, info); if (err) return err; } for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) { err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx, NETDEV_QUEUE_TYPE_TX, info); if (err) return err; } return err; } int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; u32 ifindex = 0; int err = 0; if (info->attrs[NETDEV_A_QUEUE_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]); rtnl_lock(); if (ifindex) { netdev = netdev_get_by_index_lock(net, ifindex); if (netdev) { err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); netdev_unlock(netdev); } else { err = -ENODEV; } } else { for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); if (err < 0) break; ctx->rxq_idx = 0; ctx->txq_idx = 0; } } rtnl_unlock(); return err; } #define NETDEV_STAT_NOT_SET (~0ULL) static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size) { const u64 *add = _add; u64 *sum = _sum; while (size) { if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET) *sum += *add; sum++; add++; size -= 8; } } static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value) { if (value == NETDEV_STAT_NOT_SET) return 0; return nla_put_uint(rsp, attr_id, value); } static int netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx) { if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_COMPLETE, rx->csum_complete) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) || netdev_stat_put(rsp, 
NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits)) return -EMSGSIZE; return 0; } static int netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx) { if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake)) return -EMSGSIZE; return 0; } static int netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp, u32 q_type, int i, const struct genl_info *info) { const struct netdev_stat_ops *ops = netdev->stat_ops; struct netdev_queue_stats_rx rx; struct netdev_queue_stats_tx tx; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) || nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) || nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i)) goto nla_put_failure; switch (q_type) { case NETDEV_QUEUE_TYPE_RX: memset(&rx, 0xff, sizeof(rx)); ops->get_queue_stats_rx(netdev, i, &rx); if (!memchr_inv(&rx, 0xff, sizeof(rx))) goto nla_cancel; if (netdev_nl_stats_write_rx(rsp, &rx)) goto nla_put_failure; break; case NETDEV_QUEUE_TYPE_TX: memset(&tx, 0xff, sizeof(tx)); ops->get_queue_stats_tx(netdev, i, &tx); if (!memchr_inv(&tx, 0xff, sizeof(tx))) goto nla_cancel; if (netdev_nl_stats_write_tx(rsp, &tx)) goto nla_put_failure; break; } genlmsg_end(rsp, hdr); return 0; nla_cancel: genlmsg_cancel(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { const struct netdev_stat_ops *ops = netdev->stat_ops; int i, err; if (!(netdev->flags & IFF_UP)) return 0; i = ctx->rxq_idx; while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) { err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX, i, info); if (err) return err; ctx->rxq_idx = ++i; } i = ctx->txq_idx; while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) { err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX, i, info); if (err) return err; ctx->txq_idx = ++i; } ctx->rxq_idx = 0; ctx->txq_idx = 0; return 0; } static int netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info) { struct netdev_queue_stats_rx rx_sum, rx; struct netdev_queue_stats_tx tx_sum, tx; const struct netdev_stat_ops *ops; void *hdr; int i; ops = netdev->stat_ops; /* Netdev can't guarantee any 
complete counters */ if (!ops->get_base_stats) return 0; memset(&rx_sum, 0xff, sizeof(rx_sum)); memset(&tx_sum, 0xff, sizeof(tx_sum)); ops->get_base_stats(netdev, &rx_sum, &tx_sum); /* The op was there, but nothing reported, don't bother */ if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) && !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum))) return 0; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex)) goto nla_put_failure; for (i = 0; i < netdev->real_num_rx_queues; i++) { memset(&rx, 0xff, sizeof(rx)); if (ops->get_queue_stats_rx) ops->get_queue_stats_rx(netdev, i, &rx); netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx)); } for (i = 0; i < netdev->real_num_tx_queues; i++) { memset(&tx, 0xff, sizeof(tx)); if (ops->get_queue_stats_tx) ops->get_queue_stats_tx(netdev, i, &tx); netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx)); } if (netdev_nl_stats_write_rx(rsp, &rx_sum) || netdev_nl_stats_write_tx(rsp, &tx_sum)) goto nla_put_failure; genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope, struct sk_buff *skb, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { if (!netdev->stat_ops) return 0; switch (scope) { case 0: return netdev_nl_stats_by_netdev(netdev, skb, info); case NETDEV_QSTATS_SCOPE_QUEUE: return netdev_nl_stats_by_queue(netdev, skb, info, ctx); } return -EINVAL; /* Should not happen, per netlink policy */ } int netdev_nl_qstats_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; unsigned int ifindex; unsigned int scope; int err = 0; scope = 0; if (info->attrs[NETDEV_A_QSTATS_SCOPE]) scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]); ifindex = 0; if (info->attrs[NETDEV_A_QSTATS_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]); rtnl_lock(); if (ifindex) { netdev = __dev_get_by_index(net, ifindex); if (netdev && netdev->stat_ops) { err = netdev_nl_qstats_get_dump_one(netdev, scope, skb, info, ctx); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_QSTATS_IFINDEX]); err = netdev ? 
-EOPNOTSUPP : -ENODEV; } } else { for_each_netdev_dump(net, netdev, ctx->ifindex) { err = netdev_nl_qstats_get_dump_one(netdev, scope, skb, info, ctx); if (err < 0) break; } } rtnl_unlock(); return err; } int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)]; struct net_devmem_dmabuf_binding *binding; u32 ifindex, dmabuf_fd, rxq_idx; struct netdev_nl_sock *priv; struct net_device *netdev; struct sk_buff *rsp; struct nlattr *attr; int rem, err = 0; void *hdr; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES)) return -EINVAL; ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]); dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]); priv = genl_sk_priv_get(&netdev_nl_family, NETLINK_CB(skb).sk); if (IS_ERR(priv)) return PTR_ERR(priv); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; hdr = genlmsg_iput(rsp, info); if (!hdr) { err = -EMSGSIZE; goto err_genlmsg_free; } mutex_lock(&priv->lock); err = 0; netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); if (!netdev) { err = -ENODEV; goto err_unlock_sock; } if (!netif_device_present(netdev)) err = -ENODEV; else if (!netdev_need_ops_lock(netdev)) err = -EOPNOTSUPP; if (err) { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_DEV_IFINDEX]); goto err_unlock; } binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack); if (IS_ERR(binding)) { err = PTR_ERR(binding); goto err_unlock; } nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES, genlmsg_data(info->genlhdr), genlmsg_len(info->genlhdr), rem) { err = nla_parse_nested( tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr, netdev_queue_id_nl_policy, info->extack); if (err < 0) goto err_unbind; if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) || NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) { err = -EINVAL; goto err_unbind; } if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) { NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]); err = -EINVAL; goto err_unbind; } rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]); err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding, info->extack); if (err) goto err_unbind; } list_add(&binding->list, &priv->bindings); nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id); genlmsg_end(rsp, hdr); err = genlmsg_reply(rsp, info); if (err) goto err_unbind; netdev_unlock(netdev); mutex_unlock(&priv->lock); return 0; err_unbind: net_devmem_unbind_dmabuf(binding); err_unlock: netdev_unlock(netdev); err_unlock_sock: mutex_unlock(&priv->lock); err_genlmsg_free: nlmsg_free(rsp); return err; } void netdev_nl_sock_priv_init(struct netdev_nl_sock *priv) { INIT_LIST_HEAD(&priv->bindings); mutex_init(&priv->lock); } void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv) { struct net_devmem_dmabuf_binding *binding; struct net_devmem_dmabuf_binding *temp; struct net_device *dev; mutex_lock(&priv->lock); list_for_each_entry_safe(binding, temp, &priv->bindings, list) { dev = binding->dev; netdev_lock(dev); net_devmem_unbind_dmabuf(binding); netdev_unlock(dev); } mutex_unlock(&priv->lock); } static int netdev_genl_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_REGISTER: netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF); break; case NETDEV_UNREGISTER: 
netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF); break; case NETDEV_XDP_FEAT_CHANGE: netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF); break; } return NOTIFY_OK; } static struct notifier_block netdev_genl_nb = { .notifier_call = netdev_genl_netdevice_event, }; static int __init netdev_genl_init(void) { int err; err = register_netdevice_notifier(&netdev_genl_nb); if (err) return err; err = genl_register_family(&netdev_nl_family); if (err) goto err_unreg_ntf; return 0; err_unreg_ntf: unregister_netdevice_notifier(&netdev_genl_nb); return err; } subsys_initcall(netdev_genl_init);
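/*
 * Driver-side illustration (ours, not part of the file above). The qstats
 * dump code pre-fills every counter with NETDEV_STAT_NOT_SET (memset 0xff)
 * and netdev_stat_put() skips unset fields, so a driver only reports what
 * it actually counts. A sketch under assumptions: the my_* names and the
 * per-queue lookup helper are hypothetical; the ops layout follows
 * struct netdev_stat_ops from <net/netdev_queues.h>.
 */
static void my_get_queue_stats_rx(struct net_device *dev, int idx,
				  struct netdev_queue_stats_rx *rx)
{
	struct my_rxq *q = my_netdev_rxq(dev, idx);	/* hypothetical */

	/* Only overwrite what the driver tracks; everything else stays
	 * NETDEV_STAT_NOT_SET and is omitted from the netlink reply. */
	rx->packets = q->rx_packets;
	rx->bytes = q->rx_bytes;
}

static void my_get_base_stats(struct net_device *dev,
			      struct netdev_queue_stats_rx *rx,
			      struct netdev_queue_stats_tx *tx)
{
	/* Base counters cover queues that no longer exist. Writing zero
	 * means "counted, and zero", which differs from not-set. */
	rx->packets = 0;
	rx->bytes = 0;
	tx->packets = 0;
	tx->bytes = 0;
}

static const struct netdev_stat_ops my_stat_ops = {
	.get_queue_stats_rx	= my_get_queue_stats_rx,
	.get_base_stats		= my_get_base_stats,
};

/*
 * A driver would point dev->stat_ops at my_stat_ops before register_netdev();
 * netdev_nl_stats_by_netdev() above then folds the per-queue values into the
 * base totals with netdev_nl_stats_add().
 */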
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Media entity * * Copyright (C) 2010 Nokia Corporation * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> */ #ifndef _MEDIA_ENTITY_H #define _MEDIA_ENTITY_H #include <linux/bitmap.h> #include <linux/bug.h> #include <linux/container_of.h> #include <linux/fwnode.h> #include <linux/list.h> #include <linux/media.h> #include <linux/minmax.h> #include <linux/types.h> /* Enums used internally at the media controller to represent graphs */ /** * enum media_gobj_type - type of a graph object * * @MEDIA_GRAPH_ENTITY: Identify a media entity * @MEDIA_GRAPH_PAD: Identify a media pad * @MEDIA_GRAPH_LINK: Identify a media link * @MEDIA_GRAPH_INTF_DEVNODE: Identify a media Kernel API interface via * a device node */ enum media_gobj_type { MEDIA_GRAPH_ENTITY, MEDIA_GRAPH_PAD, MEDIA_GRAPH_LINK, MEDIA_GRAPH_INTF_DEVNODE, }; #define MEDIA_BITS_PER_TYPE 8 #define
MEDIA_BITS_PER_ID (32 - MEDIA_BITS_PER_TYPE) #define MEDIA_ID_MASK GENMASK_ULL(MEDIA_BITS_PER_ID - 1, 0) /* Structs to represent the objects that belong to a media graph */ /** * struct media_gobj - Define a graph object. * * @mdev: Pointer to the struct &media_device that owns the object * @id: Non-zero object ID identifier. The ID should be unique * inside a media_device, as it is composed of * %MEDIA_BITS_PER_TYPE to store the type plus * %MEDIA_BITS_PER_ID to store the ID * @list: List entry stored in one of the per-type mdev object lists * * All objects on the media graph should have this struct embedded */ struct media_gobj { struct media_device *mdev; u32 id; struct list_head list; }; #define MEDIA_ENTITY_ENUM_MAX_DEPTH 16 /** * struct media_entity_enum - An enumeration of media entities. * * @bmap: Bit map in which each bit represents one entity at struct * media_entity->internal_idx. * @idx_max: Number of bits in bmap */ struct media_entity_enum { unsigned long *bmap; int idx_max; }; /** * struct media_graph - Media graph traversal state * * @stack: Graph traversal stack; the stack contains information * on the path of media entities to be walked and the * links through which they were reached. * @stack.entity: pointer to &struct media_entity in the graph. * @stack.link: pointer to &struct list_head. * @ent_enum: Visited entities * @top: The top of the stack */ struct media_graph { struct { struct media_entity *entity; struct list_head *link; } stack[MEDIA_ENTITY_ENUM_MAX_DEPTH]; struct media_entity_enum ent_enum; int top; }; /** * struct media_pipeline - Media pipeline related information * * @allocated: Media pipeline allocated and freed by the framework * @mdev: The media device the pipeline is part of * @pads: List of media_pipeline_pad * @start_count: Media pipeline start - stop count */ struct media_pipeline { bool allocated; struct media_device *mdev; struct list_head pads; int start_count; }; /** * struct media_pipeline_pad - A pad part of a media pipeline * * @list: Entry in the media_pad pads list * @pipe: The media_pipeline that the pad is part of * @pad: The media pad * * This structure associates a pad with a media pipeline. Instances of * media_pipeline_pad are created by media_pipeline_start() when it builds the * pipeline, and stored in the &media_pad.pads list. media_pipeline_stop() * removes the entries from the list and deletes them. */ struct media_pipeline_pad { struct list_head list; struct media_pipeline *pipe; struct media_pad *pad; }; /** * struct media_pipeline_pad_iter - Iterator for media_pipeline_for_each_pad * * @cursor: The current element */ struct media_pipeline_pad_iter { struct list_head *cursor; }; /** * struct media_pipeline_entity_iter - Iterator for media_pipeline_for_each_entity * * @ent_enum: The entity enumeration tracker * @cursor: The current element */ struct media_pipeline_entity_iter { struct media_entity_enum ent_enum; struct list_head *cursor; }; /** * struct media_link - A link object part of a media graph. * * @graph_obj: Embedded structure containing the media object common data * @list: Linked list associated with an entity or an interface that * owns the link. * @gobj0: Part of a union. Used to get the pointer for the first * graph_object of the link. * @source: Part of a union. Used only if the first object (gobj0) is * a pad. In that case, it represents the source pad. * @intf: Part of a union. Used only if the first object (gobj0) is * an interface. * @gobj1: Part of a union.
Used to get the pointer for the second * graph_object of the link. * @sink: Part of a union. Used only if the second object (gobj1) is * a pad. In that case, it represents the sink pad. * @entity: Part of a union. Used only if the second object (gobj1) is * an entity. * @reverse: Pointer to the link for the reverse direction of a pad to pad * link. * @flags: Link flags, as defined in uapi/media.h (MEDIA_LNK_FL_*) * @is_backlink: Indicate if the link is a backlink. */ struct media_link { struct media_gobj graph_obj; struct list_head list; union { struct media_gobj *gobj0; struct media_pad *source; struct media_interface *intf; }; union { struct media_gobj *gobj1; struct media_pad *sink; struct media_entity *entity; }; struct media_link *reverse; unsigned long flags; bool is_backlink; }; /** * enum media_pad_signal_type - type of the signal inside a media pad * * @PAD_SIGNAL_DEFAULT: * Default signal. Use this when all inputs or all outputs are * uniquely identified by the pad number. * @PAD_SIGNAL_ANALOG: * The pad contains an analog signal. It can be Radio Frequency, * Intermediate Frequency, a baseband signal or sub-carriers. * Tuner inputs, IF-PLL demodulators, composite and s-video signals * should use it. * @PAD_SIGNAL_DV: * Contains a digital video signal, which can be a bitstream of samples * taken from an analog TV video source. In that case, it usually * contains the VBI data. * @PAD_SIGNAL_AUDIO: * Contains an Intermediate Frequency analog signal from an audio * sub-carrier or an audio bitstream. IF signals are provided by tuners * and consumed by audio AM/FM decoders. Bitstream audio is provided by * an audio decoder. */ enum media_pad_signal_type { PAD_SIGNAL_DEFAULT = 0, PAD_SIGNAL_ANALOG, PAD_SIGNAL_DV, PAD_SIGNAL_AUDIO, }; /** * struct media_pad - A media pad graph object. * * @graph_obj: Embedded structure containing the media object common data * @entity: Entity this pad belongs to * @index: Pad index in the entity pads array, numbered from 0 to n * @num_links: Number of links connected to this pad * @sig_type: Type of the signal inside a media pad * @flags: Pad flags, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_PAD_FL_*``) * @pipe: Pipeline this pad belongs to. Use media_entity_pipeline() to * access this field. */ struct media_pad { struct media_gobj graph_obj; /* must be first field in struct */ struct media_entity *entity; u16 index; u16 num_links; enum media_pad_signal_type sig_type; unsigned long flags; /* * The fields below are private, and should only be accessed via * appropriate functions. */ struct media_pipeline *pipe; }; /** * struct media_entity_operations - Media entity operations * @get_fwnode_pad: Return the pad number based on a fwnode endpoint or * a negative value on error. This operation can be used * to map a fwnode to a media pad number. Optional. * @link_setup: Notify the entity of link changes. The operation can * return an error, in which case link setup will be * cancelled. Optional. * @link_validate: Return whether a link is valid from the entity point of * view. The media_pipeline_start() function * validates all links by calling this operation. Optional. * @has_pad_interdep: Return whether two pads of the entity are * interdependent. If two pads are interdependent they are * part of the same pipeline and enabling one of the pads * means that the other pad will become "locked" and * doesn't allow configuration changes. pad0 and pad1 are * guaranteed to not both be sinks or sources.
Never call * the .has_pad_interdep() operation directly, always use * media_entity_has_pad_interdep(). * Optional: If the operation isn't implemented all pads * will be considered interdependent. * * .. note:: * * These callbacks are called with the struct &media_device.graph_mutex * mutex held. */ struct media_entity_operations { int (*get_fwnode_pad)(struct media_entity *entity, struct fwnode_endpoint *endpoint); int (*link_setup)(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags); int (*link_validate)(struct media_link *link); bool (*has_pad_interdep)(struct media_entity *entity, unsigned int pad0, unsigned int pad1); }; /** * enum media_entity_type - Media entity type * * @MEDIA_ENTITY_TYPE_BASE: * The entity isn't embedded in another subsystem structure. * @MEDIA_ENTITY_TYPE_VIDEO_DEVICE: * The entity is embedded in a struct video_device instance. * @MEDIA_ENTITY_TYPE_V4L2_SUBDEV: * The entity is embedded in a struct v4l2_subdev instance. * * Media entity objects are often not instantiated directly, but the media * entity structure is inherited by (through embedding) other subsystem-specific * structures. The media entity type identifies the type of the subclass * structure that implements a media entity instance. * * This allows runtime type identification of media entities and safe casting to * the correct object type. For instance, a media entity structure instance * embedded in a v4l2_subdev structure instance will have the type * %MEDIA_ENTITY_TYPE_V4L2_SUBDEV and can safely be cast to a &v4l2_subdev * structure using the container_of() macro. */ enum media_entity_type { MEDIA_ENTITY_TYPE_BASE, MEDIA_ENTITY_TYPE_VIDEO_DEVICE, MEDIA_ENTITY_TYPE_V4L2_SUBDEV, }; /** * struct media_entity - A media entity graph object. * * @graph_obj: Embedded structure containing the media object common data. * @name: Entity name. * @obj_type: Type of the object that implements the media_entity. * @function: Entity main function, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_ENT_F_*``) * @flags: Entity flags, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_ENT_FL_*``) * @num_pads: Number of sink and source pads. * @num_links: Total number of links, forward and back, enabled and disabled. * @num_backlinks: Number of backlinks * @internal_idx: A unique internal entity-specific number. The numbers are * re-used if entities are unregistered or registered again. * @pads: Pads array with the size defined by @num_pads. * @links: List of data links. * @ops: Entity operations. * @use_count: Use count for the entity. * @info: Union with devnode information. Kept just for backward * compatibility. * @info.dev: Contains device major and minor info. * @info.dev.major: device node major, if the device is a devnode. * @info.dev.minor: device node minor, if the device is a devnode. * * .. note:: * * The @use_count reference count must never be negative, but is a signed * integer on purpose: a simple ``WARN_ON(<0)`` check can be used to detect * reference count bugs that would make it negative.
*/ struct media_entity { struct media_gobj graph_obj; /* must be first field in struct */ const char *name; enum media_entity_type obj_type; u32 function; unsigned long flags; u16 num_pads; u16 num_links; u16 num_backlinks; int internal_idx; struct media_pad *pads; struct list_head links; const struct media_entity_operations *ops; int use_count; union { struct { u32 major; u32 minor; } dev; } info; }; /** * media_entity_for_each_pad - Iterate on all pads in an entity * @entity: The entity the pads belong to * @iter: The iterator pad * * Iterate on all pads in a media entity. */ #define media_entity_for_each_pad(entity, iter) \ for (iter = (entity)->pads; \ iter < &(entity)->pads[(entity)->num_pads]; \ ++iter) /** * struct media_interface - A media interface graph object. * * @graph_obj: embedded graph object * @links: List of links pointing to graph entities * @type: Type of the interface as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_INTF_T_*``) * @flags: Interface flags as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_INTF_FL_*``) * * .. note:: * * Currently, no flags for &media_interface are defined. */ struct media_interface { struct media_gobj graph_obj; struct list_head links; u32 type; u32 flags; }; /** * struct media_intf_devnode - A media interface via a device node. * * @intf: embedded interface object * @major: Major number of a device node * @minor: Minor number of a device node */ struct media_intf_devnode { struct media_interface intf; /* Should match the fields at media_v2_intf_devnode */ u32 major; u32 minor; }; /** * media_entity_id() - return the media entity graph object id * * @entity: pointer to &media_entity */ static inline u32 media_entity_id(struct media_entity *entity) { return entity->graph_obj.id; } /** * media_type() - return the media object type * * @gobj: Pointer to the struct &media_gobj graph object */ static inline enum media_gobj_type media_type(struct media_gobj *gobj) { return gobj->id >> MEDIA_BITS_PER_ID; } /** * media_id() - return the media object ID * * @gobj: Pointer to the struct &media_gobj graph object */ static inline u32 media_id(struct media_gobj *gobj) { return gobj->id & MEDIA_ID_MASK; } /** * media_gobj_gen_id() - encapsulates type and ID into the object ID * * @type: object type as defined in enum &media_gobj_type. * @local_id: next ID, from struct &media_device.id. */ static inline u32 media_gobj_gen_id(enum media_gobj_type type, u64 local_id) { u32 id; id = type << MEDIA_BITS_PER_ID; id |= local_id & MEDIA_ID_MASK; return id; } /** * is_media_entity_v4l2_video_device() - Check if the entity is a video_device * @entity: pointer to entity * * Return: %true if the entity is an instance of a video_device object and can * safely be cast to a struct video_device using the container_of() macro, or * %false otherwise. */ static inline bool is_media_entity_v4l2_video_device(struct media_entity *entity) { return entity && entity->obj_type == MEDIA_ENTITY_TYPE_VIDEO_DEVICE; } /** * is_media_entity_v4l2_subdev() - Check if the entity is a v4l2_subdev * @entity: pointer to entity * * Return: %true if the entity is an instance of a &v4l2_subdev object and can * safely be cast to a struct &v4l2_subdev using the container_of() macro, or * %false otherwise.
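 *
 * For example (an illustrative sketch; &struct v4l2_subdev embeds its
 * &struct media_entity in a member named ``entity``)::
 *
 *	if (is_media_entity_v4l2_subdev(entity)) {
 *		struct v4l2_subdev *sd;
 *
 *		sd = container_of(entity, struct v4l2_subdev, entity);
 *	}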
*/ static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity) { return entity && entity->obj_type == MEDIA_ENTITY_TYPE_V4L2_SUBDEV; } /** * media_entity_enum_init - Initialise an entity enumeration * * @ent_enum: Entity enumeration to be initialised * @mdev: The related media device * * Return: zero on success or a negative error code. */ __must_check int media_entity_enum_init(struct media_entity_enum *ent_enum, struct media_device *mdev); /** * media_entity_enum_cleanup - Release resources of an entity enumeration * * @ent_enum: Entity enumeration to be released */ void media_entity_enum_cleanup(struct media_entity_enum *ent_enum); /** * media_entity_enum_zero - Clear the entire enum * * @ent_enum: Entity enumeration to be cleared */ static inline void media_entity_enum_zero(struct media_entity_enum *ent_enum) { bitmap_zero(ent_enum->bmap, ent_enum->idx_max); } /** * media_entity_enum_set - Mark a single entity in the enum * * @ent_enum: Entity enumeration * @entity: Entity to be marked */ static inline void media_entity_enum_set(struct media_entity_enum *ent_enum, struct media_entity *entity) { if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) return; __set_bit(entity->internal_idx, ent_enum->bmap); } /** * media_entity_enum_clear - Unmark a single entity in the enum * * @ent_enum: Entity enumeration * @entity: Entity to be unmarked */ static inline void media_entity_enum_clear(struct media_entity_enum *ent_enum, struct media_entity *entity) { if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) return; __clear_bit(entity->internal_idx, ent_enum->bmap); } /** * media_entity_enum_test - Test whether the entity is marked * * @ent_enum: Entity enumeration * @entity: Entity to be tested * * Returns %true if the entity was marked. */ static inline bool media_entity_enum_test(struct media_entity_enum *ent_enum, struct media_entity *entity) { if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) return true; return test_bit(entity->internal_idx, ent_enum->bmap); } /** * media_entity_enum_test_and_set - Test whether the entity is marked, * and mark it * * @ent_enum: Entity enumeration * @entity: Entity to be tested * * Marks the entity, and returns %true if it was already marked beforehand. */ static inline bool media_entity_enum_test_and_set(struct media_entity_enum *ent_enum, struct media_entity *entity) { if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) return true; return __test_and_set_bit(entity->internal_idx, ent_enum->bmap); } /** * media_entity_enum_empty - Test whether the entire enum is empty * * @ent_enum: Entity enumeration * * Return: %true if the entity enumeration is empty. */ static inline bool media_entity_enum_empty(struct media_entity_enum *ent_enum) { return bitmap_empty(ent_enum->bmap, ent_enum->idx_max); } /** * media_entity_enum_intersects - Test whether two enums intersect * * @ent_enum1: First entity enumeration * @ent_enum2: Second entity enumeration * * Return: %true if entity enumerations @ent_enum1 and @ent_enum2 intersect, * otherwise %false. */ static inline bool media_entity_enum_intersects( struct media_entity_enum *ent_enum1, struct media_entity_enum *ent_enum2) { WARN_ON(ent_enum1->idx_max != ent_enum2->idx_max); return bitmap_intersects(ent_enum1->bmap, ent_enum2->bmap, min(ent_enum1->idx_max, ent_enum2->idx_max)); } /** * gobj_to_entity - returns the struct &media_entity pointer that contains * the @gobj.
* * @gobj: Pointer to the struct &media_gobj graph object */ #define gobj_to_entity(gobj) \ container_of(gobj, struct media_entity, graph_obj) /** * gobj_to_pad - returns the struct &media_pad pointer that contains * the @gobj. * * @gobj: Pointer to the struct &media_gobj graph object */ #define gobj_to_pad(gobj) \ container_of(gobj, struct media_pad, graph_obj) /** * gobj_to_link - returns the struct &media_link pointer that contains * the @gobj. * * @gobj: Pointer to the struct &media_gobj graph object */ #define gobj_to_link(gobj) \ container_of(gobj, struct media_link, graph_obj) /** * gobj_to_intf - returns the struct &media_interface pointer that contains * the @gobj. * * @gobj: Pointer to the struct &media_gobj graph object */ #define gobj_to_intf(gobj) \ container_of(gobj, struct media_interface, graph_obj) /** * intf_to_devnode - returns the struct media_intf_devnode pointer that * contains the @intf. * * @intf: Pointer to struct &media_intf_devnode */ #define intf_to_devnode(intf) \ container_of(intf, struct media_intf_devnode, intf) /** * media_gobj_create - Initialize a graph object * * @mdev: Pointer to the &media_device that contains the object * @type: Type of the object * @gobj: Pointer to the struct &media_gobj graph object * * This routine initializes the embedded struct &media_gobj inside a * media graph object. It is called automatically if ``media_*_create`` * function calls are used. However, if the object (entity, link, pad, * interface) is embedded in some other object, this function should be * called before registering the object at the media controller. */ void media_gobj_create(struct media_device *mdev, enum media_gobj_type type, struct media_gobj *gobj); /** * media_gobj_destroy - Stop using a graph object on a media device * * @gobj: Pointer to the struct &media_gobj graph object * * This should be called by all routines like media_device_unregister() * that remove/destroy media graph objects. */ void media_gobj_destroy(struct media_gobj *gobj); /** * media_entity_pads_init() - Initialize the entity pads * * @entity: entity where the pads belong * @num_pads: total number of sink and source pads * @pads: Array of @num_pads pads. * * The pads array is managed by the entity driver and passed to * media_entity_pads_init() where its pointer will be stored in the * &media_entity structure. * * If no pads are needed, drivers could either directly fill * &media_entity->num_pads with 0 and &media_entity->pads with %NULL or call * this function that will do the same. * * As the number of pads is known in advance, the pads array is not allocated * dynamically but is managed by the entity driver. Most drivers will embed the * pads array in a driver-specific structure, avoiding dynamic allocation. * * Drivers must set the direction of every pad in the pads array before calling * media_entity_pads_init(). The function will initialize the other pads fields. */ int media_entity_pads_init(struct media_entity *entity, u16 num_pads, struct media_pad *pads); /** * media_entity_cleanup() - free resources associated with an entity * * @entity: entity where the pads belong * * This function must be called during the cleanup phase after unregistering * the entity (currently, it does nothing). * * Calling media_entity_cleanup() on a media_entity whose memory has been * zeroed but that has not been initialized with media_entity_pads_init() is * valid and is a no-op.
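 *
 * A typical init/cleanup pairing might look like this (an illustrative
 * sketch; ``priv`` is a hypothetical driver structure embedding a
 * two-pad entity)::
 *
 *	priv->pads[0].flags = MEDIA_PAD_FL_SINK;
 *	priv->pads[1].flags = MEDIA_PAD_FL_SOURCE;
 *	ret = media_entity_pads_init(&priv->entity, 2, priv->pads);
 *	if (ret)
 *		return ret;
 *	...
 *	media_entity_cleanup(&priv->entity);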
*/ #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) static inline void media_entity_cleanup(struct media_entity *entity) {} #else #define media_entity_cleanup(entity) do { } while (false) #endif /** * media_get_pad_index() - retrieves a pad index from an entity * * @entity: entity where the pads belong * @pad_type: the type of the pad, one of MEDIA_PAD_FL_* pad types * @sig_type: type of signal of the pad to be searched * * This helper function finds the first pad index inside an entity that * satisfies both the @pad_type and @sig_type conditions. * * Return: * * On success, return the pad number. If the pad was not found or the media * entity is a NULL pointer, return -EINVAL. */ int media_get_pad_index(struct media_entity *entity, u32 pad_type, enum media_pad_signal_type sig_type); /** * media_create_pad_link() - creates a link between two entities. * * @source: pointer to &media_entity of the source pad. * @source_pad: number of the source pad in the pads array * @sink: pointer to &media_entity of the sink pad. * @sink_pad: number of the sink pad in the pads array. * @flags: Link flags, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_LNK_FL_*``) * * Valid values for flags: * * %MEDIA_LNK_FL_ENABLED * Indicates that the link is enabled and can be used to transfer media data. * When two or more links target a sink pad, only one of them can be * enabled at a time. * * %MEDIA_LNK_FL_IMMUTABLE * Indicates that the link enabled state can't be modified at runtime. If * %MEDIA_LNK_FL_IMMUTABLE is set, then %MEDIA_LNK_FL_ENABLED must also be * set, since an immutable link is always enabled. * * .. note:: * * Before calling this function, media_entity_pads_init() and * media_device_register_entity() should be called for both ends. */ __must_check int media_create_pad_link(struct media_entity *source, u16 source_pad, struct media_entity *sink, u16 sink_pad, u32 flags); /** * media_create_pad_links() - creates links between two entities. * * @mdev: Pointer to the media_device that contains the object * @source_function: Function of the source entities. Used only if @source is * NULL. * @source: pointer to &media_entity of the source pad. If NULL, it will use * all entities that match the @source_function. * @source_pad: number of the source pad in the pads array * @sink_function: Function of the sink entities. Used only if @sink is NULL. * @sink: pointer to &media_entity of the sink pad. If NULL, it will use * all entities that match the @sink_function. * @sink_pad: number of the sink pad in the pads array. * @flags: Link flags, as defined in include/uapi/linux/media.h. * @allow_both_undefined: if %true, then both @source and @sink can be NULL. * In that case, it will create a crossbar between all entities that * match @source_function and all entities that match @sink_function. * If %false, it will return 0 and won't create any link if both @source * and @sink are NULL. * * Valid values for flags: * * A %MEDIA_LNK_FL_ENABLED flag indicates that the link is enabled and can be * used to transfer media data. If multiple links are created and this * flag is passed as an argument, only the first created link will have * this flag. * * A %MEDIA_LNK_FL_IMMUTABLE flag indicates that the link enabled state can't * be modified at runtime. If %MEDIA_LNK_FL_IMMUTABLE is set, then * %MEDIA_LNK_FL_ENABLED must also be set since an immutable link is * always enabled.
* * It is common for some devices to have multiple source and/or sink entities * of the same type that should be linked. While media_create_pad_link() * creates links one by one, this function is meant to allow 1:n, n:1 and even * cross-bar (n:n) links. * * .. note:: * * Before calling this function, media_entity_pads_init() and * media_device_register_entity() should be called for the * entities to be linked. */ int media_create_pad_links(const struct media_device *mdev, const u32 source_function, struct media_entity *source, const u16 source_pad, const u32 sink_function, struct media_entity *sink, const u16 sink_pad, u32 flags, const bool allow_both_undefined); void __media_entity_remove_links(struct media_entity *entity); /** * media_entity_remove_links() - remove all links associated with an entity * * @entity: pointer to &media_entity * * .. note:: * * This is called automatically when an entity is unregistered via * media_device_unregister_entity(). */ void media_entity_remove_links(struct media_entity *entity); /** * __media_entity_setup_link - Configure a media link without locking * @link: The link being configured * @flags: Link configuration flags * * The bulk of link setup is handled by the two entities connected through the * link. This function notifies both entities of the link configuration change. * * If the link is immutable or if the current and new configuration are * identical, return immediately. * * The user is expected to hold link->source->parent->mutex. If not, * media_entity_setup_link() should be used instead. */ int __media_entity_setup_link(struct media_link *link, u32 flags); /** * media_entity_setup_link() - changes the link flags properties at runtime * * @link: pointer to &media_link * @flags: the requested new link flags * * The only configurable property is the %MEDIA_LNK_FL_ENABLED link flag * to enable/disable a link. Links marked with the * %MEDIA_LNK_FL_IMMUTABLE link flag cannot be enabled or disabled. * * When a link is enabled or disabled, the media framework calls the * link_setup operation for the two entities at the source and sink of the * link, in that order. If the second link_setup call fails, another * link_setup call is made on the first entity to restore the original link * flags. * * Media device drivers can be notified of link setup operations by setting the * &media_device.link_notify pointer to a callback function. If provided, the * notification callback will be called before enabling and after disabling * links. * * Entity drivers must implement the link_setup operation if any of their links * is non-immutable. The operation must either configure the hardware or store * the configuration information to be applied later. * * Link configuration must not have any side effect on other links. If an * enabled link at a sink pad prevents another link at the same pad from * being enabled, the link_setup operation must return %-EBUSY and can't * implicitly disable the first enabled link. * * .. note:: * * The valid values of the flags for the link are the same as described * in media_create_pad_link(), for pad-to-pad links, or in * media_create_intf_link(), for interface-to-entity links. */ int media_entity_setup_link(struct media_link *link, u32 flags); /** * media_entity_find_link - Find a link between two pads * @source: Source pad * @sink: Sink pad * * Return: returns a pointer to the link between the two entities. If no * such link exists, return %NULL.
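 *
 * For instance (an illustrative sketch; ``src`` and ``sink`` are
 * hypothetical entities whose pads were linked with
 * media_create_pad_link())::
 *
 *	struct media_link *link;
 *
 *	link = media_entity_find_link(&src->pads[0], &sink->pads[0]);
 *	if (!link)
 *		return -ENOLINK;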
*/ struct media_link *media_entity_find_link(struct media_pad *source, struct media_pad *sink); /** * media_pad_remote_pad_first - Find the first pad at the remote end of a link * @pad: Pad at the local end of the link * * Search for a remote pad connected to the given pad by iterating over all * links originating or terminating at that pad until an enabled link is found. * * Return: returns a pointer to the pad at the remote end of the first found * enabled link, or %NULL if no enabled link has been found. */ struct media_pad *media_pad_remote_pad_first(const struct media_pad *pad); /** * media_pad_remote_pad_unique - Find a remote pad connected to a pad * @pad: The pad * * Search for and return a remote pad connected to @pad through an enabled * link. If multiple (or no) remote pads are found, an error is returned. * * The uniqueness constraint makes this helper function suitable for entities * that support a single active source at a time on a given pad. * * Return: A pointer to the remote pad, or one of the following error pointers * if an error occurs: * * * -ENOTUNIQ - Multiple links are enabled * * -ENOLINK - No connected pad found */ struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad); /** * media_entity_remote_pad_unique - Find a remote pad connected to an entity * @entity: The entity * @type: The type of pad to find (MEDIA_PAD_FL_SINK or MEDIA_PAD_FL_SOURCE) * * Search for and return a remote pad of @type connected to @entity through an * enabled link. If multiple (or no) remote pads match these criteria, an error * is returned. * * The uniqueness constraint makes this helper function suitable for entities * that support a single active source or sink at a time. * * Return: A pointer to the remote pad, or one of the following error pointers * if an error occurs: * * * -ENOTUNIQ - Multiple links are enabled * * -ENOLINK - No connected pad found */ struct media_pad * media_entity_remote_pad_unique(const struct media_entity *entity, unsigned int type); /** * media_entity_remote_source_pad_unique - Find a remote source pad connected to * an entity * @entity: The entity * * Search for and return a remote source pad connected to @entity through an * enabled link. If multiple (or no) remote pads match these criteria, an error * is returned. * * The uniqueness constraint makes this helper function suitable for entities * that support a single active source at a time. * * Return: A pointer to the remote pad, or one of the following error pointers * if an error occurs: * * * -ENOTUNIQ - Multiple links are enabled * * -ENOLINK - No connected pad found */ static inline struct media_pad * media_entity_remote_source_pad_unique(const struct media_entity *entity) { return media_entity_remote_pad_unique(entity, MEDIA_PAD_FL_SOURCE); } /** * media_pad_is_streaming - Test if a pad is part of a streaming pipeline * @pad: The pad * * Return: True if the pad is part of a pipeline started with the * media_pipeline_start() function, false otherwise. */ static inline bool media_pad_is_streaming(const struct media_pad *pad) { return pad->pipe; } /** * media_entity_is_streaming - Test if an entity is part of a streaming pipeline * @entity: The entity * * Return: True if the entity is part of a pipeline started with the * media_pipeline_start() function, false otherwise. 
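 *
 * For example, a driver may refuse configuration changes while streaming
 * (an illustrative sketch; ``sd`` is a hypothetical subdevice)::
 *
 *	if (media_entity_is_streaming(&sd->entity))
 *		return -EBUSY;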
*/ static inline bool media_entity_is_streaming(const struct media_entity *entity) { struct media_pad *pad; media_entity_for_each_pad(entity, pad) { if (media_pad_is_streaming(pad)) return true; } return false; } /** * media_entity_pipeline - Get the media pipeline an entity is part of * @entity: The entity * * DEPRECATED: use media_pad_pipeline() instead. * * This function returns the media pipeline that an entity has been associated * with when constructing the pipeline with media_pipeline_start(). The pointer * remains valid until media_pipeline_stop() is called. * * In general, entities can be part of multiple pipelines, when carrying * multiple streams (either on different pads, or on the same pad using * multiplexed streams). This function is to be used only for entities that * do not support multiple pipelines. * * Return: The media_pipeline the entity is part of, or NULL if the entity is * not part of any pipeline. */ struct media_pipeline *media_entity_pipeline(struct media_entity *entity); /** * media_pad_pipeline - Get the media pipeline a pad is part of * @pad: The pad * * This function returns the media pipeline that a pad has been associated * with when constructing the pipeline with media_pipeline_start(). The pointer * remains valid until media_pipeline_stop() is called. * * Return: The media_pipeline the pad is part of, or NULL if the pad is * not part of any pipeline. */ struct media_pipeline *media_pad_pipeline(struct media_pad *pad); /** * media_entity_get_fwnode_pad - Get pad number from fwnode * * @entity: The entity * @fwnode: Pointer to the fwnode_handle which should be used to find the pad * @direction_flags: Expected direction of the pad, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_PAD_FL_*``) * * This function can be used to resolve the media pad number from * a fwnode. This is useful for devices which use more complex * mappings of media pads. * * If the entity does not implement the get_fwnode_pad() operation * then this function searches the entity for the first pad that * matches the @direction_flags. * * Return: returns the pad number on success or a negative error code. */ int media_entity_get_fwnode_pad(struct media_entity *entity, const struct fwnode_handle *fwnode, unsigned long direction_flags); /** * media_graph_walk_init - Allocate resources used by graph walk. * * @graph: Media graph structure that will be used to walk the graph * @mdev: Pointer to the &media_device that contains the object * * This function is deprecated, use media_pipeline_for_each_pad() instead. * * The caller is required to hold the media_device graph_mutex during the graph * walk until the graph state is released. * * Returns zero on success or a negative error code otherwise. */ __must_check int media_graph_walk_init( struct media_graph *graph, struct media_device *mdev); /** * media_graph_walk_cleanup - Release resources used by graph walk. * * @graph: Media graph structure that will be used to walk the graph * * This function is deprecated, use media_pipeline_for_each_pad() instead. */ void media_graph_walk_cleanup(struct media_graph *graph); /** * media_graph_walk_start - Start walking the media graph at a * given entity * * @graph: Media graph structure that will be used to walk the graph * @entity: Starting entity * * This function is deprecated, use media_pipeline_for_each_pad() instead. * * Before using this function, media_graph_walk_init() must be * used to allocate resources used for walking the graph. 
This * function initializes the graph traversal structure to walk the * entities graph starting at the given entity. The traversal * structure must not be modified by the caller during graph * traversal. After the graph walk, the resources must be released * using media_graph_walk_cleanup(). */ void media_graph_walk_start(struct media_graph *graph, struct media_entity *entity); /** * media_graph_walk_next - Get the next entity in the graph * @graph: Media graph structure * * This function is deprecated, use media_pipeline_for_each_pad() instead. * * Perform a depth-first traversal of the given media entities graph. * * The graph structure must have been previously initialized with a call to * media_graph_walk_start(). * * Return: returns the next entity in the graph or %NULL if the whole graph * has been traversed. */ struct media_entity *media_graph_walk_next(struct media_graph *graph); /** * media_pipeline_start - Mark a pipeline as streaming * @origin: Starting pad * @pipe: Media pipeline to be assigned to all pads in the pipeline. * * Mark all pads connected to pad @origin through enabled links, either * directly or indirectly, as streaming. The given pipeline object is assigned * to every pad in the pipeline and stored in the media_pad pipe field. * * Calls to this function can be nested, in which case the same number of * media_pipeline_stop() calls will be required to stop streaming. The * pipeline pointer must be identical for all nested calls to * media_pipeline_start(). */ __must_check int media_pipeline_start(struct media_pad *origin, struct media_pipeline *pipe); /** * __media_pipeline_start - Mark a pipeline as streaming * * @origin: Starting pad * @pipe: Media pipeline to be assigned to all pads in the pipeline. * * .. note:: This is the non-locking version of media_pipeline_start() */ __must_check int __media_pipeline_start(struct media_pad *origin, struct media_pipeline *pipe); /** * media_pipeline_stop - Mark a pipeline as not streaming * @pad: Starting pad * * Mark all pads connected to a given pad through enabled links, either * directly or indirectly, as not streaming. The media_pad pipe field is * reset to %NULL. * * If multiple calls to media_pipeline_start() have been made, the same * number of calls to this function are required to mark the pipeline as not * streaming. */ void media_pipeline_stop(struct media_pad *pad); /** * __media_pipeline_stop - Mark a pipeline as not streaming * * @pad: Starting pad * * .. note:: This is the non-locking version of media_pipeline_stop() */ void __media_pipeline_stop(struct media_pad *pad); struct media_pad * __media_pipeline_pad_iter_next(struct media_pipeline *pipe, struct media_pipeline_pad_iter *iter, struct media_pad *pad); /** * media_pipeline_for_each_pad - Iterate on all pads in a media pipeline * @pipe: The pipeline * @iter: The iterator (struct media_pipeline_pad_iter) * @pad: The iterator pad * * Iterate on all pads in a media pipeline. This is only valid after the * pipeline has been built with media_pipeline_start() and before it gets * destroyed with media_pipeline_stop(). */ #define media_pipeline_for_each_pad(pipe, iter, pad) \ for (pad = __media_pipeline_pad_iter_next((pipe), iter, NULL); \ pad != NULL; \ pad = __media_pipeline_pad_iter_next((pipe), iter, pad)) /** * media_pipeline_entity_iter_init - Initialize a pipeline entity iterator * @pipe: The pipeline * @iter: The iterator * * This function must be called to initialize the iterator before using it in a * media_pipeline_for_each_entity() loop.
The iterator must be destroyed by a * call to media_pipeline_entity_iter_cleanup() after the loop (including in code * paths that break from the loop). * * The same iterator can be used in multiple consecutive loops without being * destroyed and reinitialized. * * Return: 0 on success or a negative error code otherwise. */ int media_pipeline_entity_iter_init(struct media_pipeline *pipe, struct media_pipeline_entity_iter *iter); /** * media_pipeline_entity_iter_cleanup - Destroy a pipeline entity iterator * @iter: The iterator * * This function must be called to destroy iterators initialized with * media_pipeline_entity_iter_init(). */ void media_pipeline_entity_iter_cleanup(struct media_pipeline_entity_iter *iter); struct media_entity * __media_pipeline_entity_iter_next(struct media_pipeline *pipe, struct media_pipeline_entity_iter *iter, struct media_entity *entity); /** * media_pipeline_for_each_entity - Iterate on all entities in a media pipeline * @pipe: The pipeline * @iter: The iterator (struct media_pipeline_entity_iter) * @entity: The iterator entity * * Iterate on all entities in a media pipeline. This is only valid after the * pipeline has been built with media_pipeline_start() and before it gets * destroyed with media_pipeline_stop(). The iterator must be initialized with * media_pipeline_entity_iter_init() before iteration, and destroyed with * media_pipeline_entity_iter_cleanup() after (including in code paths that * break from the loop). */ #define media_pipeline_for_each_entity(pipe, iter, entity) \ for (entity = __media_pipeline_entity_iter_next((pipe), iter, NULL); \ entity != NULL; \ entity = __media_pipeline_entity_iter_next((pipe), iter, entity)) /** * media_pipeline_alloc_start - Mark a pipeline as streaming * @pad: Starting pad * * media_pipeline_alloc_start() is similar to media_pipeline_start() but instead * of working on a given pipeline the function will use an existing pipeline if * the pad is already part of a pipeline, or allocate a new pipeline. * * Calls to media_pipeline_alloc_start() must be matched with * media_pipeline_stop(). */ __must_check int media_pipeline_alloc_start(struct media_pad *pad); /** * media_devnode_create() - creates and initializes a device node interface * * @mdev: pointer to struct &media_device * @type: type of the interface, as given by * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_INTF_T_*``) macros. * @flags: Interface flags, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_INTF_FL_*``) * @major: Device node major number. * @minor: Device node minor number. * * Return: on success, returns a pointer to the newly allocated * &media_intf_devnode. * * .. note:: * * Currently, no flags for &media_interface are defined. */ struct media_intf_devnode * __must_check media_devnode_create(struct media_device *mdev, u32 type, u32 flags, u32 major, u32 minor); /** * media_devnode_remove() - removes a device node interface * * @devnode: pointer to &media_intf_devnode to be freed. * * When a device node interface is removed, all links to it are automatically * removed.
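 *
 * A create/remove pairing sketch (illustrative; assumes a registered
 * &media_device ``mdev`` and device node ``major``/``minor`` numbers)::
 *
 *	struct media_intf_devnode *devnode;
 *
 *	devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO, 0,
 *				       major, minor);
 *	if (!devnode)
 *		return -ENOMEM;
 *	...
 *	media_devnode_remove(devnode);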
*/ void media_devnode_remove(struct media_intf_devnode *devnode); /** * media_create_intf_link() - creates a link between an entity and an interface * * @entity: pointer to %media_entity * @intf: pointer to %media_interface * @flags: Link flags, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_LNK_FL_*``) * * Valid values for flags: * * %MEDIA_LNK_FL_ENABLED * Indicates that the interface is connected to the entity hardware. * That's the default value for interfaces. An interface may be disabled if * the hardware is busy due to the usage of some other interface that is * currently controlling the hardware. * * A typical example is a hybrid TV device that handles only one type of * stream at a given time. So, when the digital TV is streaming, * the V4L2 interfaces won't be enabled, as such a device is not able to * also stream analog TV or radio. * * .. note:: * * Before calling this function, media_devnode_create() should be called for * the interface and media_device_register_entity() should be called for the * entity that will be part of the link. */ struct media_link * __must_check media_create_intf_link(struct media_entity *entity, struct media_interface *intf, u32 flags); /** * __media_remove_intf_link() - remove a single interface link * * @link: pointer to &media_link. * * .. note:: This is an unlocked version of media_remove_intf_link() */ void __media_remove_intf_link(struct media_link *link); /** * media_remove_intf_link() - remove a single interface link * * @link: pointer to &media_link. * * .. note:: Prefer to use this one, instead of __media_remove_intf_link() */ void media_remove_intf_link(struct media_link *link); /** * __media_remove_intf_links() - remove all links associated with an interface * * @intf: pointer to &media_interface * * .. note:: This is an unlocked version of media_remove_intf_links(). */ void __media_remove_intf_links(struct media_interface *intf); /** * media_remove_intf_links() - remove all links associated with an interface * * @intf: pointer to &media_interface * * .. note:: * * #) This is called automatically when an entity is unregistered via * media_device_unregister_entity() and by media_devnode_remove(). * * #) Prefer to use this one, instead of __media_remove_intf_links(). */ void media_remove_intf_links(struct media_interface *intf); /** * media_entity_call - Calls a struct media_entity_operations operation on * an entity * * @entity: entity where the @operation will be called * @operation: type of the operation. Should be the name of a member of * struct &media_entity_operations. * * This helper function will check if @operation is not %NULL. In that case, * it will issue a call to @operation\(@entity, @args\). */ #define media_entity_call(entity, operation, args...) \ (((entity)->ops && (entity)->ops->operation) ? \ (entity)->ops->operation((entity) , ##args) : -ENOIOCTLCMD) /** * media_create_ancillary_link() - create an ancillary link between two * instances of &media_entity * * @primary: pointer to the primary &media_entity * @ancillary: pointer to the ancillary &media_entity * * Create an ancillary link between two entities, indicating that they * represent two connected pieces of hardware that form a single logical unit. * A typical example is a camera lens controller being linked to the sensor that * it supports. * * The function sets both MEDIA_LNK_FL_ENABLED and MEDIA_LNK_FL_IMMUTABLE for * the new link.
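 *
 * For instance (an illustrative sketch with hypothetical ``sensor`` and
 * ``lens`` entities; the function is assumed to return an ERR_PTR value
 * on failure)::
 *
 *	struct media_link *link;
 *
 *	link = media_create_ancillary_link(sensor, lens);
 *	if (IS_ERR(link))
 *		return PTR_ERR(link);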
*/ struct media_link * media_create_ancillary_link(struct media_entity *primary, struct media_entity *ancillary); /** * __media_entity_next_link() - Iterate through a &media_entity's links * * @entity: pointer to the &media_entity * @link: pointer to a &media_link to hold the iterated values * @link_type: one of the MEDIA_LNK_FL_LINK_TYPE flags * * Return the next link against an entity matching a specific link type. This * allows iteration through an entity's links whilst guaranteeing all of the * returned links are of the given type. */ struct media_link *__media_entity_next_link(struct media_entity *entity, struct media_link *link, unsigned long link_type); /** * for_each_media_entity_data_link() - Iterate through an entity's data links * * @entity: pointer to the &media_entity * @link: pointer to a &media_link to hold the iterated values * * Iterate over a &media_entity's data links */ #define for_each_media_entity_data_link(entity, link) \ for (link = __media_entity_next_link(entity, NULL, \ MEDIA_LNK_FL_DATA_LINK); \ link; \ link = __media_entity_next_link(entity, link, \ MEDIA_LNK_FL_DATA_LINK)) #endif
// SPDX-License-Identifier: GPL-2.0+ /* * module/drivers.c * functions for manipulating drivers * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net> */ #include <linux/device.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/dma-direction.h> #include <linux/interrupt.h> #include <linux/firmware.h> #include <linux/comedi/comedidev.h> #include "comedi_internal.h" struct comedi_driver *comedi_drivers; /* protects access to comedi_drivers */ DEFINE_MUTEX(comedi_drivers_list_lock); /** * comedi_set_hw_dev() - Set hardware device associated with COMEDI device * @dev: COMEDI device. * @hw_dev: Hardware device. * * For automatically configured COMEDI devices (resulting from a call to * comedi_auto_config() or one of its wrappers from the low-level COMEDI * driver), comedi_set_hw_dev() is called automatically by the COMEDI core * to associate the COMEDI device with the hardware device. It can also be * called directly by "legacy" low-level COMEDI drivers that rely on the * %COMEDI_DEVCONFIG ioctl to configure the hardware as long as the hardware * has a &struct device. * * If @dev->hw_dev is NULL, it gets a reference to @hw_dev and sets * @dev->hw_dev, otherwise, it does nothing. Calling it multiple times * with the same hardware device is not considered an error. If it gets * a reference to the hardware device, it will be automatically 'put' when * the device is detached from COMEDI. * * Returns 0 if @dev->hw_dev was NULL or the same as @hw_dev, otherwise * returns -EEXIST.
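 *
 * For example, a legacy driver attaching to a PCI card might do (an
 * illustrative sketch; ``pcidev`` is hypothetical)::
 *
 *	ret = comedi_set_hw_dev(dev, &pcidev->dev);
 *	if (ret)
 *		return ret;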
*/ int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev) { if (hw_dev == dev->hw_dev) return 0; if (dev->hw_dev) return -EEXIST; dev->hw_dev = get_device(hw_dev); return 0; } EXPORT_SYMBOL_GPL(comedi_set_hw_dev); static void comedi_clear_hw_dev(struct comedi_device *dev) { put_device(dev->hw_dev); dev->hw_dev = NULL; } /** * comedi_alloc_devpriv() - Allocate memory for the device private data * @dev: COMEDI device. * @size: Size of the memory to allocate. * * The allocated memory is zero-filled. @dev->private points to it on * return. The memory will be automatically freed when the COMEDI device is * "detached". * * Returns a pointer to the allocated memory, or NULL on failure. */ void *comedi_alloc_devpriv(struct comedi_device *dev, size_t size) { dev->private = kzalloc(size, GFP_KERNEL); return dev->private; } EXPORT_SYMBOL_GPL(comedi_alloc_devpriv); /** * comedi_alloc_subdevices() - Allocate subdevices for COMEDI device * @dev: COMEDI device. * @num_subdevices: Number of subdevices to allocate. * * Allocates and initializes an array of &struct comedi_subdevice for the * COMEDI device. If successful, sets @dev->subdevices to point to the * first one and @dev->n_subdevices to the number. * * Returns 0 on success, -EINVAL if @num_subdevices is < 1, or -ENOMEM if * failed to allocate the memory. */ int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices) { struct comedi_subdevice *s; int i; if (num_subdevices < 1) return -EINVAL; s = kcalloc(num_subdevices, sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; dev->subdevices = s; dev->n_subdevices = num_subdevices; for (i = 0; i < num_subdevices; ++i) { s = &dev->subdevices[i]; s->device = dev; s->index = i; s->async_dma_dir = DMA_NONE; spin_lock_init(&s->spin_lock); s->minor = -1; } return 0; } EXPORT_SYMBOL_GPL(comedi_alloc_subdevices); /** * comedi_alloc_subdev_readback() - Allocate memory for the subdevice readback * @s: COMEDI subdevice. * * This is called by low-level COMEDI drivers to allocate an array to record * the last values written to a subdevice's analog output channels (at least * by the %INSN_WRITE instruction), to allow them to be read back by an * %INSN_READ instruction. It also provides a default handler for the * %INSN_READ instruction unless one has already been set. * * On success, @s->readback points to the first element of the array, which * is zero-filled. The low-level driver is responsible for updating its * contents. @s->insn_read will be set to comedi_readback_insn_read() * unless it is already non-NULL. * * Returns 0 on success, -EINVAL if the subdevice has no channels, or * -ENOMEM on allocation failure. 
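 *
 * A typical analog output subdevice setup might end with this (an
 * illustrative sketch; ``my_ao_insn_write`` is a hypothetical handler)::
 *
 *	s = &dev->subdevices[0];
 *	s->type = COMEDI_SUBD_AO;
 *	s->subdev_flags = SDF_WRITABLE;
 *	s->n_chan = 4;
 *	s->maxdata = 0xffff;
 *	s->insn_write = my_ao_insn_write;
 *	ret = comedi_alloc_subdev_readback(s);
 *	if (ret)
 *		return ret;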
*/ int comedi_alloc_subdev_readback(struct comedi_subdevice *s) { if (!s->n_chan) return -EINVAL; s->readback = kcalloc(s->n_chan, sizeof(*s->readback), GFP_KERNEL); if (!s->readback) return -ENOMEM; if (!s->insn_read) s->insn_read = comedi_readback_insn_read; return 0; } EXPORT_SYMBOL_GPL(comedi_alloc_subdev_readback); static void comedi_device_detach_cleanup(struct comedi_device *dev) { int i; struct comedi_subdevice *s; lockdep_assert_held(&dev->attach_lock); lockdep_assert_held(&dev->mutex); if (dev->subdevices) { for (i = 0; i < dev->n_subdevices; i++) { s = &dev->subdevices[i]; if (comedi_can_auto_free_spriv(s)) kfree(s->private); comedi_free_subdevice_minor(s); if (s->async) { comedi_buf_alloc(dev, s, 0); kfree(s->async); } kfree(s->readback); } kfree(dev->subdevices); dev->subdevices = NULL; dev->n_subdevices = 0; } kfree(dev->private); if (!IS_ERR(dev->pacer)) kfree(dev->pacer); dev->private = NULL; dev->pacer = NULL; dev->driver = NULL; dev->board_name = NULL; dev->board_ptr = NULL; dev->mmio = NULL; dev->iobase = 0; dev->iolen = 0; dev->ioenabled = false; dev->irq = 0; dev->read_subdev = NULL; dev->write_subdev = NULL; dev->open = NULL; dev->close = NULL; comedi_clear_hw_dev(dev); } void comedi_device_detach(struct comedi_device *dev) { lockdep_assert_held(&dev->mutex); comedi_device_cancel_all(dev); down_write(&dev->attach_lock); dev->attached = false; dev->detach_count++; if (dev->driver) dev->driver->detach(dev); comedi_device_detach_cleanup(dev); up_write(&dev->attach_lock); } static int poll_invalid(struct comedi_device *dev, struct comedi_subdevice *s) { return -EINVAL; } static int insn_device_inval(struct comedi_device *dev, struct comedi_insn *insn, unsigned int *data) { return -EINVAL; } static unsigned int get_zero_valid_routes(struct comedi_device *dev, unsigned int n_pairs, unsigned int *pair_data) { return 0; } int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { return -EINVAL; } /** * comedi_readback_insn_read() - A generic (*insn_read) for subdevice readback. * @dev: COMEDI device. * @s: COMEDI subdevice. * @insn: COMEDI instruction. * @data: Pointer to return the readback data. * * Handles the %INSN_READ instruction for subdevices that use the readback * array allocated by comedi_alloc_subdev_readback(). It may be used * directly as the subdevice's handler (@s->insn_read) or called via a * wrapper. * * @insn->n is normally 1, which will read a single value. If higher, the * same element of the readback array will be read multiple times. * * Returns @insn->n on success, or -EINVAL if @s->readback is NULL. */ int comedi_readback_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); int i; if (!s->readback) return -EINVAL; for (i = 0; i < insn->n; i++) data[i] = s->readback[chan]; return insn->n; } EXPORT_SYMBOL_GPL(comedi_readback_insn_read); /** * comedi_timeout() - Busy-wait for a driver condition to occur * @dev: COMEDI device. * @s: COMEDI subdevice. * @insn: COMEDI instruction. * @cb: Callback to check for the condition. * @context: Private context from the driver. * * Busy-waits for up to a second (%COMEDI_TIMEOUT_MS) for the condition or * some error (other than -EBUSY) to occur. 
The parameters @dev, @s, @insn, * and @context are passed to the callback function, which returns -EBUSY to * continue waiting or some other value to stop waiting (generally 0 if the * condition occurred, or some error value). * * Returns -ETIMEDOUT if timed out, otherwise the return value from the * callback function. */ int comedi_timeout(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, int (*cb)(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned long context), unsigned long context) { unsigned long timeout = jiffies + msecs_to_jiffies(COMEDI_TIMEOUT_MS); int ret; while (time_before(jiffies, timeout)) { ret = cb(dev, s, insn, context); if (ret != -EBUSY) return ret; /* success (0) or non-EBUSY errno */ cpu_relax(); } return -ETIMEDOUT; } EXPORT_SYMBOL_GPL(comedi_timeout); /** * comedi_dio_insn_config() - Boilerplate (*insn_config) for DIO subdevices * @dev: COMEDI device. * @s: COMEDI subdevice. * @insn: COMEDI instruction. * @data: Instruction parameters and return data. * @mask: io_bits mask for grouped channels, or 0 for single channel. * * If @mask is 0, it is replaced with a single-bit mask corresponding to the * channel number specified by @insn->chanspec. Otherwise, @mask * corresponds to a group of channels (which should include the specified * channel) that are always configured together as inputs or outputs. * * Partially handles the %INSN_CONFIG_DIO_INPUT, %INSN_CONFIG_DIO_OUTPUT, * and %INSN_CONFIG_DIO_QUERY instructions. The first two update * @s->io_bits to record the directions of the masked channels. The last * one sets @data[1] to the current direction of the group of channels * (%COMEDI_INPUT or %COMEDI_OUTPUT) as recorded in @s->io_bits. * * The caller is responsible for updating the DIO direction in the hardware * registers if this function returns 0. * * Returns 0 for a %INSN_CONFIG_DIO_INPUT or %INSN_CONFIG_DIO_OUTPUT * instruction, @insn->n (> 0) for a %INSN_CONFIG_DIO_QUERY instruction, or * -EINVAL for some other instruction. */ int comedi_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data, unsigned int mask) { unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec); if (!mask) mask = chan_mask; switch (data[0]) { case INSN_CONFIG_DIO_INPUT: s->io_bits &= ~mask; break; case INSN_CONFIG_DIO_OUTPUT: s->io_bits |= mask; break; case INSN_CONFIG_DIO_QUERY: data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT; return insn->n; default: return -EINVAL; } return 0; } EXPORT_SYMBOL_GPL(comedi_dio_insn_config); /** * comedi_dio_update_state() - Update the internal state of DIO subdevices * @s: COMEDI subdevice. * @data: The channel mask and bits to update. * * Updates @s->state which holds the internal state of the outputs for DIO * or DO subdevices (up to 32 channels). @data[0] contains a bit-mask of * the channels to be updated. @data[1] contains a bit-mask of those * channels to be set to '1'. The caller is responsible for updating the * outputs in hardware according to @s->state. As a minimum, the channels * in the returned bit-mask need to be updated. * * Returns the update mask (@data[0]) with non-existent channels removed. */ unsigned int comedi_dio_update_state(struct comedi_subdevice *s, unsigned int *data) { unsigned int chanmask = (s->n_chan < 32) ?
((1 << s->n_chan) - 1) : 0xffffffff; unsigned int mask = data[0] & chanmask; unsigned int bits = data[1]; if (mask) { s->state &= ~mask; s->state |= (bits & mask); } return mask; } EXPORT_SYMBOL_GPL(comedi_dio_update_state); /** * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in * bytes * @s: COMEDI subdevice. * @cmd: COMEDI command. * * Determines the overall scan length according to the subdevice type and the * number of channels in the scan for the specified command. * * For digital input, output or input/output subdevices, samples for * multiple channels are assumed to be packed into one or more unsigned * short or unsigned int values according to the subdevice's %SDF_LSAMPL * flag. For other types of subdevice, samples are assumed to occupy a * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag. * * Returns the overall scan length in bytes. */ unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s, struct comedi_cmd *cmd) { unsigned int num_samples; unsigned int bits_per_sample; switch (s->type) { case COMEDI_SUBD_DI: case COMEDI_SUBD_DO: case COMEDI_SUBD_DIO: bits_per_sample = 8 * comedi_bytes_per_sample(s); num_samples = DIV_ROUND_UP(cmd->scan_end_arg, bits_per_sample); break; default: num_samples = cmd->scan_end_arg; break; } return comedi_samples_to_bytes(s, num_samples); } EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd); /** * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes * @s: COMEDI subdevice. * * Determines the overall scan length according to the subdevice type and the * number of channels in the scan for the current command. * * For digital input, output or input/output subdevices, samples for * multiple channels are assumed to be packed into one or more unsigned * short or unsigned int values according to the subdevice's %SDF_LSAMPL * flag. For other types of subdevice, samples are assumed to occupy a * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag. * * Returns the overall scan length in bytes. */ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) { struct comedi_cmd *cmd = &s->async->cmd; return comedi_bytes_per_scan_cmd(s, cmd); } EXPORT_SYMBOL_GPL(comedi_bytes_per_scan); static unsigned int __comedi_nscans_left(struct comedi_subdevice *s, unsigned int nscans) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; if (cmd->stop_src == TRIG_COUNT) { unsigned int scans_left = 0; if (async->scans_done < cmd->stop_arg) scans_left = cmd->stop_arg - async->scans_done; if (nscans > scans_left) nscans = scans_left; } return nscans; } /** * comedi_nscans_left() - Return the number of scans left in the command * @s: COMEDI subdevice. * @nscans: The expected number of scans or 0 for all available scans. * * If @nscans is 0, it is set to the number of scans available in the * async buffer. * * If the async command has a stop_src of %TRIG_COUNT, the @nscans will be * checked against the number of scans remaining to complete the command. * * The return value will then be either the expected number of scans or the * number of scans remaining to complete the command, whichever is fewer. 
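 *
 * For example, an interrupt handler draining the async buffer might do
 * (an illustrative sketch)::
 *
 *	unsigned int nscans = comedi_nscans_left(s, 0);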
*/ unsigned int comedi_nscans_left(struct comedi_subdevice *s, unsigned int nscans) { if (nscans == 0) { unsigned int nbytes = comedi_buf_read_n_available(s); nscans = nbytes / comedi_bytes_per_scan(s); } return __comedi_nscans_left(s, nscans); } EXPORT_SYMBOL_GPL(comedi_nscans_left); /** * comedi_nsamples_left() - Return the number of samples left in the command * @s: COMEDI subdevice. * @nsamples: The expected number of samples. * * Returns the number of samples remaining to complete the command, or the * specified expected number of samples (@nsamples), whichever is fewer. */ unsigned int comedi_nsamples_left(struct comedi_subdevice *s, unsigned int nsamples) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned long long scans_left; unsigned long long samples_left; if (cmd->stop_src != TRIG_COUNT) return nsamples; scans_left = __comedi_nscans_left(s, cmd->stop_arg); if (!scans_left) return 0; samples_left = scans_left * cmd->scan_end_arg - comedi_bytes_to_samples(s, async->scan_progress); if (samples_left < nsamples) return samples_left; return nsamples; } EXPORT_SYMBOL_GPL(comedi_nsamples_left); /** * comedi_inc_scan_progress() - Update scan progress in asynchronous command * @s: COMEDI subdevice. * @num_bytes: Amount of data in bytes to increment scan progress. * * Increments the scan progress by the number of bytes specified by @num_bytes. * If the scan progress reaches or exceeds the scan length in bytes, reduce * it modulo the scan length in bytes and set the "end of scan" asynchronous * event flag (%COMEDI_CB_EOS) to be processed later. */ void comedi_inc_scan_progress(struct comedi_subdevice *s, unsigned int num_bytes) { struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int scan_length = comedi_bytes_per_scan(s); /* track the 'cur_chan' for non-SDF_PACKED subdevices */ if (!(s->subdev_flags & SDF_PACKED)) { async->cur_chan += comedi_bytes_to_samples(s, num_bytes); async->cur_chan %= cmd->chanlist_len; } async->scan_progress += num_bytes; if (async->scan_progress >= scan_length) { unsigned int nscans = async->scan_progress / scan_length; if (async->scans_done < (UINT_MAX - nscans)) async->scans_done += nscans; else async->scans_done = UINT_MAX; async->scan_progress %= scan_length; async->events |= COMEDI_CB_EOS; } } EXPORT_SYMBOL_GPL(comedi_inc_scan_progress); /** * comedi_handle_events() - Handle events and possibly stop acquisition * @dev: COMEDI device. * @s: COMEDI subdevice. * * Handles outstanding asynchronous acquisition event flags associated * with the subdevice. Call the subdevice's @s->cancel() handler if the * "end of acquisition", "error" or "overflow" event flags are set in order * to stop the acquisition at the driver level. * * Calls comedi_event() to further process the event flags, which may mark * the asynchronous command as no longer running, possibly terminated with * an error, and may wake up tasks. * * Return a bit-mask of the handled events. 
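 *
 * A typical interrupt handler might finish like this (an illustrative
 * sketch; ``finished`` stands for a driver-specific completion check)::
 *
 *	if (finished)
 *		s->async->events |= COMEDI_CB_EOA;
 *	comedi_handle_events(dev, s);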
*/ unsigned int comedi_handle_events(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned int events = s->async->events; if (events == 0) return events; if ((events & COMEDI_CB_CANCEL_MASK) && s->cancel) s->cancel(dev, s); comedi_event(dev, s); return events; } EXPORT_SYMBOL_GPL(comedi_handle_events); static int insn_rw_emulate_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct comedi_insn _insn; unsigned int chan = CR_CHAN(insn->chanspec); unsigned int base_chan = (chan < 32) ? 0 : chan; unsigned int _data[2]; int ret; memset(_data, 0, sizeof(_data)); memset(&_insn, 0, sizeof(_insn)); _insn.insn = INSN_BITS; _insn.chanspec = base_chan; _insn.n = 2; _insn.subdev = insn->subdev; if (insn->insn == INSN_WRITE) { if (!(s->subdev_flags & SDF_WRITABLE)) return -EINVAL; _data[0] = 1 << (chan - base_chan); /* mask */ _data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */ } ret = s->insn_bits(dev, s, &_insn, _data); if (ret < 0) return ret; if (insn->insn == INSN_READ) data[0] = (_data[1] >> (chan - base_chan)) & 1; return 1; } static int __comedi_device_postconfig_async(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_async *async; unsigned int buf_size; int ret; lockdep_assert_held(&dev->mutex); if ((s->subdev_flags & (SDF_CMD_READ | SDF_CMD_WRITE)) == 0) { dev_warn(dev->class_dev, "async subdevices must support SDF_CMD_READ or SDF_CMD_WRITE\n"); return -EINVAL; } if (!s->do_cmdtest) { dev_warn(dev->class_dev, "async subdevices must have a do_cmdtest() function\n"); return -EINVAL; } if (!s->cancel) dev_warn(dev->class_dev, "async subdevices should have a cancel() function\n"); async = kzalloc(sizeof(*async), GFP_KERNEL); if (!async) return -ENOMEM; init_waitqueue_head(&async->wait_head); s->async = async; async->max_bufsize = comedi_default_buf_maxsize_kb * 1024; buf_size = comedi_default_buf_size_kb * 1024; if (buf_size > async->max_bufsize) buf_size = async->max_bufsize; if (comedi_buf_alloc(dev, s, buf_size) < 0) { dev_warn(dev->class_dev, "Buffer allocation failed\n"); return -ENOMEM; } if (s->buf_change) { ret = s->buf_change(dev, s); if (ret < 0) return ret; } comedi_alloc_subdevice_minor(s); return 0; } static int __comedi_device_postconfig(struct comedi_device *dev) { struct comedi_subdevice *s; int ret; int i; lockdep_assert_held(&dev->mutex); if (!dev->insn_device_config) dev->insn_device_config = insn_device_inval; if (!dev->get_valid_routes) dev->get_valid_routes = get_zero_valid_routes; for (i = 0; i < dev->n_subdevices; i++) { s = &dev->subdevices[i]; if (s->type == COMEDI_SUBD_UNUSED) continue; if (s->type == COMEDI_SUBD_DO) { if (s->n_chan < 32) s->io_bits = (1 << s->n_chan) - 1; else s->io_bits = 0xffffffff; } if (s->len_chanlist == 0) s->len_chanlist = 1; if (s->do_cmd) { ret = __comedi_device_postconfig_async(dev, s); if (ret) return ret; } if (!s->range_table && !s->range_table_list) s->range_table = &range_unknown; if (!s->insn_read && s->insn_bits) s->insn_read = insn_rw_emulate_bits; if (!s->insn_write && s->insn_bits) s->insn_write = insn_rw_emulate_bits; if (!s->insn_read) s->insn_read = insn_inval; if (!s->insn_write) s->insn_write = insn_inval; if (!s->insn_bits) s->insn_bits = insn_inval; if (!s->insn_config) s->insn_config = insn_inval; if (!s->poll) s->poll = poll_invalid; } return 0; } /* do a little post-config cleanup */ static int comedi_device_postconfig(struct comedi_device *dev) { int ret; lockdep_assert_held(&dev->mutex); ret = 
__comedi_device_postconfig(dev); if (ret < 0) return ret; down_write(&dev->attach_lock); dev->attached = true; up_write(&dev->attach_lock); return 0; } /* * Generic recognize function for drivers that register their supported * board names. * * 'driv->board_name' points to a 'const char *' member within the * zeroth element of an array of some private board information * structure, say 'struct foo_board' containing a member 'const char * *board_name' that is initialized to point to a board name string that * is one of the candidates matched against this function's 'name' * parameter. * * 'driv->offset' is the size of the private board information * structure, say 'sizeof(struct foo_board)', and 'driv->num_names' is * the length of the array of private board information structures. * * If one of the board names in the array of private board information * structures matches the name supplied to this function, the function * returns a pointer to the pointer to the board name, otherwise it * returns NULL. The return value ends up in the 'board_ptr' member of * a 'struct comedi_device' that the low-level comedi driver's * 'attach()' hook can convert to a pointer to a particular element of its * array of private board information structures by subtracting the * offset of the member that points to the board name. (No subtraction * is required if the board name pointer is the first member of the * private board information structure, which is generally the case.) */ static void *comedi_recognize(struct comedi_driver *driv, const char *name) { char **name_ptr = (char **)driv->board_name; int i; for (i = 0; i < driv->num_names; i++) { if (strcmp(*name_ptr, name) == 0) return name_ptr; name_ptr = (void *)name_ptr + driv->offset; } return NULL; } static void comedi_report_boards(struct comedi_driver *driv) { unsigned int i; const char *const *name_ptr; pr_info("comedi: valid board names for %s driver are:\n", driv->driver_name); name_ptr = driv->board_name; for (i = 0; i < driv->num_names; i++) { pr_info(" %s\n", *name_ptr); name_ptr = (const char **)((char *)name_ptr + driv->offset); } if (driv->num_names == 0) pr_info(" %s\n", driv->driver_name); } /** * comedi_load_firmware() - Request and load firmware for a device * @dev: COMEDI device. * @device: Hardware device. * @name: The name of the firmware image. * @cb: Callback to upload the firmware image. * @context: Private context from the driver. * * Sends a firmware request for the hardware device and waits for it. Calls * the callback function to upload the firmware to the device, then releases * the firmware. * * Returns 0 on success, -EINVAL if @cb is NULL, or a negative error number * from the firmware request or the callback function. */ int comedi_load_firmware(struct comedi_device *dev, struct device *device, const char *name, int (*cb)(struct comedi_device *dev, const u8 *data, size_t size, unsigned long context), unsigned long context) { const struct firmware *fw; int ret; if (!cb) return -EINVAL; ret = request_firmware(&fw, name, device); if (ret == 0) { ret = cb(dev, fw->data, fw->size, context); release_firmware(fw); } return min(ret, 0); } EXPORT_SYMBOL_GPL(comedi_load_firmware); /** * __comedi_request_region() - Request an I/O region for a legacy driver * @dev: COMEDI device. * @start: Base address of the I/O region. * @len: Length of the I/O region. * * Requests the specified I/O port region which must start at a non-zero * address. * * Returns 0 on success, -EINVAL if @start is 0, or -EIO if the request * fails. 
*/ int __comedi_request_region(struct comedi_device *dev, unsigned long start, unsigned long len) { if (!start) { dev_warn(dev->class_dev, "%s: an I/O base address must be specified\n", dev->board_name); return -EINVAL; } if (!request_region(start, len, dev->board_name)) { dev_warn(dev->class_dev, "%s: I/O port conflict (%#lx,%lu)\n", dev->board_name, start, len); return -EIO; } return 0; } EXPORT_SYMBOL_GPL(__comedi_request_region); /** * comedi_request_region() - Request an I/O region for a legacy driver * @dev: COMEDI device. * @start: Base address of the I/O region. * @len: Length of the I/O region. * * Requests the specified I/O port region which must start at a non-zero * address. * * On success, @dev->iobase is set to the base address of the region and * @dev->iolen is set to its length. * * Returns 0 on success, -EINVAL if @start is 0, or -EIO if the request * fails. */ int comedi_request_region(struct comedi_device *dev, unsigned long start, unsigned long len) { int ret; ret = __comedi_request_region(dev, start, len); if (ret == 0) { dev->iobase = start; dev->iolen = len; } return ret; } EXPORT_SYMBOL_GPL(comedi_request_region); /** * comedi_legacy_detach() - A generic (*detach) function for legacy drivers * @dev: COMEDI device. * * This is a simple, generic 'detach' handler for legacy COMEDI devices that * just use a single I/O port region and possibly an IRQ and that don't need * any special clean-up for their private device or subdevice storage. It * can also be called by a driver-specific 'detach' handler. * * If @dev->irq is non-zero, the IRQ will be freed. If @dev->iobase and * @dev->iolen are both non-zero, the I/O port region will be released. */ void comedi_legacy_detach(struct comedi_device *dev) { if (dev->irq) { free_irq(dev->irq, dev); dev->irq = 0; } if (dev->iobase && dev->iolen) { release_region(dev->iobase, dev->iolen); dev->iobase = 0; dev->iolen = 0; } } EXPORT_SYMBOL_GPL(comedi_legacy_detach); int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_driver *driv; int ret; lockdep_assert_held(&dev->mutex); if (dev->attached) return -EBUSY; mutex_lock(&comedi_drivers_list_lock); for (driv = comedi_drivers; driv; driv = driv->next) { if (!try_module_get(driv->module)) continue; if (driv->num_names) { dev->board_ptr = comedi_recognize(driv, it->board_name); if (dev->board_ptr) break; } else if (strcmp(driv->driver_name, it->board_name) == 0) { break; } module_put(driv->module); } if (!driv) { /* recognize has failed if we get here */ /* report valid board names before returning error */ for (driv = comedi_drivers; driv; driv = driv->next) { if (!try_module_get(driv->module)) continue; comedi_report_boards(driv); module_put(driv->module); } ret = -EIO; goto out; } if (!driv->attach) { /* driver does not support manual configuration */ dev_warn(dev->class_dev, "driver '%s' does not support attach using comedi_config\n", driv->driver_name); module_put(driv->module); ret = -EIO; goto out; } dev->driver = driv; dev->board_name = dev->board_ptr ? *(const char **)dev->board_ptr : dev->driver->driver_name; ret = driv->attach(dev, it); if (ret >= 0) ret = comedi_device_postconfig(dev); if (ret < 0) { comedi_device_detach(dev); module_put(driv->module); } /* On success, the driver module count has been incremented. */ out: mutex_unlock(&comedi_drivers_list_lock); return ret; } /** * comedi_auto_config() - Create a COMEDI device for a hardware device * @hardware_device: Hardware device. 
* @driver: COMEDI low-level driver for the hardware device. * @context: Driver context for the auto_attach handler. * * Allocates a new COMEDI device for the hardware device and calls the * low-level driver's 'auto_attach' handler to set up the hardware and * allocate the COMEDI subdevices. Additional "post-configuration" setting * up is performed on successful return from the 'auto_attach' handler. * If the 'auto_attach' handler fails, the low-level driver's 'detach' * handler will be called as part of the clean-up. * * This is usually called from a wrapper function in a bus-specific COMEDI * module, which in turn is usually called from a bus device 'probe' * function in the low-level driver. * * Returns 0 on success, -EINVAL if the parameters are invalid or the * post-configuration determines the driver has set the COMEDI device up * incorrectly, -ENOMEM if it failed to allocate memory, -EBUSY if it ran out * of COMEDI minor device numbers, or some negative error number returned by * the driver's 'auto_attach' handler. */ int comedi_auto_config(struct device *hardware_device, struct comedi_driver *driver, unsigned long context) { struct comedi_device *dev; int ret; if (!hardware_device) { pr_warn("BUG! %s called with NULL hardware_device\n", __func__); return -EINVAL; } if (!driver) { dev_warn(hardware_device, "BUG! %s called with NULL comedi driver\n", __func__); return -EINVAL; } if (!driver->auto_attach) { dev_warn(hardware_device, "BUG! comedi driver '%s' has no auto_attach handler\n", driver->driver_name); return -EINVAL; } dev = comedi_alloc_board_minor(hardware_device); if (IS_ERR(dev)) { dev_warn(hardware_device, "driver '%s' could not create device.\n", driver->driver_name); return PTR_ERR(dev); } /* Note: comedi_alloc_board_minor() locked dev->mutex. */ lockdep_assert_held(&dev->mutex); dev->driver = driver; dev->board_name = dev->driver->driver_name; ret = driver->auto_attach(dev, context); if (ret >= 0) ret = comedi_device_postconfig(dev); if (ret < 0) { dev_warn(hardware_device, "driver '%s' failed to auto-configure device.\n", driver->driver_name); mutex_unlock(&dev->mutex); comedi_release_hardware_device(hardware_device); } else { /* * class_dev should be set properly here * after a successful auto config */ dev_info(dev->class_dev, "driver '%s' has successfully auto-configured '%s'.\n", driver->driver_name, dev->board_name); mutex_unlock(&dev->mutex); } return ret; } EXPORT_SYMBOL_GPL(comedi_auto_config); /** * comedi_auto_unconfig() - Unconfigure auto-allocated COMEDI device * @hardware_device: Hardware device previously passed to * comedi_auto_config(). * * Cleans up and eventually destroys the COMEDI device allocated by * comedi_auto_config() for the same hardware device. As part of this * clean-up, the low-level COMEDI driver's 'detach' handler will be called. * (The COMEDI device itself will persist in an unattached state if it is * still open, until it is released, and any mmapped buffers will persist * until they are munmapped.) * * This is usually called from a wrapper function in a bus-specific COMEDI * module, which in turn is usually set as the bus device 'remove' function * in the low-level COMEDI driver. */ void comedi_auto_unconfig(struct device *hardware_device) { if (!hardware_device) return; comedi_release_hardware_device(hardware_device); } EXPORT_SYMBOL_GPL(comedi_auto_unconfig); /** * comedi_driver_register() - Register a low-level COMEDI driver * @driver: Low-level COMEDI driver. 
* * The low-level COMEDI driver is added to the list of registered COMEDI * drivers. This is used by the handler for the "/proc/comedi" file and is * also used by the handler for the %COMEDI_DEVCONFIG ioctl to configure * "legacy" COMEDI devices (for those low-level drivers that support it). * * Returns 0. */ int comedi_driver_register(struct comedi_driver *driver) { mutex_lock(&comedi_drivers_list_lock); driver->next = comedi_drivers; comedi_drivers = driver; mutex_unlock(&comedi_drivers_list_lock); return 0; } EXPORT_SYMBOL_GPL(comedi_driver_register); /** * comedi_driver_unregister() - Unregister a low-level COMEDI driver * @driver: Low-level COMEDI driver. * * The low-level COMEDI driver is removed from the list of registered COMEDI * drivers. Detaches any COMEDI devices attached to the driver, which will * result in the low-level driver's 'detach' handler being called for those * devices before this function returns. */ void comedi_driver_unregister(struct comedi_driver *driver) { struct comedi_driver *prev; int i; /* unlink the driver */ mutex_lock(&comedi_drivers_list_lock); if (comedi_drivers == driver) { comedi_drivers = driver->next; } else { for (prev = comedi_drivers; prev->next; prev = prev->next) { if (prev->next == driver) { prev->next = driver->next; break; } } } mutex_unlock(&comedi_drivers_list_lock); /* check for devices using this driver */ for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) { struct comedi_device *dev = comedi_dev_get_from_minor(i); if (!dev) continue; mutex_lock(&dev->mutex); if (dev->attached && dev->driver == driver) { if (dev->use_count) dev_warn(dev->class_dev, "BUG! detaching device with use_count=%d\n", dev->use_count); comedi_device_detach(dev); } mutex_unlock(&dev->mutex); comedi_dev_put(dev); } } EXPORT_SYMBOL_GPL(comedi_driver_unregister);
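/*
 * [Editor's note -- illustrative addition, not part of the original file.]
 * A minimal sketch of how a low-level driver's interrupt handler typically
 * combines the helpers documented above: comedi_nsamples_left() clamps the
 * transfer, comedi_buf_write_samples() stores data and advances the scan
 * progress, and comedi_handle_events() processes the resulting event flags.
 * Everything named skel_* is hypothetical; only the comedi_* calls and the
 * TRIG_COUNT/COMEDI_CB_EOA symbols are real API used elsewhere in this file.
 */
static unsigned int skel_ai_fifo_count(struct comedi_device *dev);	/* hypothetical */
static unsigned short skel_ai_read_fifo(struct comedi_device *dev);	/* hypothetical */

static irqreturn_t skel_ai_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s->async;
	struct comedi_cmd *cmd = &async->cmd;
	unsigned int nsamples;
	unsigned short sample;

	/* Never read more samples than the command still needs. */
	nsamples = comedi_nsamples_left(s, skel_ai_fifo_count(dev));
	while (nsamples--) {
		sample = skel_ai_read_fifo(dev);
		comedi_buf_write_samples(s, &sample, 1);
	}

	/* A TRIG_COUNT command ends once all requested scans are done. */
	if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
		async->events |= COMEDI_CB_EOA;

	comedi_handle_events(dev, s);
	return IRQ_HANDLED;
}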
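/*
 * [Editor's note -- illustrative addition, not part of the original file.]
 * A sketch of the "legacy" driver shape that comedi_device_attach() and
 * comedi_recognize() above expect.  The board table's first member is the
 * name pointer that comedi_recognize() matches, 'offset' is the element
 * size, and comedi_legacy_detach() pairs with comedi_request_region().
 * All skel_* names are hypothetical.  Because only insn_bits() is provided,
 * __comedi_device_postconfig() above wires up insn_read()/insn_write()
 * emulation via insn_rw_emulate_bits().
 */
struct skel_board {
	const char *name;
	int di_chans;
};

static const struct skel_board skel_boards[] = {
	{ .name = "skel-100", .di_chans = 16 },
	{ .name = "skel-200", .di_chans = 32 },
};

static int skel_di_insn_bits(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_insn *insn,
			     unsigned int *data);	/* hypothetical handler */

static int skel_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	const struct skel_board *board = dev->board_ptr;
	struct comedi_subdevice *s;
	int ret;

	/* it->options[0] carries the I/O base given to comedi_config. */
	ret = comedi_request_region(dev, it->options[0], 0x10);
	if (ret)
		return ret;

	ret = comedi_alloc_subdevices(dev, 1);
	if (ret)
		return ret;

	s = &dev->subdevices[0];
	s->type		= COMEDI_SUBD_DI;
	s->subdev_flags	= SDF_READABLE;
	s->n_chan	= board->di_chans;
	s->maxdata	= 1;
	s->range_table	= &range_digital;
	s->insn_bits	= skel_di_insn_bits;
	return 0;
}

static struct comedi_driver skel_driver = {
	.driver_name	= "skel",
	.module		= THIS_MODULE,
	.attach		= skel_attach,
	.detach		= comedi_legacy_detach,
	.board_name	= &skel_boards[0].name,
	.offset		= sizeof(struct skel_board),
	.num_names	= ARRAY_SIZE(skel_boards),
};
module_comedi_driver(skel_driver);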
// SPDX-License-Identifier: GPL-2.0-or-later /* * usbusx2y.c - ALSA USB US-428 Driver * 2005-04-14 Karsten Wiese Version 0.8.7.2: Call snd_card_free() instead of snd_card_free_in_thread() to prevent oops with dead keyboard symptom. Tested ok with kernel 2.6.12-rc2. 2004-12-14 Karsten Wiese Version 0.8.7.1: snd_pcm_open for rawusb pcm-devices now returns -EBUSY if called without rawusb's hwdep device being open. 2004-12-02 Karsten Wiese Version 0.8.7: Use macro usb_maxpacket() for portability. 2004-10-26 Karsten Wiese Version 0.8.6: wake_up() process waiting in usx2y_urbs_start() on error. 2004-10-21 Karsten Wiese Version 0.8.5: nrpacks is runtime or compile-time configurable now with tested values from 1 to 4. 2004-10-03 Karsten Wiese Version 0.8.2: Avoid any possible racing while in prepare callback. 2004-09-30 Karsten Wiese Version 0.8.0: Simplified things and made ohci work again. 2004-09-20 Karsten Wiese Version 0.7.3: Use usb_kill_urb() instead of deprecated (kernel 2.6.9) usb_unlink_urb(). 2004-07-13 Karsten Wiese Version 0.7.1: Don't sleep in START/STOP callbacks anymore. us428 channels C/D not handled just for this version, sorry. 2004-06-21 Karsten Wiese Version 0.6.4: Temporarily suspend midi input to sanely call usb_set_interface() when setting format. 2004-06-12 Karsten Wiese Version 0.6.3: Made it so that the following rule is enforced: "All pcm substreams of one usx2y have to operate at the same rate & format." 2004-04-06 Karsten Wiese Version 0.6.0: Runs on 2.6.5 kernel without any "--with-debug=" things. us224 reported running. 2004-01-14 Karsten Wiese Version 0.5.1: Runs with 2.6.1 kernel. 
2003-12-30 Karsten Wiese Version 0.4.1: Fix 24Bit 4Channel capturing for the us428. 2003-11-27 Karsten Wiese, Martin Langer Version 0.4: us122 support. us224 could be tested by uncommenting the sections containing USB_ID_US224 2003-11-03 Karsten Wiese Version 0.3: 24Bit support. "arecord -D hw:1 -c 2 -r 48000 -M -f S24_3LE|aplay -D hw:1 -c 2 -r 48000 -M -f S24_3LE" works. 2003-08-22 Karsten Wiese Version 0.0.8: Removed EZUSB Firmware. First-stage firmware download is now done by the tascam-firmware downloader. See: http://usb-midi-fw.sourceforge.net/tascam-firmware.tar.gz 2003-06-18 Karsten Wiese Version 0.0.5: changed to compile with kernel 2.4.21 and alsa 0.9.4 2002-10-16 Karsten Wiese Version 0.0.4: compiles again with alsa-current. USB_ISO_ASAP not used anymore (most of the time), instead urb->start_frame is calculated here now, some calls inside usb-driver don't need to happen anymore. To get the best out of this: Disable APM-support in the kernel as APM-BIOS calls (once each second) hard disable interrupts for many precious milliseconds. This helped me much on my slowish PII 400 & PIII 500. ACPI yet untested but might cause the same bad behaviour. Use a kernel with low-latency and preemptive patches applied. To autoload snd-usb-midi append a line post-install snd-usb-us428 modprobe snd-usb-midi to /etc/modules.conf. known problems: sliders, knobs, lights not yet handled except MASTER Volume slider. "pcm -c 2" doesn't work. "pcm -c 2 -m direct_interleaved" does. KDE3: "Enable full duplex operation" deadlocks. 2002-08-31 Karsten Wiese Version 0.0.3: audio also simplex; simplifying: iso urbs only 1 packet, melted structs. ASYNC_UNLINK not used anymore: no more crashes so far..... for alsa 0.9 rc3. 2002-08-09 Karsten Wiese Version 0.0.2: midi works with snd-usb-midi, audio (only fullduplex now) with e.g. bristol. The firmware has been sniffed from win2k us-428 driver 3.09. * Copyright (c) 2002 - 2004 Karsten Wiese */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/usb.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/rawmidi.h> #include "usx2y.h" #include "usbusx2y.h" #include "usX2Yhwdep.h" MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>"); MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.2"); MODULE_LICENSE("GPL"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for "NAME_ALLCAPS"."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for "NAME_ALLCAPS"."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable "NAME_ALLCAPS"."); static int snd_usx2y_card_used[SNDRV_CARDS]; static void snd_usx2y_card_private_free(struct snd_card *card); static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s); #ifdef USX2Y_NRPACKS_VARIABLE int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */ module_param(nrpacks, int, 0444); MODULE_PARM_DESC(nrpacks, "Number of packets per URB."); #endif /* * pipe 4 is used for switching the lamps, setting samplerate, volumes .... 
*/ static void i_usx2y_out04_int(struct urb *urb) { #ifdef CONFIG_SND_DEBUG if (urb->status) { int i; struct usx2ydev *usx2y = urb->context; for (i = 0; i < 10 && usx2y->as04.urb[i] != urb; i++) ; dev_dbg(&urb->dev->dev, "%s urb %i status=%i\n", __func__, i, urb->status); } #endif } static void i_usx2y_in04_int(struct urb *urb) { int err = 0; struct usx2ydev *usx2y = urb->context; struct us428ctls_sharedmem *us428ctls = usx2y->us428ctls_sharedmem; struct us428_p4out *p4out; int i, j, n, diff, send; usx2y->in04_int_calls++; if (urb->status) { dev_dbg(&urb->dev->dev, "Interrupt Pipe 4 came back with status=%i\n", urb->status); return; } if (us428ctls) { diff = -1; if (us428ctls->ctl_snapshot_last == -2) { diff = 0; memcpy(usx2y->in04_last, usx2y->in04_buf, sizeof(usx2y->in04_last)); us428ctls->ctl_snapshot_last = -1; } else { for (i = 0; i < 21; i++) { if (usx2y->in04_last[i] != ((char *)usx2y->in04_buf)[i]) { if (diff < 0) diff = i; usx2y->in04_last[i] = ((char *)usx2y->in04_buf)[i]; } } } if (diff >= 0) { n = us428ctls->ctl_snapshot_last + 1; if (n >= N_US428_CTL_BUFS || n < 0) n = 0; memcpy(us428ctls->ctl_snapshot + n, usx2y->in04_buf, sizeof(us428ctls->ctl_snapshot[0])); us428ctls->ctl_snapshot_differs_at[n] = diff; us428ctls->ctl_snapshot_last = n; wake_up(&usx2y->us428ctls_wait_queue_head); } } if (usx2y->us04) { if (!usx2y->us04->submitted) { do { err = usb_submit_urb(usx2y->us04->urb[usx2y->us04->submitted++], GFP_ATOMIC); } while (!err && usx2y->us04->submitted < usx2y->us04->len); } } else { if (us428ctls && us428ctls->p4out_last >= 0 && us428ctls->p4out_last < N_US428_P4OUT_BUFS) { if (us428ctls->p4out_last != us428ctls->p4out_sent) { send = us428ctls->p4out_sent + 1; if (send >= N_US428_P4OUT_BUFS) send = 0; for (j = 0; j < URBS_ASYNC_SEQ && !err; ++j) { if (!usx2y->as04.urb[j]->status) { p4out = us428ctls->p4out + send; // FIXME if more than 1 p4out is new, 1 gets lost. usb_fill_bulk_urb(usx2y->as04.urb[j], usx2y->dev, usb_sndbulkpipe(usx2y->dev, 0x04), &p4out->val.vol, p4out->type == ELT_LIGHT ? 
sizeof(struct us428_lights) : 5, i_usx2y_out04_int, usx2y); err = usb_submit_urb(usx2y->as04.urb[j], GFP_ATOMIC); us428ctls->p4out_sent = send; break; } } } } } if (err) dev_err(&urb->dev->dev, "in04_int() usb_submit_urb err=%i\n", err); urb->dev = usx2y->dev; usb_submit_urb(urb, GFP_ATOMIC); } /* * Prepare some urbs */ int usx2y_async_seq04_init(struct usx2ydev *usx2y) { int err = 0, i; if (WARN_ON(usx2y->as04.buffer)) return -EBUSY; usx2y->as04.buffer = kmalloc_array(URBS_ASYNC_SEQ, URB_DATA_LEN_ASYNC_SEQ, GFP_KERNEL); if (!usx2y->as04.buffer) { err = -ENOMEM; } else { for (i = 0; i < URBS_ASYNC_SEQ; ++i) { usx2y->as04.urb[i] = usb_alloc_urb(0, GFP_KERNEL); if (!usx2y->as04.urb[i]) { err = -ENOMEM; break; } usb_fill_bulk_urb(usx2y->as04.urb[i], usx2y->dev, usb_sndbulkpipe(usx2y->dev, 0x04), usx2y->as04.buffer + URB_DATA_LEN_ASYNC_SEQ * i, 0, i_usx2y_out04_int, usx2y); err = usb_urb_ep_type_check(usx2y->as04.urb[i]); if (err < 0) break; } } if (err) usx2y_unlinkseq(&usx2y->as04); return err; } int usx2y_in04_init(struct usx2ydev *usx2y) { int err; if (WARN_ON(usx2y->in04_urb)) return -EBUSY; usx2y->in04_urb = usb_alloc_urb(0, GFP_KERNEL); if (!usx2y->in04_urb) { err = -ENOMEM; goto error; } usx2y->in04_buf = kmalloc(21, GFP_KERNEL); if (!usx2y->in04_buf) { err = -ENOMEM; goto error; } init_waitqueue_head(&usx2y->in04_wait_queue); usb_fill_int_urb(usx2y->in04_urb, usx2y->dev, usb_rcvintpipe(usx2y->dev, 0x4), usx2y->in04_buf, 21, i_usx2y_in04_int, usx2y, 10); if (usb_urb_ep_type_check(usx2y->in04_urb)) { err = -EINVAL; goto error; } return usb_submit_urb(usx2y->in04_urb, GFP_KERNEL); error: kfree(usx2y->in04_buf); usb_free_urb(usx2y->in04_urb); usx2y->in04_buf = NULL; usx2y->in04_urb = NULL; return err; } static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s) { int i; for (i = 0; i < URBS_ASYNC_SEQ; ++i) { if (!s->urb[i]) continue; usb_kill_urb(s->urb[i]); usb_free_urb(s->urb[i]); s->urb[i] = NULL; } kfree(s->buffer); s->buffer = NULL; } static const struct usb_device_id snd_usx2y_usb_id_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1604, .idProduct = USB_ID_US428 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1604, .idProduct = USB_ID_US122 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1604, .idProduct = USB_ID_US224 }, { /* terminator */ } }; MODULE_DEVICE_TABLE(usb, snd_usx2y_usb_id_table); static int usx2y_create_card(struct usb_device *device, struct usb_interface *intf, struct snd_card **cardp) { int dev; struct snd_card *card; int err; for (dev = 0; dev < SNDRV_CARDS; ++dev) if (enable[dev] && !snd_usx2y_card_used[dev]) break; if (dev >= SNDRV_CARDS) return -ENODEV; err = snd_card_new(&intf->dev, index[dev], id[dev], THIS_MODULE, sizeof(struct usx2ydev), &card); if (err < 0) return err; snd_usx2y_card_used[usx2y(card)->card_index = dev] = 1; card->private_free = snd_usx2y_card_private_free; usx2y(card)->dev = device; init_waitqueue_head(&usx2y(card)->prepare_wait_queue); init_waitqueue_head(&usx2y(card)->us428ctls_wait_queue_head); mutex_init(&usx2y(card)->pcm_mutex); INIT_LIST_HEAD(&usx2y(card)->midi_list); strcpy(card->driver, "USB "NAME_ALLCAPS""); sprintf(card->shortname, "TASCAM "NAME_ALLCAPS""); sprintf(card->longname, "%s (%x:%x if %d at %03d/%03d)", card->shortname, le16_to_cpu(device->descriptor.idVendor), le16_to_cpu(device->descriptor.idProduct), 0,//us428(card)->usbmidi.ifnum, usx2y(card)->dev->bus->busnum, usx2y(card)->dev->devnum); *cardp = card; return 0; } static void snd_usx2y_card_private_free(struct 
snd_card *card) { struct usx2ydev *usx2y = usx2y(card); kfree(usx2y->in04_buf); usb_free_urb(usx2y->in04_urb); if (usx2y->us428ctls_sharedmem) free_pages_exact(usx2y->us428ctls_sharedmem, US428_SHAREDMEM_PAGES); if (usx2y->card_index >= 0 && usx2y->card_index < SNDRV_CARDS) snd_usx2y_card_used[usx2y->card_index] = 0; } static void snd_usx2y_disconnect(struct usb_interface *intf) { struct snd_card *card; struct usx2ydev *usx2y; struct list_head *p; card = usb_get_intfdata(intf); if (!card) return; usx2y = usx2y(card); usx2y->chip_status = USX2Y_STAT_CHIP_HUP; usx2y_unlinkseq(&usx2y->as04); usb_kill_urb(usx2y->in04_urb); snd_card_disconnect(card); /* release the midi resources */ list_for_each(p, &usx2y->midi_list) { snd_usbmidi_disconnect(p); } if (usx2y->us428ctls_sharedmem) wake_up(&usx2y->us428ctls_wait_queue_head); snd_card_free_when_closed(card); } static int snd_usx2y_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *device = interface_to_usbdev(intf); struct snd_card *card; int err; #ifdef USX2Y_NRPACKS_VARIABLE if (nrpacks < 0 || nrpacks > USX2Y_NRPACKS_MAX) return -EINVAL; #endif if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 || (le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 && le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 && le16_to_cpu(device->descriptor.idProduct) != USB_ID_US428)) return -EINVAL; err = usx2y_create_card(device, intf, &card); if (err < 0) return err; err = usx2y_hwdep_new(card, device); if (err < 0) goto error; err = snd_card_register(card); if (err < 0) goto error; dev_set_drvdata(&intf->dev, card); return 0; error: snd_card_free(card); return err; } static struct usb_driver snd_usx2y_usb_driver = { .name = "snd-usb-usx2y", .probe = snd_usx2y_probe, .disconnect = snd_usx2y_disconnect, .id_table = snd_usx2y_usb_id_table, }; module_usb_driver(snd_usx2y_usb_driver);
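/*
 * [Editor's note -- illustrative addition, not part of the original file.]
 * The interrupt-URB lifecycle implemented by usx2y_in04_init() and
 * i_usx2y_in04_int() above, restated in generic form: allocate the URB and
 * a kmalloc'd (DMA-safe) buffer, fill and submit it, resubmit from the
 * completion handler, and kill it on teardown.  "struct my_dev", endpoint
 * 0x4, the 21-byte buffer and the 10 ms interval mirror the code above but
 * are otherwise arbitrary; the usb_* calls are the real USB core API.
 */
struct my_dev {
	struct usb_device *udev;
	struct urb *int_urb;
	u8 *buf;
};

static void my_int_complete(struct urb *urb)
{
	struct my_dev *md = urb->context;

	if (urb->status)	/* unlinked, device gone, ...: stop resubmitting */
		return;
	/* ... consume md->buf here ... then keep the pipe polled: */
	usb_submit_urb(urb, GFP_ATOMIC);
}

static int my_int_init(struct my_dev *md)
{
	md->int_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!md->int_urb)
		return -ENOMEM;
	md->buf = kmalloc(21, GFP_KERNEL);
	if (!md->buf) {
		usb_free_urb(md->int_urb);
		md->int_urb = NULL;
		return -ENOMEM;
	}
	usb_fill_int_urb(md->int_urb, md->udev,
			 usb_rcvintpipe(md->udev, 0x4),
			 md->buf, 21, my_int_complete, md, 10);
	return usb_submit_urb(md->int_urb, GFP_KERNEL);
}

static void my_int_exit(struct my_dev *md)
{
	usb_kill_urb(md->int_urb);	/* no completions can run after this */
	usb_free_urb(md->int_urb);
	kfree(md->buf);
}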
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "wifi.h" #include "core.h" #include "usb.h" #include "base.h" #include "ps.h" #include "rtl8192c/fw_common.h" #include <linux/export.h> #include <linux/module.h> MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("USB basic driver for rtlwifi"); #define REALTEK_USB_VENQT_READ 0xC0 #define REALTEK_USB_VENQT_WRITE 0x40 #define REALTEK_USB_VENQT_CMD_REQ 0x05 #define REALTEK_USB_VENQT_CMD_IDX 0x00 #define MAX_USBCTRL_VENDORREQ_TIMES 10 static void _rtl_usb_cleanup_tx(struct ieee80211_hw *hw); static void _usbctrl_vendorreq_sync(struct usb_device *udev, u8 reqtype, u16 value, void *pdata, u16 len) { unsigned int pipe; int status; int vendorreq_times = 0; static int count; if (reqtype == REALTEK_USB_VENQT_READ) pipe = usb_rcvctrlpipe(udev, 0); /* read_in */ else pipe = usb_sndctrlpipe(udev, 0); /* write_out */ do { status = usb_control_msg(udev, pipe, REALTEK_USB_VENQT_CMD_REQ, reqtype, value, REALTEK_USB_VENQT_CMD_IDX, pdata, len, 1000); if (status < 0) { /* firmware download is checksummed, don't retry */ if ((value >= FW_8192C_START_ADDRESS && value <= FW_8192C_END_ADDRESS)) break; } else { break; } } while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES); if (status < 0 && count++ < 4) dev_err(&udev->dev, "reg 0x%x, usbctrl_vendorreq TimeOut! 
status:0x%x value=0x%x reqtype=0x%x\n", value, status, *(u32 *)pdata, reqtype); } static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len) { struct device *dev = rtlpriv->io.dev; struct usb_device *udev = to_usb_device(dev); u16 wvalue; __le32 *data; unsigned long flags; spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags); if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT) rtlpriv->usb_data_index = 0; data = &rtlpriv->usb_data[rtlpriv->usb_data_index]; spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags); wvalue = (u16)addr; _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_READ, wvalue, data, len); return le32_to_cpu(*data); } static void _usb_write_sync(struct rtl_priv *rtlpriv, u32 addr, u32 val, u16 len) { struct device *dev = rtlpriv->io.dev; struct usb_device *udev = to_usb_device(dev); unsigned long flags; __le32 *data; u16 wvalue; spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags); if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT) rtlpriv->usb_data_index = 0; data = &rtlpriv->usb_data[rtlpriv->usb_data_index]; spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags); wvalue = (u16)(addr & 0x0000ffff); *data = cpu_to_le32(val); _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_WRITE, wvalue, data, len); } static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr) { return (u8)_usb_read_sync(rtlpriv, addr, 1); } static u16 _usb_read16_sync(struct rtl_priv *rtlpriv, u32 addr) { return (u16)_usb_read_sync(rtlpriv, addr, 2); } static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr) { return _usb_read_sync(rtlpriv, addr, 4); } static void _usb_write8_sync(struct rtl_priv *rtlpriv, u32 addr, u8 val) { _usb_write_sync(rtlpriv, addr, val, 1); } static void _usb_write16_sync(struct rtl_priv *rtlpriv, u32 addr, u16 val) { _usb_write_sync(rtlpriv, addr, val, 2); } static void _usb_write32_sync(struct rtl_priv *rtlpriv, u32 addr, u32 val) { _usb_write_sync(rtlpriv, addr, val, 4); } static void _usb_write_chunk_sync(struct rtl_priv *rtlpriv, u32 addr, u32 length, u8 *data) { struct usb_device *udev = to_usb_device(rtlpriv->io.dev); _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_WRITE, addr, data, length); } static void _rtl_usb_io_handler_init(struct device *dev, struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->io.dev = dev; mutex_init(&rtlpriv->io.bb_mutex); rtlpriv->io.write8 = _usb_write8_sync; rtlpriv->io.write16 = _usb_write16_sync; rtlpriv->io.write32 = _usb_write32_sync; rtlpriv->io.write_chunk = _usb_write_chunk_sync; rtlpriv->io.read8 = _usb_read8_sync; rtlpriv->io.read16 = _usb_read16_sync; rtlpriv->io.read32 = _usb_read32_sync; } static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw) { struct rtl_priv __maybe_unused *rtlpriv = rtl_priv(hw); mutex_destroy(&rtlpriv->io.bb_mutex); } /* Default aggregation handler. Do nothing and just return the oldest skb. */ static struct sk_buff *_none_usb_tx_aggregate_hdl(struct ieee80211_hw *hw, struct sk_buff_head *list) { return skb_dequeue(list); } #define IS_HIGH_SPEED_USB(udev) \ ((USB_SPEED_HIGH == (udev)->speed) ? true : false) static int _rtl_usb_init_tx(struct ieee80211_hw *hw) { u32 i; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); rtlusb->max_bulk_out_size = IS_HIGH_SPEED_USB(rtlusb->udev) ? 
USB_HIGH_SPEED_BULK_SIZE : USB_FULL_SPEED_BULK_SIZE; rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "USB Max Bulk-out Size=%d\n", rtlusb->max_bulk_out_size); for (i = 0; i < __RTL_TXQ_NUM; i++) { u32 ep_num = rtlusb->ep_map.ep_mapping[i]; if (!ep_num) { rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Invalid endpoint map setting!\n"); return -EINVAL; } } rtlusb->usb_tx_post_hdl = rtlpriv->cfg->usb_interface_cfg->usb_tx_post_hdl; rtlusb->usb_tx_cleanup = rtlpriv->cfg->usb_interface_cfg->usb_tx_cleanup; rtlusb->usb_tx_aggregate_hdl = (rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl) ? rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl : &_none_usb_tx_aggregate_hdl; init_usb_anchor(&rtlusb->tx_submitted); for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) { skb_queue_head_init(&rtlusb->tx_skb_queue[i]); init_usb_anchor(&rtlusb->tx_pending[i]); } return 0; } static void _rtl_rx_work(struct tasklet_struct *t); static int _rtl_usb_init_rx(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); struct rtl_usb *rtlusb = rtl_usbdev(usb_priv); rtlusb->rx_max_size = rtlpriv->cfg->usb_interface_cfg->rx_max_size; rtlusb->rx_urb_num = rtlpriv->cfg->usb_interface_cfg->rx_urb_num; rtlusb->usb_rx_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_hdl; rtlusb->usb_rx_segregate_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl; pr_info("rx_max_size %d, rx_urb_num %d, in_ep %d\n", rtlusb->rx_max_size, rtlusb->rx_urb_num, rtlusb->in_ep); init_usb_anchor(&rtlusb->rx_submitted); init_usb_anchor(&rtlusb->rx_cleanup_urbs); skb_queue_head_init(&rtlusb->rx_queue); tasklet_setup(&rtlusb->rx_work_tasklet, _rtl_rx_work); return 0; } static int _rtl_usb_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); struct rtl_usb *rtlusb = rtl_usbdev(usb_priv); int err; u8 epidx; struct usb_interface *usb_intf = rtlusb->intf; u8 epnums = usb_intf->cur_altsetting->desc.bNumEndpoints; rtlusb->out_ep_nums = rtlusb->in_ep_nums = 0; for (epidx = 0; epidx < epnums; epidx++) { struct usb_endpoint_descriptor *pep_desc; pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc; if (usb_endpoint_dir_in(pep_desc)) { if (usb_endpoint_xfer_bulk(pep_desc)) { /* The vendor drivers assume there is only one * bulk in ep and that it's the first in ep. 
*/ if (rtlusb->in_ep_nums == 0) rtlusb->in_ep = usb_endpoint_num(pep_desc); else pr_warn("%s: bulk in endpoint is not the first in endpoint\n", __func__); } rtlusb->in_ep_nums++; } else if (usb_endpoint_dir_out(pep_desc)) { if (rtlusb->out_ep_nums < RTL_USB_MAX_BULKOUT_NUM) { if (usb_endpoint_xfer_bulk(pep_desc)) rtlusb->out_eps[rtlusb->out_ep_nums] = usb_endpoint_num(pep_desc); } else { pr_warn("%s: found more bulk out endpoints than the expected %d\n", __func__, RTL_USB_MAX_BULKOUT_NUM); } rtlusb->out_ep_nums++; } rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n", pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize, pep_desc->bInterval); } if (rtlusb->out_ep_nums == 0) { pr_err("No output end points found\n"); return -EINVAL; } /* usb endpoint mapping */ err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw); if (err) return err; rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq; err = _rtl_usb_init_tx(hw); if (err) return err; err = _rtl_usb_init_rx(hw); if (err) goto err_out; return 0; err_out: _rtl_usb_cleanup_tx(hw); return err; } static void rtl_usb_init_sw(struct ieee80211_hw *hw) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); rtlhal->hw = hw; ppsc->inactiveps = false; ppsc->leisure_ps = false; ppsc->fwctrl_lps = false; ppsc->reg_fwctrl_lps = 3; ppsc->reg_max_lps_awakeintvl = 5; ppsc->fwctrl_psmode = FW_PS_DTIM_MODE; /* IBSS */ mac->beacon_interval = 100; /* AMPDU */ mac->min_space_cfg = 0; mac->max_mss_density = 0; /* set sane AMPDU defaults */ mac->current_ampdu_density = 7; mac->current_ampdu_factor = 3; /* QOS */ rtlusb->acm_method = EACMWAY2_SW; /* IRQ */ /* HIMR - turn all on */ rtlusb->irq_mask[0] = 0xFFFFFFFF; /* HIMR_EX - turn all on */ rtlusb->irq_mask[1] = 0xFFFFFFFF; rtlusb->disablehwsm = true; } static void _rtl_rx_completed(struct urb *urb); static int _rtl_prep_rx_urb(struct ieee80211_hw *hw, struct rtl_usb *rtlusb, struct urb *urb, gfp_t gfp_mask) { void *buf; buf = usb_alloc_coherent(rtlusb->udev, rtlusb->rx_max_size, gfp_mask, &urb->transfer_dma); if (!buf) { pr_err("Failed to usb_alloc_coherent!!\n"); return -ENOMEM; } usb_fill_bulk_urb(urb, rtlusb->udev, usb_rcvbulkpipe(rtlusb->udev, rtlusb->in_ep), buf, rtlusb->rx_max_size, _rtl_rx_completed, rtlusb); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; return 0; } static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 *rxdesc = skb->data; struct ieee80211_hdr *hdr; bool unicast = false; __le16 fc; struct ieee80211_rx_status rx_status = {0}; struct rtl_stats stats = { .signal = 0, .rate = 0, }; skb_pull(skb, RTL_RX_DESC_SIZE); rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb); skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift)); hdr = rtl_get_hdr(skb); fc = hdr->frame_control; if (!stats.crc) { memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); if (is_broadcast_ether_addr(hdr->addr1)) { /*TODO*/; } else if (is_multicast_ether_addr(hdr->addr1)) { /*TODO*/ } else { unicast = true; rtlpriv->stats.rxbytesunicast += skb->len; } if (ieee80211_is_data(fc)) { rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX); if (unicast) rtlpriv->link_info.num_rx_inperiod++; } /* static bcn for roaming */ rtl_beacon_statistic(hw, skb); } } static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw, struct 
sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 *rxdesc = skb->data; struct ieee80211_hdr *hdr; bool unicast = false; __le16 fc; struct ieee80211_rx_status rx_status = {0}; struct rtl_stats stats = { .signal = 0, .rate = 0, }; skb_pull(skb, RTL_RX_DESC_SIZE); rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb); skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift)); hdr = rtl_get_hdr(skb); fc = hdr->frame_control; if (!stats.crc) { memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); if (is_broadcast_ether_addr(hdr->addr1)) { /*TODO*/; } else if (is_multicast_ether_addr(hdr->addr1)) { /*TODO*/ } else { unicast = true; rtlpriv->stats.rxbytesunicast += skb->len; } if (ieee80211_is_data(fc)) { rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX); if (unicast) rtlpriv->link_info.num_rx_inperiod++; } /* static bcn for roaming */ rtl_beacon_statistic(hw, skb); if (likely(rtl_action_proc(hw, skb, false))) ieee80211_rx(hw, skb); else dev_kfree_skb_any(skb); } else { dev_kfree_skb_any(skb); } } static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb) { struct sk_buff *_skb; struct sk_buff_head rx_queue; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); skb_queue_head_init(&rx_queue); if (rtlusb->usb_rx_segregate_hdl) rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue); WARN_ON(skb_queue_empty(&rx_queue)); while (!skb_queue_empty(&rx_queue)) { _skb = skb_dequeue(&rx_queue); _rtl_usb_rx_process_agg(hw, _skb); ieee80211_rx(hw, _skb); } } #define __RX_SKB_MAX_QUEUED 64 static void _rtl_rx_work(struct tasklet_struct *t) { struct rtl_usb *rtlusb = from_tasklet(rtlusb, t, rx_work_tasklet); struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf); struct sk_buff *skb; while ((skb = skb_dequeue(&rtlusb->rx_queue))) { if (unlikely(IS_USB_STOP(rtlusb))) { dev_kfree_skb_any(skb); continue; } if (likely(!rtlusb->usb_rx_segregate_hdl)) { _rtl_usb_rx_process_noagg(hw, skb); } else { /* TO DO */ _rtl_rx_pre_process(hw, skb); pr_err("rx agg not supported\n"); } } } static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr, unsigned int len) { #if NET_IP_ALIGN != 0 unsigned int padding = 0; #endif /* make function no-op when possible */ if (NET_IP_ALIGN == 0 || len < sizeof(*hdr)) return 0; #if NET_IP_ALIGN != 0 /* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */ /* TODO: deduplicate common code, define helper function instead? */ if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); padding ^= NET_IP_ALIGN; /* Input might be invalid, avoid accessing memory outside * the buffer. */ if ((unsigned long)qc - (unsigned long)hdr < len && *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) padding ^= NET_IP_ALIGN; } if (ieee80211_has_a4(hdr->frame_control)) padding ^= NET_IP_ALIGN; return padding; #endif } #define __RADIO_TAP_SIZE_RSV 32 static void _rtl_rx_completed(struct urb *_urb) { struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context; int err = 0; if (unlikely(IS_USB_STOP(rtlusb))) goto free; if (likely(0 == _urb->status)) { unsigned int padding; struct sk_buff *skb; unsigned int qlen; unsigned int size = _urb->actual_length; struct ieee80211_hdr *hdr; if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) { pr_err("Too short packet from bulk IN! (len: %d)\n", size); goto resubmit; } qlen = skb_queue_len(&rtlusb->rx_queue); if (qlen >= __RX_SKB_MAX_QUEUED) { pr_err("Pending RX skbuff queue full! 
(qlen: %d)\n", qlen); goto resubmit; } hdr = (void *)(_urb->transfer_buffer + RTL_RX_DESC_SIZE); padding = _rtl_rx_get_padding(hdr, size - RTL_RX_DESC_SIZE); skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding); if (!skb) { pr_err("Can't allocate skb for bulk IN!\n"); goto resubmit; } _rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep); /* Make sure the payload data is 4 byte aligned. */ skb_reserve(skb, padding); /* reserve some space for mac80211's radiotap */ skb_reserve(skb, __RADIO_TAP_SIZE_RSV); skb_put_data(skb, _urb->transfer_buffer, size); skb_queue_tail(&rtlusb->rx_queue, skb); tasklet_schedule(&rtlusb->rx_work_tasklet); goto resubmit; } switch (_urb->status) { /* disconnect */ case -ENOENT: case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: goto free; default: break; } resubmit: usb_anchor_urb(_urb, &rtlusb->rx_submitted); err = usb_submit_urb(_urb, GFP_ATOMIC); if (unlikely(err)) { usb_unanchor_urb(_urb); goto free; } return; free: /* On some architectures, usb_free_coherent must not be called from * hardirq context. Queue urb to cleanup list. */ usb_anchor_urb(_urb, &rtlusb->rx_cleanup_urbs); } #undef __RADIO_TAP_SIZE_RSV static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct urb *urb; usb_kill_anchored_urbs(&rtlusb->rx_submitted); tasklet_kill(&rtlusb->rx_work_tasklet); cancel_work_sync(&rtlpriv->works.lps_change_work); skb_queue_purge(&rtlusb->rx_queue); while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) { usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); } } static int _rtl_usb_receive(struct ieee80211_hw *hw) { struct urb *urb; int err; int i; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); WARN_ON(0 == rtlusb->rx_urb_num); /* 1600 == 1514 + max WLAN header + rtk info */ WARN_ON(rtlusb->rx_max_size < 1600); for (i = 0; i < rtlusb->rx_urb_num; i++) { err = -ENOMEM; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) goto err_out; err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL); if (err < 0) { pr_err("Failed to prep_rx_urb!!\n"); usb_free_urb(urb); goto err_out; } usb_anchor_urb(urb, &rtlusb->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) { usb_unanchor_urb(urb); usb_free_urb(urb); goto err_out; } usb_free_urb(urb); } return 0; err_out: usb_kill_anchored_urbs(&rtlusb->rx_submitted); return err; } static int rtl_usb_start(struct ieee80211_hw *hw) { int err; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); err = rtlpriv->cfg->ops->hw_init(hw); if (!err) { rtl_init_rx_config(hw); /* Enable software */ SET_USB_START(rtlusb); /* Should be called after adapter start and interrupt enable. 
*/ set_hal_start(rtlhal); /* Start bulk IN */ err = _rtl_usb_receive(hw); } return err; } /*======================= tx =========================================*/ static void _rtl_usb_cleanup_tx(struct ieee80211_hw *hw) { u32 i; struct sk_buff *_skb; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct ieee80211_tx_info *txinfo; for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) { while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) { rtlusb->usb_tx_cleanup(hw, _skb); txinfo = IEEE80211_SKB_CB(_skb); ieee80211_tx_info_clear_status(txinfo); txinfo->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(hw, _skb); } usb_kill_anchored_urbs(&rtlusb->tx_pending[i]); } usb_kill_anchored_urbs(&rtlusb->tx_submitted); } static void rtl_usb_cleanup(struct ieee80211_hw *hw) { _rtl_usb_cleanup_rx(hw); _rtl_usb_cleanup_tx(hw); } /* We may add some struct into struct rtl_usb later. Do deinit here. */ static void rtl_usb_deinit(struct ieee80211_hw *hw) { rtl_usb_cleanup(hw); } static void rtl_usb_stop(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct urb *urb; /* Should be called after adapter start and interrupt enable. */ set_hal_stop(rtlhal); cancel_work_sync(&rtlpriv->works.fill_h2c_cmd); /* Disable software */ SET_USB_STOP(rtlusb); /* free pre-allocated URBs from rtl_usb_start() */ usb_kill_anchored_urbs(&rtlusb->rx_submitted); tasklet_kill(&rtlusb->rx_work_tasklet); cancel_work_sync(&rtlpriv->works.lps_change_work); cancel_work_sync(&rtlpriv->works.update_beacon_work); flush_workqueue(rtlpriv->works.rtl_wq); skb_queue_purge(&rtlusb->rx_queue); while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) { usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); } rtlpriv->cfg->ops->hw_disable(hw); } static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb) { int err; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); usb_anchor_urb(_urb, &rtlusb->tx_submitted); err = usb_submit_urb(_urb, GFP_ATOMIC); if (err < 0) { struct sk_buff *skb; pr_err("Failed to submit urb\n"); usb_unanchor_urb(_urb); skb = (struct sk_buff *)_urb->context; kfree_skb(skb); } usb_free_urb(_urb); } static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb, struct sk_buff *skb) { struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct ieee80211_tx_info *txinfo; rtlusb->usb_tx_post_hdl(hw, urb, skb); skb_pull(skb, RTL_TX_HEADER_SIZE); txinfo = IEEE80211_SKB_CB(skb); ieee80211_tx_info_clear_status(txinfo); txinfo->flags |= IEEE80211_TX_STAT_ACK; if (urb->status) { pr_err("Urb has error status 0x%X\n", urb->status); goto out; } /* TODO: statistics */ out: ieee80211_tx_status_irqsafe(hw, skb); return urb->status; } static void _rtl_tx_complete(struct urb *urb) { struct sk_buff *skb = (struct sk_buff *)urb->context; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0]; struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf); int err; if (unlikely(IS_USB_STOP(rtlusb))) return; err = _usb_tx_post(hw, urb, skb); if (err) { /* Ignore error and keep issuing other URBs */ return; } } static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, struct sk_buff *skb, u32 ep_num) { struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct urb *_urb; WARN_ON(NULL == skb); _urb = usb_alloc_urb(0, GFP_ATOMIC); if (!_urb) return NULL; 
_rtl_install_trx_info(rtlusb, skb, ep_num); usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev, ep_num), skb->data, skb->len, _rtl_tx_complete, skb); _urb->transfer_flags |= URB_ZERO_PACKET; return _urb; } static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, enum rtl_txq qnum) { struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); u32 ep_num; struct urb *_urb = NULL; WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); if (unlikely(IS_USB_STOP(rtlusb))) { pr_err("USB device is stopping...\n"); kfree_skb(skb); return; } ep_num = rtlusb->ep_map.ep_mapping[qnum]; _urb = _rtl_usb_tx_urb_setup(hw, skb, ep_num); if (unlikely(!_urb)) { pr_err("Can't allocate urb. Drop skb!\n"); kfree_skb(skb); return; } _rtl_submit_tx_urb(hw, _urb); } static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb, u16 hw_queue) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct rtl_tx_desc *pdesc = NULL; struct rtl_tcb_desc tcb_desc; struct ieee80211_hdr *hdr = rtl_get_hdr(skb); __le16 fc = hdr->frame_control; u8 *pda_addr = hdr->addr1; memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); if (ieee80211_is_auth(fc)) { rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n"); } if (rtlpriv->psc.sw_ps_enabled) { if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) && !ieee80211_has_pm(fc)) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); } rtl_action_proc(hw, skb, true); if (is_multicast_ether_addr(pda_addr)) rtlpriv->stats.txbytesmulticast += skb->len; else if (is_broadcast_ether_addr(pda_addr)) rtlpriv->stats.txbytesbroadcast += skb->len; else rtlpriv->stats.txbytesunicast += skb->len; rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, NULL, info, sta, skb, hw_queue, &tcb_desc); if (ieee80211_is_data(fc)) rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); } static int rtl_usb_tx(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb, struct rtl_tcb_desc *dummy) { struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct ieee80211_hdr *hdr = rtl_get_hdr(skb); __le16 fc = hdr->frame_control; u16 hw_queue; if (unlikely(is_hal_stop(rtlhal))) goto err_free; hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb)); _rtl_usb_tx_preprocess(hw, sta, skb, hw_queue); _rtl_usb_transmit(hw, skb, hw_queue); return NETDEV_TX_OK; err_free: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb) { return false; } static void rtl_fill_h2c_cmd_work_callback(struct work_struct *work) { struct rtl_works *rtlworks = container_of(work, struct rtl_works, fill_h2c_cmd); struct ieee80211_hw *hw = rtlworks->hw; struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->cfg->ops->fill_h2c_cmd(hw, H2C_RA_MASK, 5, rtlpriv->rate_mask); } static const struct rtl_intf_ops rtl_usb_ops = { .adapter_start = rtl_usb_start, .adapter_stop = rtl_usb_stop, .adapter_tx = rtl_usb_tx, .waitq_insert = rtl_usb_tx_chk_waitq_insert, }; int rtl_usb_probe(struct usb_interface *intf, const struct usb_device_id *id, const struct rtl_hal_cfg *rtl_hal_cfg) { int err; struct ieee80211_hw *hw = NULL; struct rtl_priv *rtlpriv = NULL; struct usb_device *udev; struct rtl_usb_priv *usb_priv; hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) + sizeof(struct rtl_usb_priv), &rtl_ops); if (!hw) { pr_warn("rtl_usb: ieee80211 alloc failed\n"); return -ENOMEM; } rtlpriv = 
hw->priv; rtlpriv->hw = hw; rtlpriv->usb_data = kcalloc(RTL_USB_MAX_RX_COUNT, sizeof(u32), GFP_KERNEL); if (!rtlpriv->usb_data) { ieee80211_free_hw(hw); return -ENOMEM; } /* this spin lock must be initialized early */ spin_lock_init(&rtlpriv->locks.usb_lock); INIT_WORK(&rtlpriv->works.fill_h2c_cmd, rtl_fill_h2c_cmd_work_callback); INIT_WORK(&rtlpriv->works.lps_change_work, rtl_lps_change_work_callback); INIT_WORK(&rtlpriv->works.update_beacon_work, rtl_update_beacon_work_callback); rtlpriv->usb_data_index = 0; init_completion(&rtlpriv->firmware_loading_complete); SET_IEEE80211_DEV(hw, &intf->dev); udev = interface_to_usbdev(intf); usb_get_dev(udev); usb_priv = rtl_usbpriv(hw); memset(usb_priv, 0, sizeof(*usb_priv)); usb_priv->dev.intf = intf; usb_priv->dev.udev = udev; usb_set_intfdata(intf, hw); /* For dual MAC RTL8192DU, which has two interfaces. */ rtlpriv->rtlhal.interfaceindex = intf->altsetting[0].desc.bInterfaceNumber; /* init cfg & intf_ops */ rtlpriv->rtlhal.interface = INTF_USB; rtlpriv->cfg = rtl_hal_cfg; rtlpriv->intf_ops = &rtl_usb_ops; /* Init IO handler */ _rtl_usb_io_handler_init(&udev->dev, hw); rtlpriv->cfg->ops->read_chip_version(hw); /*like read eeprom and so on */ rtlpriv->cfg->ops->read_eeprom_info(hw); err = _rtl_usb_init(hw); if (err) goto error_out2; rtl_usb_init_sw(hw); /* Init mac80211 sw */ err = rtl_init_core(hw); if (err) { pr_err("Can't allocate sw for mac80211\n"); goto error_out2; } if (rtlpriv->cfg->ops->init_sw_vars(hw)) { pr_err("Can't init_sw_vars\n"); goto error_out; } rtl_init_sw_leds(hw); err = ieee80211_register_hw(hw); if (err) { pr_err("Can't register mac80211 hw.\n"); goto error_init_vars; } rtlpriv->mac80211.mac80211_registered = 1; set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); return 0; error_init_vars: wait_for_completion(&rtlpriv->firmware_loading_complete); rtlpriv->cfg->ops->deinit_sw_vars(hw); error_out: rtl_usb_deinit(hw); rtl_deinit_core(hw); error_out2: _rtl_usb_io_handler_release(hw); usb_put_dev(udev); kfree(rtlpriv->usb_data); ieee80211_free_hw(hw); return -ENODEV; } EXPORT_SYMBOL(rtl_usb_probe); void rtl_usb_disconnect(struct usb_interface *intf) { struct ieee80211_hw *hw = usb_get_intfdata(intf); struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); if (unlikely(!rtlpriv)) return; /* just in case driver is removed before firmware callback */ wait_for_completion(&rtlpriv->firmware_loading_complete); clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); /*ieee80211_unregister_hw will call ops_stop */ if (rtlmac->mac80211_registered == 1) { ieee80211_unregister_hw(hw); rtlmac->mac80211_registered = 0; } else { rtl_deinit_deferred_work(hw, false); rtlpriv->intf_ops->adapter_stop(hw); } /*deinit rfkill */ /* rtl_deinit_rfkill(hw); */ rtl_usb_deinit(hw); rtl_deinit_core(hw); kfree(rtlpriv->usb_data); rtlpriv->cfg->ops->deinit_sw_vars(hw); _rtl_usb_io_handler_release(hw); usb_put_dev(rtlusb->udev); usb_set_intfdata(intf, NULL); ieee80211_free_hw(hw); } EXPORT_SYMBOL(rtl_usb_disconnect); int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message) { return 0; } EXPORT_SYMBOL(rtl_usb_suspend); int rtl_usb_resume(struct usb_interface *pusb_intf) { return 0; } EXPORT_SYMBOL(rtl_usb_resume);
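For orientation, the exported entry points above are meant to be wired into a chip-specific struct usb_driver by the per-chip module. A minimal sketch follows; every rtl8192xx_* identifier, the VID/PID pair, and rtl8192xx_hal_cfg are hypothetical placeholders for whatever the chip module actually defines, not symbols from this file.

/*
 * Sketch only: how a chip driver would hook into rtl_usb_probe() and
 * friends. All rtl8192xx_* names and the device ID are assumptions.
 */
static int rtl8192xx_probe(struct usb_interface *intf,
			   const struct usb_device_id *id)
{
	return rtl_usb_probe(intf, id, &rtl8192xx_hal_cfg);
}

static const struct usb_device_id rtl8192xx_usb_ids[] = {
	{ USB_DEVICE(0x0bda, 0x8192) },	/* hypothetical VID/PID */
	{}
};
MODULE_DEVICE_TABLE(usb, rtl8192xx_usb_ids);

static struct usb_driver rtl8192xx_driver = {
	.name = "rtl8192xx",
	.id_table = rtl8192xx_usb_ids,
	.probe = rtl8192xx_probe,
	.disconnect = rtl_usb_disconnect,
	.suspend = rtl_usb_suspend,
	.resume = rtl_usb_resume,
};
module_usb_driver(rtl8192xx_driver);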
// SPDX-License-Identifier: GPL-2.0-only
/***************************************************************************
 *   Copyright (C) 2010-2012 by Bruno Prémont <bonbons@linux-vserver.org>  *
 *                                                                         *
 *   Based on Logitech G13 driver (v0.4)                                   *
 *     Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu>   *
 *                                                                         *
 ***************************************************************************/

#include <linux/hid.h>
#include <linux/fb.h>
#include <linux/lcd.h>

#include "hid-picolcd.h"

/*
 * lcd class device
 */
static int picolcd_get_contrast(struct lcd_device *ldev)
{
	struct picolcd_data *data = lcd_get_data(ldev);

	return data->lcd_contrast;
}

static int picolcd_set_contrast(struct lcd_device *ldev, int contrast)
{
	struct picolcd_data *data = lcd_get_data(ldev);
	struct hid_report *report = picolcd_out_report(REPORT_CONTRAST,
						       data->hdev);
	unsigned long flags;

	if (!report || report->maxfield != 1 ||
	    report->field[0]->report_count != 1)
		return -ENODEV;

	data->lcd_contrast = contrast & 0x0ff;
	spin_lock_irqsave(&data->lock, flags);
	hid_set_field(report->field[0], 0, data->lcd_contrast);
	if (!(data->status & PICOLCD_FAILED))
		hid_hw_request(data->hdev, report, HID_REQ_SET_REPORT);
	spin_unlock_irqrestore(&data->lock, flags);
	return 0;
}

static const struct lcd_ops picolcd_lcdops = {
	.get_contrast = picolcd_get_contrast,
	.set_contrast = picolcd_set_contrast,
};

int picolcd_init_lcd(struct picolcd_data *data, struct hid_report *report)
{
	struct device *dev = &data->hdev->dev;
	struct lcd_device *ldev;

	if (!report)
		return -ENODEV;
	if (report->maxfield != 1 || report->field[0]->report_count != 1 ||
	    report->field[0]->report_size != 8) {
		dev_err(dev, "unsupported CONTRAST report");
		return -EINVAL;
	}

	ldev = lcd_device_register(dev_name(dev), dev, data, &picolcd_lcdops);
	if (IS_ERR(ldev)) {
		dev_err(dev, "failed to register LCD\n");
		return PTR_ERR(ldev);
	}
	ldev->props.max_contrast = 0x0ff;
	data->lcd_contrast = 0xe5;
	data->lcd = ldev;
	picolcd_set_contrast(ldev, 0xe5);
	return 0;
}

void picolcd_exit_lcd(struct picolcd_data *data)
{
	struct lcd_device *ldev = data->lcd;

	data->lcd = NULL;
	lcd_device_unregister(ldev);
}

int picolcd_resume_lcd(struct picolcd_data *data)
{
	if (!data->lcd)
		return 0;
	return picolcd_set_contrast(data->lcd, data->lcd_contrast);
}
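Because picolcd_init_lcd() registers through the lcd class, the contrast hooks above become reachable from userspace via sysfs. A minimal userspace sketch, assuming the device's lcd class directory; the real directory name comes from dev_name() at registration time, so the "picolcd" path component below is a placeholder.

/*
 * Userspace sketch: drive picolcd_set_contrast() through the lcd class
 * "contrast" attribute. The directory name is an assumption.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/lcd/picolcd/contrast", "w");

	if (!f)
		return 1;
	/* the lcd class forwards this value to picolcd_set_contrast() */
	fprintf(f, "%d\n", 0xe5);
	fclose(f);
	return 0;
}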
// SPDX-License-Identifier: GPL-2.0

#ifndef _DRM_MANAGED_H_
#define _DRM_MANAGED_H_

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>

struct drm_device;
struct mutex;

typedef void (*drmres_release_t)(struct drm_device *dev, void *res);

/**
 * drmm_add_action - add a managed release action to a &drm_device
 * @dev: DRM device
 * @action: function which should be called when @dev is released
 * @data: opaque pointer, passed to @action
 *
 * This function adds the @action release action with optional parameter @data
 * to the list of cleanup actions for @dev. The cleanup actions will be run in
 * reverse order in the final drm_dev_put() call for @dev.
 */
#define drmm_add_action(dev, action, data) \
	__drmm_add_action(dev, action, data, #action)

int __must_check __drmm_add_action(struct drm_device *dev,
				   drmres_release_t action,
				   void *data, const char *name);

/**
 * drmm_add_action_or_reset - add a managed release action to a &drm_device
 * @dev: DRM device
 * @action: function which should be called when @dev is released
 * @data: opaque pointer, passed to @action
 *
 * Similar to drmm_add_action(), with the only difference that upon failure
 * @action is directly called for any cleanup work necessary on failures.
 */
#define drmm_add_action_or_reset(dev, action, data) \
	__drmm_add_action_or_reset(dev, action, data, #action)

int __must_check __drmm_add_action_or_reset(struct drm_device *dev,
					    drmres_release_t action,
					    void *data, const char *name);

void drmm_release_action(struct drm_device *dev,
			 drmres_release_t action,
			 void *data);

void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc;

/**
 * drmm_kzalloc - &drm_device managed kzalloc()
 * @dev: DRM device
 * @size: size of the memory allocation
 * @gfp: GFP allocation flags
 *
 * This is a &drm_device managed version of kzalloc(). The allocated memory is
 * automatically freed on the final drm_dev_put(). Memory can also be freed
 * before the final drm_dev_put() by calling drmm_kfree().
 */
static inline void *drmm_kzalloc(struct drm_device *dev, size_t size, gfp_t gfp)
{
	return drmm_kmalloc(dev, size, gfp | __GFP_ZERO);
}

/**
 * drmm_kmalloc_array - &drm_device managed kmalloc_array()
 * @dev: DRM device
 * @n: number of array elements to allocate
 * @size: size of array member
 * @flags: GFP allocation flags
 *
 * This is a &drm_device managed version of kmalloc_array(). The allocated
 * memory is automatically freed on the final drm_dev_put() and works exactly
 * like a memory allocation obtained by drmm_kmalloc().
 */
static inline void *drmm_kmalloc_array(struct drm_device *dev,
				       size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return drmm_kmalloc(dev, bytes, flags);
}

/**
 * drmm_kcalloc - &drm_device managed kcalloc()
 * @dev: DRM device
 * @n: number of array elements to allocate
 * @size: size of array member
 * @flags: GFP allocation flags
 *
 * This is a &drm_device managed version of kcalloc(). The allocated memory is
 * automatically freed on the final drm_dev_put() and works exactly like a
 * memory allocation obtained by drmm_kmalloc().
 */
static inline void *drmm_kcalloc(struct drm_device *dev,
				 size_t n, size_t size, gfp_t flags)
{
	return drmm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}

char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp);

void drmm_kfree(struct drm_device *dev, void *data);

void __drmm_mutex_release(struct drm_device *dev, void *res);

/**
 * drmm_mutex_init - &drm_device-managed mutex_init()
 * @dev: DRM device
 * @lock: lock to be initialized
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 *
 * This is a &drm_device-managed version of mutex_init(). The initialized
 * lock is automatically destroyed on the final drm_dev_put().
 */
#define drmm_mutex_init(dev, lock) ({					\
	mutex_init(lock);						\
	drmm_add_action_or_reset(dev, __drmm_mutex_release, lock);	\
})

void __drmm_workqueue_release(struct drm_device *device, void *wq);

#define drmm_alloc_ordered_workqueue(dev, fmt, flags, args...)		\
	({								\
		struct workqueue_struct *wq =				\
			alloc_ordered_workqueue(fmt, flags, ##args);	\
		wq ? ({							\
			int ret = drmm_add_action_or_reset(		\
				dev, __drmm_workqueue_release, wq);	\
			ret ? ERR_PTR(ret) : wq;			\
		}) :							\
		wq;							\
	})

#endif
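To make the managed-release semantics above concrete, here is a minimal usage sketch; struct foo_device, foo_release() and foo_init() are hypothetical names, not part of this header. Per-device state is allocated with drmm_kzalloc() and a custom action is queued with drmm_add_action_or_reset(), so both are torn down automatically (in reverse order of registration) on the final drm_dev_put().

#include <drm/drm_managed.h>

/* Hypothetical per-device state; only for illustration. */
struct foo_device {
	int dummy;
};

static void foo_release(struct drm_device *dev, void *data)
{
	struct foo_device *foo = data;

	/* runs on the final drm_dev_put(); last-minute teardown goes here */
	pr_info("releasing foo state %p\n", foo);
}

static int foo_init(struct drm_device *dev)
{
	struct foo_device *foo;

	/* freed automatically when @dev is released */
	foo = drmm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* on failure, foo_release() is called immediately */
	return drmm_add_action_or_reset(dev, foo_release, foo);
}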
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  (c) 1999 Andreas Gal		<gal@cs.uni-magdeburg.de>
 *  (c) 2000-2001 Vojtech Pavlik	<vojtech@ucw.cz>
 *  (c) 2007-2009 Jiri Kosina
 *
 *  HID debugging support
 */

/*
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/kfifo.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/poll.h>

#include <linux/hid.h>
#include <linux/hid-debug.h>

static struct dentry *hid_debug_root;

struct hid_usage_entry {
	unsigned page;
	unsigned usage;
	const char *description;
};

static const struct hid_usage_entry hid_usage_table[] = {
	{ 0x00, 0, "Undefined" }, { 0x01, 0, "GenericDesktop" },
	{ 0x01, 0x0001, "Pointer" }, { 0x01, 0x0002, "Mouse" },
	{ 0x01, 0x0004, "Joystick" }, { 0x01, 0x0005, "Gamepad" },
	{ 0x01, 0x0006, "Keyboard" }, { 0x01, 0x0007, "Keypad" },
	{ 0x01, 0x0008, "MultiaxisController" },
	{ 0x01, 0x0009, "TabletPCSystemControls" },
	{ 0x01, 0x000a, "WaterCoolingDevice" },
	{ 0x01, 0x000b, "ComputerChassisDevice" },
	{ 0x01, 0x000c, "WirelessRadioControls" },
	{ 0x01, 0x000d, "PortableDeviceControl" },
	{ 0x01, 0x000e, "SystemMultiAxisController" },
	{ 0x01, 0x000f, "SpatialController" },
	{ 0x01, 0x0010, "AssistiveControl" },
	{ 0x01, 0x0011, "DeviceDock" }, { 0x01, 0x0012, "DockableDevice" },
	{ 0x01, 0x0013, "CallStateManagementControl" },
	{ 0x01, 0x0030, "X" }, { 0x01, 0x0031, "Y" }, { 0x01, 0x0032, "Z" },
	{ 0x01, 0x0033, "Rx" }, { 0x01, 0x0034, "Ry" }, { 0x01, 0x0035, "Rz" },
	{ 0x01, 0x0036, "Slider" }, { 0x01, 0x0037, "Dial" },
	{ 0x01, 0x0038, "Wheel" }, { 0x01, 0x0039, "HatSwitch" },
	{ 0x01, 0x003a, "CountedBuffer" }, { 0x01, 0x003b, "ByteCount" },
	{ 0x01, 0x003c, "MotionWakeup" }, { 0x01, 0x003d, "Start" },
	{ 0x01, 0x003e, "Select" },
	{ 0x01, 0x0040, "Vx" }, { 0x01, 0x0041, "Vy" }, { 0x01, 0x0042, "Vz" },
	{ 0x01, 0x0043, "Vbrx" }, { 0x01, 0x0044, "Vbry" },
	{ 0x01, 0x0045, "Vbrz" }, { 0x01, 0x0046, "Vno" },
	{ 0x01, 0x0047, "FeatureNotification" },
	{ 0x01, 0x0048, "ResolutionMultiplier" },
	{ 0x01, 0x0049, "Qx" }, { 0x01, 0x004a, "Qy" },
	{ 0x01, 0x004b, "Qz" }, { 0x01, 0x004c, "Qw" },
	{ 0x01, 0x0080, "SystemControl" },
	{ 0x01, 0x0081, "SystemPowerDown" }, { 0x01, 0x0082, "SystemSleep" },
	{ 0x01, 0x0083, "SystemWakeUp" },
	{ 0x01, 0x0084, "SystemContextMenu" },
	{ 0x01, 0x0085, "SystemMainMenu" }, { 0x01, 0x0086, "SystemAppMenu" },
	{ 0x01, 0x0087, "SystemMenuHelp" }, { 0x01, 0x0088, "SystemMenuExit" },
	{ 0x01, 0x0089, "SystemMenuSelect" },
	{ 0x01, 0x008a, "SystemMenuRight" }, { 0x01, 0x008b, "SystemMenuLeft" },
	{ 0x01, 0x008c, "SystemMenuUp" }, { 0x01, 0x008d, "SystemMenuDown" },
	{ 0x01, 0x008e, "SystemColdRestart" },
	{ 0x01, 0x008f, "SystemWarmRestart" },
	{ 0x01, 0x0090, "DpadUp" }, { 0x01, 0x0091, "DpadDown" },
	{ 0x01, 0x0092, "DpadRight" }, { 0x01, 0x0093, "DpadLeft" },
	{ 0x01, 0x0094, "IndexTrigger" }, { 0x01, 0x0095, "PalmTrigger" },
	{ 0x01, 0x0096, "Thumbstick" }, { 0x01, 0x0097,
"SystemFunctionShift" }, { 0x01, 0x0098, "SystemFunctionShiftLock" }, { 0x01, 0x0099, "SystemFunctionShiftLockIndicator" }, { 0x01, 0x009a, "SystemDismissNotification" }, { 0x01, 0x009b, "SystemDoNotDisturb" }, { 0x01, 0x00a0, "SystemDock" }, { 0x01, 0x00a1, "SystemUndock" }, { 0x01, 0x00a2, "SystemSetup" }, { 0x01, 0x00a3, "SystemBreak" }, { 0x01, 0x00a4, "SystemDebuggerBreak" }, { 0x01, 0x00a5, "ApplicationBreak" }, { 0x01, 0x00a6, "ApplicationDebuggerBreak" }, { 0x01, 0x00a7, "SystemSpeakerMute" }, { 0x01, 0x00a8, "SystemHibernate" }, { 0x01, 0x00a9, "SystemMicrophoneMute" }, { 0x01, 0x00b0, "SystemDisplayInvert" }, { 0x01, 0x00b1, "SystemDisplayInternal" }, { 0x01, 0x00b2, "SystemDisplayExternal" }, { 0x01, 0x00b3, "SystemDisplayBoth" }, { 0x01, 0x00b4, "SystemDisplayDual" }, { 0x01, 0x00b5, "SystemDisplayToggleIntExtMode" }, { 0x01, 0x00b6, "SystemDisplaySwapPrimarySecondary" }, { 0x01, 0x00b7, "SystemDisplayToggleLCDAutoscale" }, { 0x01, 0x00c0, "SensorZone" }, { 0x01, 0x00c1, "RPM" }, { 0x01, 0x00c2, "CoolantLevel" }, { 0x01, 0x00c3, "CoolantCriticalLevel" }, { 0x01, 0x00c4, "CoolantPump" }, { 0x01, 0x00c5, "ChassisEnclosure" }, { 0x01, 0x00c6, "WirelessRadioButton" }, { 0x01, 0x00c7, "WirelessRadioLED" }, { 0x01, 0x00c8, "WirelessRadioSliderSwitch" }, { 0x01, 0x00c9, "SystemDisplayRotationLockButton" }, { 0x01, 0x00ca, "SystemDisplayRotationLockSliderSwitch" }, { 0x01, 0x00cb, "ControlEnable" }, { 0x01, 0x00d0, "DockableDeviceUniqueID" }, { 0x01, 0x00d1, "DockableDeviceVendorID" }, { 0x01, 0x00d2, "DockableDevicePrimaryUsagePage" }, { 0x01, 0x00d3, "DockableDevicePrimaryUsageID" }, { 0x01, 0x00d4, "DockableDeviceDockingState" }, { 0x01, 0x00d5, "DockableDeviceDisplayOcclusion" }, { 0x01, 0x00d6, "DockableDeviceObjectType" }, { 0x01, 0x00e0, "CallActiveLED" }, { 0x01, 0x00e1, "CallMuteToggle" }, { 0x01, 0x00e2, "CallMuteLED" }, { 0x02, 0, "SimulationControls" }, { 0x02, 0x0001, "FlightSimulationDevice" }, { 0x02, 0x0002, "AutomobileSimulationDevice" }, { 0x02, 0x0003, "TankSimulationDevice" }, { 0x02, 0x0004, "SpaceshipSimulationDevice" }, { 0x02, 0x0005, "SubmarineSimulationDevice" }, { 0x02, 0x0006, "SailingSimulationDevice" }, { 0x02, 0x0007, "MotorcycleSimulationDevice" }, { 0x02, 0x0008, "SportsSimulationDevice" }, { 0x02, 0x0009, "AirplaneSimulationDevice" }, { 0x02, 0x000a, "HelicopterSimulationDevice" }, { 0x02, 0x000b, "MagicCarpetSimulationDevice" }, { 0x02, 0x000c, "BicycleSimulationDevice" }, { 0x02, 0x0020, "FlightControlStick" }, { 0x02, 0x0021, "FlightStick" }, { 0x02, 0x0022, "CyclicControl" }, { 0x02, 0x0023, "CyclicTrim" }, { 0x02, 0x0024, "FlightYoke" }, { 0x02, 0x0025, "TrackControl" }, { 0x02, 0x00b0, "Aileron" }, { 0x02, 0x00b1, "AileronTrim" }, { 0x02, 0x00b2, "AntiTorqueControl" }, { 0x02, 0x00b3, "AutopilotEnable" }, { 0x02, 0x00b4, "ChaffRelease" }, { 0x02, 0x00b5, "CollectiveControl" }, { 0x02, 0x00b6, "DiveBrake" }, { 0x02, 0x00b7, "ElectronicCountermeasures" }, { 0x02, 0x00b8, "Elevator" }, { 0x02, 0x00b9, "ElevatorTrim" }, { 0x02, 0x00ba, "Rudder" }, { 0x02, 0x00bb, "Throttle" }, { 0x02, 0x00bc, "FlightCommunications" }, { 0x02, 0x00bd, "FlareRelease" }, { 0x02, 0x00be, "LandingGear" }, { 0x02, 0x00bf, "ToeBrake" }, { 0x02, 0x00c0, "Trigger" }, { 0x02, 0x00c1, "WeaponsArm" }, { 0x02, 0x00c2, "WeaponsSelect" }, { 0x02, 0x00c3, "WingFlaps" }, { 0x02, 0x00c4, "Accelerator" }, { 0x02, 0x00c5, "Brake" }, { 0x02, 0x00c6, "Clutch" }, { 0x02, 0x00c7, "Shifter" }, { 0x02, 0x00c8, "Steering" }, { 0x02, 0x00c9, "TurretDirection" }, { 0x02, 0x00ca, 
"BarrelElevation" }, { 0x02, 0x00cb, "DivePlane" }, { 0x02, 0x00cc, "Ballast" }, { 0x02, 0x00cd, "BicycleCrank" }, { 0x02, 0x00ce, "HandleBars" }, { 0x02, 0x00cf, "FrontBrake" }, { 0x02, 0x00d0, "RearBrake" }, { 0x03, 0, "VRControls" }, { 0x03, 0x0001, "Belt" }, { 0x03, 0x0002, "BodySuit" }, { 0x03, 0x0003, "Flexor" }, { 0x03, 0x0004, "Glove" }, { 0x03, 0x0005, "HeadTracker" }, { 0x03, 0x0006, "HeadMountedDisplay" }, { 0x03, 0x0007, "HandTracker" }, { 0x03, 0x0008, "Oculometer" }, { 0x03, 0x0009, "Vest" }, { 0x03, 0x000a, "AnimatronicDevice" }, { 0x03, 0x0020, "StereoEnable" }, { 0x03, 0x0021, "DisplayEnable" }, { 0x04, 0, "SportControls" }, { 0x04, 0x0001, "BaseballBat" }, { 0x04, 0x0002, "GolfClub" }, { 0x04, 0x0003, "RowingMachine" }, { 0x04, 0x0004, "Treadmill" }, { 0x04, 0x0030, "Oar" }, { 0x04, 0x0031, "Slope" }, { 0x04, 0x0032, "Rate" }, { 0x04, 0x0033, "StickSpeed" }, { 0x04, 0x0034, "StickFaceAngle" }, { 0x04, 0x0035, "StickHeelToe" }, { 0x04, 0x0036, "StickFollowThrough" }, { 0x04, 0x0037, "StickTempo" }, { 0x04, 0x0038, "StickType" }, { 0x04, 0x0039, "StickHeight" }, { 0x04, 0x0050, "Putter" }, { 0x04, 0x0051, "1Iron" }, { 0x04, 0x0052, "2Iron" }, { 0x04, 0x0053, "3Iron" }, { 0x04, 0x0054, "4Iron" }, { 0x04, 0x0055, "5Iron" }, { 0x04, 0x0056, "6Iron" }, { 0x04, 0x0057, "7Iron" }, { 0x04, 0x0058, "8Iron" }, { 0x04, 0x0059, "9Iron" }, { 0x04, 0x005a, "10Iron" }, { 0x04, 0x005b, "11Iron" }, { 0x04, 0x005c, "SandWedge" }, { 0x04, 0x005d, "LoftWedge" }, { 0x04, 0x005e, "PowerWedge" }, { 0x04, 0x005f, "1Wood" }, { 0x04, 0x0060, "3Wood" }, { 0x04, 0x0061, "5Wood" }, { 0x04, 0x0062, "7Wood" }, { 0x04, 0x0063, "9Wood" }, { 0x05, 0, "GameControls" }, { 0x05, 0x0001, "3DGameController" }, { 0x05, 0x0002, "PinballDevice" }, { 0x05, 0x0003, "GunDevice" }, { 0x05, 0x0020, "PointofView" }, { 0x05, 0x0021, "TurnRightLeft" }, { 0x05, 0x0022, "PitchForwardBackward" }, { 0x05, 0x0023, "RollRightLeft" }, { 0x05, 0x0024, "MoveRightLeft" }, { 0x05, 0x0025, "MoveForwardBackward" }, { 0x05, 0x0026, "MoveUpDown" }, { 0x05, 0x0027, "LeanRightLeft" }, { 0x05, 0x0028, "LeanForwardBackward" }, { 0x05, 0x0029, "HeightofPOV" }, { 0x05, 0x002a, "Flipper" }, { 0x05, 0x002b, "SecondaryFlipper" }, { 0x05, 0x002c, "Bump" }, { 0x05, 0x002d, "NewGame" }, { 0x05, 0x002e, "ShootBall" }, { 0x05, 0x002f, "Player" }, { 0x05, 0x0030, "GunBolt" }, { 0x05, 0x0031, "GunClip" }, { 0x05, 0x0032, "GunSelector" }, { 0x05, 0x0033, "GunSingleShot" }, { 0x05, 0x0034, "GunBurst" }, { 0x05, 0x0035, "GunAutomatic" }, { 0x05, 0x0036, "GunSafety" }, { 0x05, 0x0037, "GamepadFireJump" }, { 0x05, 0x0039, "GamepadTrigger" }, { 0x05, 0x003a, "FormfittingGamepad" }, { 0x06, 0, "GenericDeviceControls" }, { 0x06, 0x0001, "BackgroundNonuserControls" }, { 0x06, 0x0020, "BatteryStrength" }, { 0x06, 0x0021, "WirelessChannel" }, { 0x06, 0x0022, "WirelessID" }, { 0x06, 0x0023, "DiscoverWirelessControl" }, { 0x06, 0x0024, "SecurityCodeCharacterEntered" }, { 0x06, 0x0025, "SecurityCodeCharacterErased" }, { 0x06, 0x0026, "SecurityCodeCleared" }, { 0x06, 0x0027, "SequenceID" }, { 0x06, 0x0028, "SequenceIDReset" }, { 0x06, 0x0029, "RFSignalStrength" }, { 0x06, 0x002a, "SoftwareVersion" }, { 0x06, 0x002b, "ProtocolVersion" }, { 0x06, 0x002c, "HardwareVersion" }, { 0x06, 0x002d, "Major" }, { 0x06, 0x002e, "Minor" }, { 0x06, 0x002f, "Revision" }, { 0x06, 0x0030, "Handedness" }, { 0x06, 0x0031, "EitherHand" }, { 0x06, 0x0032, "LeftHand" }, { 0x06, 0x0033, "RightHand" }, { 0x06, 0x0034, "BothHands" }, { 0x06, 0x0040, "GripPoseOffset" }, { 0x06, 0x0041, 
"PointerPoseOffset" }, { 0x07, 0, "KeyboardKeypad" }, { 0x07, 0x0001, "ErrorRollOver" }, { 0x07, 0x0002, "POSTFail" }, { 0x07, 0x0003, "ErrorUndefined" }, { 0x07, 0x0004, "KeyboardA" }, { 0x07, 0x0005, "KeyboardB" }, { 0x07, 0x0006, "KeyboardC" }, { 0x07, 0x0007, "KeyboardD" }, { 0x07, 0x0008, "KeyboardE" }, { 0x07, 0x0009, "KeyboardF" }, { 0x07, 0x000a, "KeyboardG" }, { 0x07, 0x000b, "KeyboardH" }, { 0x07, 0x000c, "KeyboardI" }, { 0x07, 0x000d, "KeyboardJ" }, { 0x07, 0x000e, "KeyboardK" }, { 0x07, 0x000f, "KeyboardL" }, { 0x07, 0x0010, "KeyboardM" }, { 0x07, 0x0011, "KeyboardN" }, { 0x07, 0x0012, "KeyboardO" }, { 0x07, 0x0013, "KeyboardP" }, { 0x07, 0x0014, "KeyboardQ" }, { 0x07, 0x0015, "KeyboardR" }, { 0x07, 0x0016, "KeyboardS" }, { 0x07, 0x0017, "KeyboardT" }, { 0x07, 0x0018, "KeyboardU" }, { 0x07, 0x0019, "KeyboardV" }, { 0x07, 0x001a, "KeyboardW" }, { 0x07, 0x001b, "KeyboardX" }, { 0x07, 0x001c, "KeyboardY" }, { 0x07, 0x001d, "KeyboardZ" }, { 0x07, 0x001e, "Keyboard1andBang" }, { 0x07, 0x001f, "Keyboard2andAt" }, { 0x07, 0x0020, "Keyboard3andHash" }, { 0x07, 0x0021, "Keyboard4andDollar" }, { 0x07, 0x0022, "Keyboard5andPercent" }, { 0x07, 0x0023, "Keyboard6andCaret" }, { 0x07, 0x0024, "Keyboard7andAmpersand" }, { 0x07, 0x0025, "Keyboard8andStar" }, { 0x07, 0x0026, "Keyboard9andLeftBracket" }, { 0x07, 0x0027, "Keyboard0andRightBracket" }, { 0x07, 0x0028, "KeyboardReturnEnter" }, { 0x07, 0x0029, "KeyboardEscape" }, { 0x07, 0x002a, "KeyboardDelete" }, { 0x07, 0x002b, "KeyboardTab" }, { 0x07, 0x002c, "KeyboardSpacebar" }, { 0x07, 0x002d, "KeyboardDashandUnderscore" }, { 0x07, 0x002e, "KeyboardEqualsandPlus" }, { 0x07, 0x002f, "KeyboardLeftBrace" }, { 0x07, 0x0030, "KeyboardRightBrace" }, { 0x07, 0x0031, "KeyboardBackslashandPipe" }, { 0x07, 0x0032, "KeyboardNonUSHashandTilde" }, { 0x07, 0x0033, "KeyboardSemiColonandColon" }, { 0x07, 0x0034, "KeyboardLeftAposandDouble" }, { 0x07, 0x0035, "KeyboardGraveAccentandTilde" }, { 0x07, 0x0036, "KeyboardCommaandLessThan" }, { 0x07, 0x0037, "KeyboardPeriodandGreaterThan" }, { 0x07, 0x0038, "KeyboardForwardSlashandQuestionMark" }, { 0x07, 0x0039, "KeyboardCapsLock" }, { 0x07, 0x003a, "KeyboardF1" }, { 0x07, 0x003b, "KeyboardF2" }, { 0x07, 0x003c, "KeyboardF3" }, { 0x07, 0x003d, "KeyboardF4" }, { 0x07, 0x003e, "KeyboardF5" }, { 0x07, 0x003f, "KeyboardF6" }, { 0x07, 0x0040, "KeyboardF7" }, { 0x07, 0x0041, "KeyboardF8" }, { 0x07, 0x0042, "KeyboardF9" }, { 0x07, 0x0043, "KeyboardF10" }, { 0x07, 0x0044, "KeyboardF11" }, { 0x07, 0x0045, "KeyboardF12" }, { 0x07, 0x0046, "KeyboardPrintScreen" }, { 0x07, 0x0047, "KeyboardScrollLock" }, { 0x07, 0x0048, "KeyboardPause" }, { 0x07, 0x0049, "KeyboardInsert" }, { 0x07, 0x004a, "KeyboardHome" }, { 0x07, 0x004b, "KeyboardPageUp" }, { 0x07, 0x004c, "KeyboardDeleteForward" }, { 0x07, 0x004d, "KeyboardEnd" }, { 0x07, 0x004e, "KeyboardPageDown" }, { 0x07, 0x004f, "KeyboardRightArrow" }, { 0x07, 0x0050, "KeyboardLeftArrow" }, { 0x07, 0x0051, "KeyboardDownArrow" }, { 0x07, 0x0052, "KeyboardUpArrow" }, { 0x07, 0x0053, "KeypadNumLockandClear" }, { 0x07, 0x0054, "KeypadForwardSlash" }, { 0x07, 0x0055, "KeypadStar" }, { 0x07, 0x0056, "KeypadDash" }, { 0x07, 0x0057, "KeypadPlus" }, { 0x07, 0x0058, "KeypadENTER" }, { 0x07, 0x0059, "Keypad1andEnd" }, { 0x07, 0x005a, "Keypad2andDownArrow" }, { 0x07, 0x005b, "Keypad3andPageDn" }, { 0x07, 0x005c, "Keypad4andLeftArrow" }, { 0x07, 0x005d, "Keypad5" }, { 0x07, 0x005e, "Keypad6andRightArrow" }, { 0x07, 0x005f, "Keypad7andHome" }, { 0x07, 0x0060, "Keypad8andUpArrow" }, { 0x07, 0x0061, 
"Keypad9andPageUp" }, { 0x07, 0x0062, "Keypad0andInsert" }, { 0x07, 0x0063, "KeypadPeriodandDelete" }, { 0x07, 0x0064, "KeyboardNonUSBackslashandPipe" }, { 0x07, 0x0065, "KeyboardApplication" }, { 0x07, 0x0066, "KeyboardPower" }, { 0x07, 0x0067, "KeypadEquals" }, { 0x07, 0x0068, "KeyboardF13" }, { 0x07, 0x0069, "KeyboardF14" }, { 0x07, 0x006a, "KeyboardF15" }, { 0x07, 0x006b, "KeyboardF16" }, { 0x07, 0x006c, "KeyboardF17" }, { 0x07, 0x006d, "KeyboardF18" }, { 0x07, 0x006e, "KeyboardF19" }, { 0x07, 0x006f, "KeyboardF20" }, { 0x07, 0x0070, "KeyboardF21" }, { 0x07, 0x0071, "KeyboardF22" }, { 0x07, 0x0072, "KeyboardF23" }, { 0x07, 0x0073, "KeyboardF24" }, { 0x07, 0x0074, "KeyboardExecute" }, { 0x07, 0x0075, "KeyboardHelp" }, { 0x07, 0x0076, "KeyboardMenu" }, { 0x07, 0x0077, "KeyboardSelect" }, { 0x07, 0x0078, "KeyboardStop" }, { 0x07, 0x0079, "KeyboardAgain" }, { 0x07, 0x007a, "KeyboardUndo" }, { 0x07, 0x007b, "KeyboardCut" }, { 0x07, 0x007c, "KeyboardCopy" }, { 0x07, 0x007d, "KeyboardPaste" }, { 0x07, 0x007e, "KeyboardFind" }, { 0x07, 0x007f, "KeyboardMute" }, { 0x07, 0x0080, "KeyboardVolumeUp" }, { 0x07, 0x0081, "KeyboardVolumeDown" }, { 0x07, 0x0082, "KeyboardLockingCapsLock" }, { 0x07, 0x0083, "KeyboardLockingNumLock" }, { 0x07, 0x0084, "KeyboardLockingScrollLock" }, { 0x07, 0x0085, "KeypadComma" }, { 0x07, 0x0086, "KeypadEqualSign" }, { 0x07, 0x0087, "KeyboardInternational1" }, { 0x07, 0x0088, "KeyboardInternational2" }, { 0x07, 0x0089, "KeyboardInternational3" }, { 0x07, 0x008a, "KeyboardInternational4" }, { 0x07, 0x008b, "KeyboardInternational5" }, { 0x07, 0x008c, "KeyboardInternational6" }, { 0x07, 0x008d, "KeyboardInternational7" }, { 0x07, 0x008e, "KeyboardInternational8" }, { 0x07, 0x008f, "KeyboardInternational9" }, { 0x07, 0x0090, "KeyboardLANG1" }, { 0x07, 0x0091, "KeyboardLANG2" }, { 0x07, 0x0092, "KeyboardLANG3" }, { 0x07, 0x0093, "KeyboardLANG4" }, { 0x07, 0x0094, "KeyboardLANG5" }, { 0x07, 0x0095, "KeyboardLANG6" }, { 0x07, 0x0096, "KeyboardLANG7" }, { 0x07, 0x0097, "KeyboardLANG8" }, { 0x07, 0x0098, "KeyboardLANG9" }, { 0x07, 0x0099, "KeyboardAlternateErase" }, { 0x07, 0x009a, "KeyboardSysReqAttention" }, { 0x07, 0x009b, "KeyboardCancel" }, { 0x07, 0x009c, "KeyboardClear" }, { 0x07, 0x009d, "KeyboardPrior" }, { 0x07, 0x009e, "KeyboardReturn" }, { 0x07, 0x009f, "KeyboardSeparator" }, { 0x07, 0x00a0, "KeyboardOut" }, { 0x07, 0x00a1, "KeyboardOper" }, { 0x07, 0x00a2, "KeyboardClearAgain" }, { 0x07, 0x00a3, "KeyboardCrSelProps" }, { 0x07, 0x00a4, "KeyboardExSel" }, { 0x07, 0x00b0, "KeypadDouble0" }, { 0x07, 0x00b1, "KeypadTriple0" }, { 0x07, 0x00b2, "ThousandsSeparator" }, { 0x07, 0x00b3, "DecimalSeparator" }, { 0x07, 0x00b4, "CurrencyUnit" }, { 0x07, 0x00b5, "CurrencySubunit" }, { 0x07, 0x00b6, "KeypadLeftBracket" }, { 0x07, 0x00b7, "KeypadRightBracket" }, { 0x07, 0x00b8, "KeypadLeftBrace" }, { 0x07, 0x00b9, "KeypadRightBrace" }, { 0x07, 0x00ba, "KeypadTab" }, { 0x07, 0x00bb, "KeypadBackspace" }, { 0x07, 0x00bc, "KeypadA" }, { 0x07, 0x00bd, "KeypadB" }, { 0x07, 0x00be, "KeypadC" }, { 0x07, 0x00bf, "KeypadD" }, { 0x07, 0x00c0, "KeypadE" }, { 0x07, 0x00c1, "KeypadF" }, { 0x07, 0x00c2, "KeypadXOR" }, { 0x07, 0x00c3, "KeypadCaret" }, { 0x07, 0x00c4, "KeypadPercentage" }, { 0x07, 0x00c5, "KeypadLess" }, { 0x07, 0x00c6, "KeypadGreater" }, { 0x07, 0x00c7, "KeypadAmpersand" }, { 0x07, 0x00c8, "KeypadDoubleAmpersand" }, { 0x07, 0x00c9, "KeypadBar" }, { 0x07, 0x00ca, "KeypadDoubleBar" }, { 0x07, 0x00cb, "KeypadColon" }, { 0x07, 0x00cc, "KeypadHash" }, { 0x07, 0x00cd, "KeypadSpace" }, { 
0x07, 0x00ce, "KeypadAt" }, { 0x07, 0x00cf, "KeypadBang" }, { 0x07, 0x00d0, "KeypadMemoryStore" }, { 0x07, 0x00d1, "KeypadMemoryRecall" }, { 0x07, 0x00d2, "KeypadMemoryClear" }, { 0x07, 0x00d3, "KeypadMemoryAdd" }, { 0x07, 0x00d4, "KeypadMemorySubtract" }, { 0x07, 0x00d5, "KeypadMemoryMultiply" }, { 0x07, 0x00d6, "KeypadMemoryDivide" }, { 0x07, 0x00d7, "KeypadPlusMinus" }, { 0x07, 0x00d8, "KeypadClear" }, { 0x07, 0x00d9, "KeypadClearEntry" }, { 0x07, 0x00da, "KeypadBinary" }, { 0x07, 0x00db, "KeypadOctal" }, { 0x07, 0x00dc, "KeypadDecimal" }, { 0x07, 0x00dd, "KeypadHexadecimal" }, { 0x07, 0x00e0, "KeyboardLeftControl" }, { 0x07, 0x00e1, "KeyboardLeftShift" }, { 0x07, 0x00e2, "KeyboardLeftAlt" }, { 0x07, 0x00e3, "KeyboardLeftGUI" }, { 0x07, 0x00e4, "KeyboardRightControl" }, { 0x07, 0x00e5, "KeyboardRightShift" }, { 0x07, 0x00e6, "KeyboardRightAlt" }, { 0x07, 0x00e7, "KeyboardRightGUI" }, { 0x08, 0, "LED" }, { 0x08, 0x0001, "NumLock" }, { 0x08, 0x0002, "CapsLock" }, { 0x08, 0x0003, "ScrollLock" }, { 0x08, 0x0004, "Compose" }, { 0x08, 0x0005, "Kana" }, { 0x08, 0x0006, "Power" }, { 0x08, 0x0007, "Shift" }, { 0x08, 0x0008, "DoNotDisturb" }, { 0x08, 0x0009, "Mute" }, { 0x08, 0x000a, "ToneEnable" }, { 0x08, 0x000b, "HighCutFilter" }, { 0x08, 0x000c, "LowCutFilter" }, { 0x08, 0x000d, "EqualizerEnable" }, { 0x08, 0x000e, "SoundFieldOn" }, { 0x08, 0x000f, "SurroundOn" }, { 0x08, 0x0010, "Repeat" }, { 0x08, 0x0011, "Stereo" }, { 0x08, 0x0012, "SamplingRateDetect" }, { 0x08, 0x0013, "Spinning" }, { 0x08, 0x0014, "CAV" }, { 0x08, 0x0015, "CLV" }, { 0x08, 0x0016, "RecordingFormatDetect" }, { 0x08, 0x0017, "OffHook" }, { 0x08, 0x0018, "Ring" }, { 0x08, 0x0019, "MessageWaiting" }, { 0x08, 0x001a, "DataMode" }, { 0x08, 0x001b, "BatteryOperation" }, { 0x08, 0x001c, "BatteryOK" }, { 0x08, 0x001d, "BatteryLow" }, { 0x08, 0x001e, "Speaker" }, { 0x08, 0x001f, "Headset" }, { 0x08, 0x0020, "Hold" }, { 0x08, 0x0021, "Microphone" }, { 0x08, 0x0022, "Coverage" }, { 0x08, 0x0023, "NightMode" }, { 0x08, 0x0024, "SendCalls" }, { 0x08, 0x0025, "CallPickup" }, { 0x08, 0x0026, "Conference" }, { 0x08, 0x0027, "Standby" }, { 0x08, 0x0028, "CameraOn" }, { 0x08, 0x0029, "CameraOff" }, { 0x08, 0x002a, "OnLine" }, { 0x08, 0x002b, "OffLine" }, { 0x08, 0x002c, "Busy" }, { 0x08, 0x002d, "Ready" }, { 0x08, 0x002e, "PaperOut" }, { 0x08, 0x002f, "PaperJam" }, { 0x08, 0x0030, "Remote" }, { 0x08, 0x0031, "Forward" }, { 0x08, 0x0032, "Reverse" }, { 0x08, 0x0033, "Stop" }, { 0x08, 0x0034, "Rewind" }, { 0x08, 0x0035, "FastForward" }, { 0x08, 0x0036, "Play" }, { 0x08, 0x0037, "Pause" }, { 0x08, 0x0038, "Record" }, { 0x08, 0x0039, "Error" }, { 0x08, 0x003a, "UsageSelectedIndicator" }, { 0x08, 0x003b, "UsageInUseIndicator" }, { 0x08, 0x003c, "UsageMultiModeIndicator" }, { 0x08, 0x003d, "IndicatorOn" }, { 0x08, 0x003e, "IndicatorFlash" }, { 0x08, 0x003f, "IndicatorSlowBlink" }, { 0x08, 0x0040, "IndicatorFastBlink" }, { 0x08, 0x0041, "IndicatorOff" }, { 0x08, 0x0042, "FlashOnTime" }, { 0x08, 0x0043, "SlowBlinkOnTime" }, { 0x08, 0x0044, "SlowBlinkOffTime" }, { 0x08, 0x0045, "FastBlinkOnTime" }, { 0x08, 0x0046, "FastBlinkOffTime" }, { 0x08, 0x0047, "UsageIndicatorColor" }, { 0x08, 0x0048, "IndicatorRed" }, { 0x08, 0x0049, "IndicatorGreen" }, { 0x08, 0x004a, "IndicatorAmber" }, { 0x08, 0x004b, "GenericIndicator" }, { 0x08, 0x004c, "SystemSuspend" }, { 0x08, 0x004d, "ExternalPowerConnected" }, { 0x08, 0x004e, "IndicatorBlue" }, { 0x08, 0x004f, "IndicatorOrange" }, { 0x08, 0x0050, "GoodStatus" }, { 0x08, 0x0051, "WarningStatus" }, { 0x08, 0x0052, 
"RGBLED" }, { 0x08, 0x0053, "RedLEDChannel" }, { 0x08, 0x0054, "BlueLEDChannel" }, { 0x08, 0x0055, "GreenLEDChannel" }, { 0x08, 0x0056, "LEDIntensity" }, { 0x08, 0x0057, "SystemMicrophoneMute" }, { 0x08, 0x0060, "PlayerIndicator" }, { 0x08, 0x0061, "Player1" }, { 0x08, 0x0062, "Player2" }, { 0x08, 0x0063, "Player3" }, { 0x08, 0x0064, "Player4" }, { 0x08, 0x0065, "Player5" }, { 0x08, 0x0066, "Player6" }, { 0x08, 0x0067, "Player7" }, { 0x08, 0x0068, "Player8" }, { 0x09, 0, "Button" }, { 0x0a, 0, "Ordinal" }, { 0x0b, 0, "TelephonyDevice" }, { 0x0b, 0x0001, "Phone" }, { 0x0b, 0x0002, "AnsweringMachine" }, { 0x0b, 0x0003, "MessageControls" }, { 0x0b, 0x0004, "Handset" }, { 0x0b, 0x0005, "Headset" }, { 0x0b, 0x0006, "TelephonyKeyPad" }, { 0x0b, 0x0007, "ProgrammableButton" }, { 0x0b, 0x0020, "HookSwitch" }, { 0x0b, 0x0021, "Flash" }, { 0x0b, 0x0022, "Feature" }, { 0x0b, 0x0023, "Hold" }, { 0x0b, 0x0024, "Redial" }, { 0x0b, 0x0025, "Transfer" }, { 0x0b, 0x0026, "Drop" }, { 0x0b, 0x0027, "Park" }, { 0x0b, 0x0028, "ForwardCalls" }, { 0x0b, 0x0029, "AlternateFunction" }, { 0x0b, 0x002a, "Line" }, { 0x0b, 0x002b, "SpeakerPhone" }, { 0x0b, 0x002c, "Conference" }, { 0x0b, 0x002d, "RingEnable" }, { 0x0b, 0x002e, "RingSelect" }, { 0x0b, 0x002f, "PhoneMute" }, { 0x0b, 0x0030, "CallerID" }, { 0x0b, 0x0031, "Send" }, { 0x0b, 0x0050, "SpeedDial" }, { 0x0b, 0x0051, "StoreNumber" }, { 0x0b, 0x0052, "RecallNumber" }, { 0x0b, 0x0053, "PhoneDirectory" }, { 0x0b, 0x0070, "VoiceMail" }, { 0x0b, 0x0071, "ScreenCalls" }, { 0x0b, 0x0072, "DoNotDisturb" }, { 0x0b, 0x0073, "Message" }, { 0x0b, 0x0074, "AnswerOnOff" }, { 0x0b, 0x0090, "InsideDialTone" }, { 0x0b, 0x0091, "OutsideDialTone" }, { 0x0b, 0x0092, "InsideRingTone" }, { 0x0b, 0x0093, "OutsideRingTone" }, { 0x0b, 0x0094, "PriorityRingTone" }, { 0x0b, 0x0095, "InsideRingback" }, { 0x0b, 0x0096, "PriorityRingback" }, { 0x0b, 0x0097, "LineBusyTone" }, { 0x0b, 0x0098, "ReorderTone" }, { 0x0b, 0x0099, "CallWaitingTone" }, { 0x0b, 0x009a, "ConfirmationTone1" }, { 0x0b, 0x009b, "ConfirmationTone2" }, { 0x0b, 0x009c, "TonesOff" }, { 0x0b, 0x009d, "OutsideRingback" }, { 0x0b, 0x009e, "Ringer" }, { 0x0b, 0x00b0, "PhoneKey0" }, { 0x0b, 0x00b1, "PhoneKey1" }, { 0x0b, 0x00b2, "PhoneKey2" }, { 0x0b, 0x00b3, "PhoneKey3" }, { 0x0b, 0x00b4, "PhoneKey4" }, { 0x0b, 0x00b5, "PhoneKey5" }, { 0x0b, 0x00b6, "PhoneKey6" }, { 0x0b, 0x00b7, "PhoneKey7" }, { 0x0b, 0x00b8, "PhoneKey8" }, { 0x0b, 0x00b9, "PhoneKey9" }, { 0x0b, 0x00ba, "PhoneKeyStar" }, { 0x0b, 0x00bb, "PhoneKeyPound" }, { 0x0b, 0x00bc, "PhoneKeyA" }, { 0x0b, 0x00bd, "PhoneKeyB" }, { 0x0b, 0x00be, "PhoneKeyC" }, { 0x0b, 0x00bf, "PhoneKeyD" }, { 0x0b, 0x00c0, "PhoneCallHistoryKey" }, { 0x0b, 0x00c1, "PhoneCallerIDKey" }, { 0x0b, 0x00c2, "PhoneSettingsKey" }, { 0x0b, 0x00f0, "HostControl" }, { 0x0b, 0x00f1, "HostAvailable" }, { 0x0b, 0x00f2, "HostCallActive" }, { 0x0b, 0x00f3, "ActivateHandsetAudio" }, { 0x0b, 0x00f4, "RingType" }, { 0x0b, 0x00f5, "RedialablePhoneNumber" }, { 0x0b, 0x00f8, "StopRingTone" }, { 0x0b, 0x00f9, "PSTNRingTone" }, { 0x0b, 0x00fa, "HostRingTone" }, { 0x0b, 0x00fb, "AlertSoundError" }, { 0x0b, 0x00fc, "AlertSoundConfirm" }, { 0x0b, 0x00fd, "AlertSoundNotification" }, { 0x0b, 0x00fe, "SilentRing" }, { 0x0b, 0x0108, "EmailMessageWaiting" }, { 0x0b, 0x0109, "VoicemailMessageWaiting" }, { 0x0b, 0x010a, "HostHold" }, { 0x0b, 0x0110, "IncomingCallHistoryCount" }, { 0x0b, 0x0111, "OutgoingCallHistoryCount" }, { 0x0b, 0x0112, "IncomingCallHistory" }, { 0x0b, 0x0113, "OutgoingCallHistory" }, { 0x0b, 0x0114, 
"PhoneLocale" }, { 0x0b, 0x0140, "PhoneTimeSecond" }, { 0x0b, 0x0141, "PhoneTimeMinute" }, { 0x0b, 0x0142, "PhoneTimeHour" }, { 0x0b, 0x0143, "PhoneDateDay" }, { 0x0b, 0x0144, "PhoneDateMonth" }, { 0x0b, 0x0145, "PhoneDateYear" }, { 0x0b, 0x0146, "HandsetNickname" }, { 0x0b, 0x0147, "AddressBookID" }, { 0x0b, 0x014a, "CallDuration" }, { 0x0b, 0x014b, "DualModePhone" }, { 0x0c, 0, "Consumer" }, { 0x0c, 0x0001, "ConsumerControl" }, { 0x0c, 0x0002, "NumericKeyPad" }, { 0x0c, 0x0003, "ProgrammableButtons" }, { 0x0c, 0x0004, "Microphone" }, { 0x0c, 0x0005, "Headphone" }, { 0x0c, 0x0006, "GraphicEqualizer" }, { 0x0c, 0x0020, "10" }, { 0x0c, 0x0021, "100" }, { 0x0c, 0x0022, "AMPM" }, { 0x0c, 0x0030, "Power" }, { 0x0c, 0x0031, "Reset" }, { 0x0c, 0x0032, "Sleep" }, { 0x0c, 0x0033, "SleepAfter" }, { 0x0c, 0x0034, "SleepMode" }, { 0x0c, 0x0035, "Illumination" }, { 0x0c, 0x0036, "FunctionButtons" }, { 0x0c, 0x0040, "Menu" }, { 0x0c, 0x0041, "MenuPick" }, { 0x0c, 0x0042, "MenuUp" }, { 0x0c, 0x0043, "MenuDown" }, { 0x0c, 0x0044, "MenuLeft" }, { 0x0c, 0x0045, "MenuRight" }, { 0x0c, 0x0046, "MenuEscape" }, { 0x0c, 0x0047, "MenuValueIncrease" }, { 0x0c, 0x0048, "MenuValueDecrease" }, { 0x0c, 0x0060, "DataOnScreen" }, { 0x0c, 0x0061, "ClosedCaption" }, { 0x0c, 0x0062, "ClosedCaptionSelect" }, { 0x0c, 0x0063, "VCRTV" }, { 0x0c, 0x0064, "BroadcastMode" }, { 0x0c, 0x0065, "Snapshot" }, { 0x0c, 0x0066, "Still" }, { 0x0c, 0x0067, "PictureinPictureToggle" }, { 0x0c, 0x0068, "PictureinPictureSwap" }, { 0x0c, 0x0069, "RedMenuButton" }, { 0x0c, 0x006a, "GreenMenuButton" }, { 0x0c, 0x006b, "BlueMenuButton" }, { 0x0c, 0x006c, "YellowMenuButton" }, { 0x0c, 0x006d, "Aspect" }, { 0x0c, 0x006e, "3DModeSelect" }, { 0x0c, 0x006f, "DisplayBrightnessIncrement" }, { 0x0c, 0x0070, "DisplayBrightnessDecrement" }, { 0x0c, 0x0071, "DisplayBrightness" }, { 0x0c, 0x0072, "DisplayBacklightToggle" }, { 0x0c, 0x0073, "DisplaySetBrightnesstoMinimum" }, { 0x0c, 0x0074, "DisplaySetBrightnesstoMaximum" }, { 0x0c, 0x0075, "DisplaySetAutoBrightness" }, { 0x0c, 0x0076, "CameraAccessEnabled" }, { 0x0c, 0x0077, "CameraAccessDisabled" }, { 0x0c, 0x0078, "CameraAccessToggle" }, { 0x0c, 0x0079, "KeyboardBrightnessIncrement" }, { 0x0c, 0x007a, "KeyboardBrightnessDecrement" }, { 0x0c, 0x007b, "KeyboardBacklightSetLevel" }, { 0x0c, 0x007c, "KeyboardBacklightOOC" }, { 0x0c, 0x007d, "KeyboardBacklightSetMinimum" }, { 0x0c, 0x007e, "KeyboardBacklightSetMaximum" }, { 0x0c, 0x007f, "KeyboardBacklightAuto" }, { 0x0c, 0x0080, "Selection" }, { 0x0c, 0x0081, "AssignSelection" }, { 0x0c, 0x0082, "ModeStep" }, { 0x0c, 0x0083, "RecallLast" }, { 0x0c, 0x0084, "EnterChannel" }, { 0x0c, 0x0085, "OrderMovie" }, { 0x0c, 0x0086, "Channel" }, { 0x0c, 0x0087, "MediaSelection" }, { 0x0c, 0x0088, "MediaSelectComputer" }, { 0x0c, 0x0089, "MediaSelectTV" }, { 0x0c, 0x008a, "MediaSelectWWW" }, { 0x0c, 0x008b, "MediaSelectDVD" }, { 0x0c, 0x008c, "MediaSelectTelephone" }, { 0x0c, 0x008d, "MediaSelectProgramGuide" }, { 0x0c, 0x008e, "MediaSelectVideoPhone" }, { 0x0c, 0x008f, "MediaSelectGames" }, { 0x0c, 0x0090, "MediaSelectMessages" }, { 0x0c, 0x0091, "MediaSelectCD" }, { 0x0c, 0x0092, "MediaSelectVCR" }, { 0x0c, 0x0093, "MediaSelectTuner" }, { 0x0c, 0x0094, "Quit" }, { 0x0c, 0x0095, "Help" }, { 0x0c, 0x0096, "MediaSelectTape" }, { 0x0c, 0x0097, "MediaSelectCable" }, { 0x0c, 0x0098, "MediaSelectSatellite" }, { 0x0c, 0x0099, "MediaSelectSecurity" }, { 0x0c, 0x009a, "MediaSelectHome" }, { 0x0c, 0x009b, "MediaSelectCall" }, { 0x0c, 0x009c, "ChannelIncrement" }, { 0x0c, 0x009d, 
"ChannelDecrement" }, { 0x0c, 0x009e, "MediaSelectSAP" }, { 0x0c, 0x00a0, "VCRPlus" }, { 0x0c, 0x00a1, "Once" }, { 0x0c, 0x00a2, "Daily" }, { 0x0c, 0x00a3, "Weekly" }, { 0x0c, 0x00a4, "Monthly" }, { 0x0c, 0x00b0, "Play" }, { 0x0c, 0x00b1, "Pause" }, { 0x0c, 0x00b2, "Record" }, { 0x0c, 0x00b3, "FastForward" }, { 0x0c, 0x00b4, "Rewind" }, { 0x0c, 0x00b5, "ScanNextTrack" }, { 0x0c, 0x00b6, "ScanPreviousTrack" }, { 0x0c, 0x00b7, "Stop" }, { 0x0c, 0x00b8, "Eject" }, { 0x0c, 0x00b9, "RandomPlay" }, { 0x0c, 0x00ba, "SelectDisc" }, { 0x0c, 0x00bb, "EnterDisc" }, { 0x0c, 0x00bc, "Repeat" }, { 0x0c, 0x00bd, "Tracking" }, { 0x0c, 0x00be, "TrackNormal" }, { 0x0c, 0x00bf, "SlowTracking" }, { 0x0c, 0x00c0, "FrameForward" }, { 0x0c, 0x00c1, "FrameBack" }, { 0x0c, 0x00c2, "Mark" }, { 0x0c, 0x00c3, "ClearMark" }, { 0x0c, 0x00c4, "RepeatFromMark" }, { 0x0c, 0x00c5, "ReturnToMark" }, { 0x0c, 0x00c6, "SearchMarkForward" }, { 0x0c, 0x00c7, "SearchMarkBackwards" }, { 0x0c, 0x00c8, "CounterReset" }, { 0x0c, 0x00c9, "ShowCounter" }, { 0x0c, 0x00ca, "TrackingIncrement" }, { 0x0c, 0x00cb, "TrackingDecrement" }, { 0x0c, 0x00cc, "StopEject" }, { 0x0c, 0x00cd, "PlayPause" }, { 0x0c, 0x00ce, "PlaySkip" }, { 0x0c, 0x00cf, "VoiceCommand" }, { 0x0c, 0x00d0, "InvokeCaptureInterface" }, { 0x0c, 0x00d1, "StartorStopGameRecording" }, { 0x0c, 0x00d2, "HistoricalGameCapture" }, { 0x0c, 0x00d3, "CaptureGameScreenshot" }, { 0x0c, 0x00d4, "ShoworHideRecordingIndicator" }, { 0x0c, 0x00d5, "StartorStopMicrophoneCapture" }, { 0x0c, 0x00d6, "StartorStopCameraCapture" }, { 0x0c, 0x00d7, "StartorStopGameBroadcast" }, { 0x0c, 0x00d8, "StartorStopVoiceDictationSession" }, { 0x0c, 0x00d9, "InvokeDismissEmojiPicker" }, { 0x0c, 0x00e0, "Volume" }, { 0x0c, 0x00e1, "Balance" }, { 0x0c, 0x00e2, "Mute" }, { 0x0c, 0x00e3, "Bass" }, { 0x0c, 0x00e4, "Treble" }, { 0x0c, 0x00e5, "BassBoost" }, { 0x0c, 0x00e6, "SurroundMode" }, { 0x0c, 0x00e7, "Loudness" }, { 0x0c, 0x00e8, "MPX" }, { 0x0c, 0x00e9, "VolumeIncrement" }, { 0x0c, 0x00ea, "VolumeDecrement" }, { 0x0c, 0x00f0, "SpeedSelect" }, { 0x0c, 0x00f1, "PlaybackSpeed" }, { 0x0c, 0x00f2, "StandardPlay" }, { 0x0c, 0x00f3, "LongPlay" }, { 0x0c, 0x00f4, "ExtendedPlay" }, { 0x0c, 0x00f5, "Slow" }, { 0x0c, 0x0100, "FanEnable" }, { 0x0c, 0x0101, "FanSpeed" }, { 0x0c, 0x0102, "LightEnable" }, { 0x0c, 0x0103, "LightIlluminationLevel" }, { 0x0c, 0x0104, "ClimateControlEnable" }, { 0x0c, 0x0105, "RoomTemperature" }, { 0x0c, 0x0106, "SecurityEnable" }, { 0x0c, 0x0107, "FireAlarm" }, { 0x0c, 0x0108, "PoliceAlarm" }, { 0x0c, 0x0109, "Proximity" }, { 0x0c, 0x010a, "Motion" }, { 0x0c, 0x010b, "DuressAlarm" }, { 0x0c, 0x010c, "HoldupAlarm" }, { 0x0c, 0x010d, "MedicalAlarm" }, { 0x0c, 0x0150, "BalanceRight" }, { 0x0c, 0x0151, "BalanceLeft" }, { 0x0c, 0x0152, "BassIncrement" }, { 0x0c, 0x0153, "BassDecrement" }, { 0x0c, 0x0154, "TrebleIncrement" }, { 0x0c, 0x0155, "TrebleDecrement" }, { 0x0c, 0x0160, "SpeakerSystem" }, { 0x0c, 0x0161, "ChannelLeft" }, { 0x0c, 0x0162, "ChannelRight" }, { 0x0c, 0x0163, "ChannelCenter" }, { 0x0c, 0x0164, "ChannelFront" }, { 0x0c, 0x0165, "ChannelCenterFront" }, { 0x0c, 0x0166, "ChannelSide" }, { 0x0c, 0x0167, "ChannelSurround" }, { 0x0c, 0x0168, "ChannelLowFrequencyEnhancement" }, { 0x0c, 0x0169, "ChannelTop" }, { 0x0c, 0x016a, "ChannelUnknown" }, { 0x0c, 0x0170, "Subchannel" }, { 0x0c, 0x0171, "SubchannelIncrement" }, { 0x0c, 0x0172, "SubchannelDecrement" }, { 0x0c, 0x0173, "AlternateAudioIncrement" }, { 0x0c, 0x0174, "AlternateAudioDecrement" }, { 0x0c, 0x0180, "ApplicationLaunchButtons" 
}, { 0x0c, 0x0181, "ALLaunchButtonConfigurationTool" }, { 0x0c, 0x0182, "ALProgrammableButtonConfiguration" }, { 0x0c, 0x0183, "ALConsumerControlConfiguration" }, { 0x0c, 0x0184, "ALWordProcessor" }, { 0x0c, 0x0185, "ALTextEditor" }, { 0x0c, 0x0186, "ALSpreadsheet" }, { 0x0c, 0x0187, "ALGraphicsEditor" }, { 0x0c, 0x0188, "ALPresentationApp" }, { 0x0c, 0x0189, "ALDatabaseApp" }, { 0x0c, 0x018a, "ALEmailReader" }, { 0x0c, 0x018b, "ALNewsreader" }, { 0x0c, 0x018c, "ALVoicemail" }, { 0x0c, 0x018d, "ALContactsAddressBook" }, { 0x0c, 0x018e, "ALCalendarSchedule" }, { 0x0c, 0x018f, "ALTaskProjectManager" }, { 0x0c, 0x0190, "ALLogJournalTimecard" }, { 0x0c, 0x0191, "ALCheckbookFinance" }, { 0x0c, 0x0192, "ALCalculator" }, { 0x0c, 0x0193, "ALAVCapturePlayback" }, { 0x0c, 0x0194, "ALLocalMachineBrowser" }, { 0x0c, 0x0195, "ALLANWANBrowser" }, { 0x0c, 0x0196, "ALInternetBrowser" }, { 0x0c, 0x0197, "ALRemoteNetworkingISPConnect" }, { 0x0c, 0x0198, "ALNetworkConference" }, { 0x0c, 0x0199, "ALNetworkChat" }, { 0x0c, 0x019a, "ALTelephonyDialer" }, { 0x0c, 0x019b, "ALLogon" }, { 0x0c, 0x019c, "ALLogoff" }, { 0x0c, 0x019d, "ALLogonLogoff" }, { 0x0c, 0x019e, "ALTerminalLockScreensaver" }, { 0x0c, 0x019f, "ALControlPanel" }, { 0x0c, 0x01a0, "ALCommandLineProcessorRun" }, { 0x0c, 0x01a1, "ALProcessTaskManager" }, { 0x0c, 0x01a2, "ALSelectTaskApplication" }, { 0x0c, 0x01a3, "ALNextTaskApplication" }, { 0x0c, 0x01a4, "ALPreviousTaskApplication" }, { 0x0c, 0x01a5, "ALPreemptiveHaltTaskApplication" }, { 0x0c, 0x01a6, "ALIntegratedHelpCenter" }, { 0x0c, 0x01a7, "ALDocuments" }, { 0x0c, 0x01a8, "ALThesaurus" }, { 0x0c, 0x01a9, "ALDictionary" }, { 0x0c, 0x01aa, "ALDesktop" }, { 0x0c, 0x01ab, "ALSpellCheck" }, { 0x0c, 0x01ac, "ALGrammarCheck" }, { 0x0c, 0x01ad, "ALWirelessStatus" }, { 0x0c, 0x01ae, "ALKeyboardLayout" }, { 0x0c, 0x01af, "ALVirusProtection" }, { 0x0c, 0x01b0, "ALEncryption" }, { 0x0c, 0x01b1, "ALScreenSaver" }, { 0x0c, 0x01b2, "ALAlarms" }, { 0x0c, 0x01b3, "ALClock" }, { 0x0c, 0x01b4, "ALFileBrowser" }, { 0x0c, 0x01b5, "ALPowerStatus" }, { 0x0c, 0x01b6, "ALImageBrowser" }, { 0x0c, 0x01b7, "ALAudioBrowser" }, { 0x0c, 0x01b8, "ALMovieBrowser" }, { 0x0c, 0x01b9, "ALDigitalRightsManager" }, { 0x0c, 0x01ba, "ALDigitalWallet" }, { 0x0c, 0x01bc, "ALInstantMessaging" }, { 0x0c, 0x01bd, "ALOEMFeaturesTipsTutorialBrowser" }, { 0x0c, 0x01be, "ALOEMHelp" }, { 0x0c, 0x01bf, "ALOnlineCommunity" }, { 0x0c, 0x01c0, "ALEntertainmentContentBrowser" }, { 0x0c, 0x01c1, "ALOnlineShoppingBrowser" }, { 0x0c, 0x01c2, "ALSmartCardInformationHelp" }, { 0x0c, 0x01c3, "ALMarketMonitorFinanceBrowser" }, { 0x0c, 0x01c4, "ALCustomizedCorporateNewsBrowser" }, { 0x0c, 0x01c5, "ALOnlineActivityBrowser" }, { 0x0c, 0x01c6, "ALResearchSearchBrowser" }, { 0x0c, 0x01c7, "ALAudioPlayer" }, { 0x0c, 0x01c8, "ALMessageStatus" }, { 0x0c, 0x01c9, "ALContactSync" }, { 0x0c, 0x01ca, "ALNavigation" }, { 0x0c, 0x01cb, "ALContextawareDesktopAssistant" }, { 0x0c, 0x0200, "GenericGUIApplicationControls" }, { 0x0c, 0x0201, "ACNew" }, { 0x0c, 0x0202, "ACOpen" }, { 0x0c, 0x0203, "ACClose" }, { 0x0c, 0x0204, "ACExit" }, { 0x0c, 0x0205, "ACMaximize" }, { 0x0c, 0x0206, "ACMinimize" }, { 0x0c, 0x0207, "ACSave" }, { 0x0c, 0x0208, "ACPrint" }, { 0x0c, 0x0209, "ACProperties" }, { 0x0c, 0x021a, "ACUndo" }, { 0x0c, 0x021b, "ACCopy" }, { 0x0c, 0x021c, "ACCut" }, { 0x0c, 0x021d, "ACPaste" }, { 0x0c, 0x021e, "ACSelectAll" }, { 0x0c, 0x021f, "ACFind" }, { 0x0c, 0x0220, "ACFindandReplace" }, { 0x0c, 0x0221, "ACSearch" }, { 0x0c, 0x0222, "ACGoTo" }, { 0x0c, 0x0223, 
"ACHome" }, { 0x0c, 0x0224, "ACBack" }, { 0x0c, 0x0225, "ACForward" }, { 0x0c, 0x0226, "ACStop" }, { 0x0c, 0x0227, "ACRefresh" }, { 0x0c, 0x0228, "ACPreviousLink" }, { 0x0c, 0x0229, "ACNextLink" }, { 0x0c, 0x022a, "ACBookmarks" }, { 0x0c, 0x022b, "ACHistory" }, { 0x0c, 0x022c, "ACSubscriptions" }, { 0x0c, 0x022d, "ACZoomIn" }, { 0x0c, 0x022e, "ACZoomOut" }, { 0x0c, 0x022f, "ACZoom" }, { 0x0c, 0x0230, "ACFullScreenView" }, { 0x0c, 0x0231, "ACNormalView" }, { 0x0c, 0x0232, "ACViewToggle" }, { 0x0c, 0x0233, "ACScrollUp" }, { 0x0c, 0x0234, "ACScrollDown" }, { 0x0c, 0x0235, "ACScroll" }, { 0x0c, 0x0236, "ACPanLeft" }, { 0x0c, 0x0237, "ACPanRight" }, { 0x0c, 0x0238, "ACPan" }, { 0x0c, 0x0239, "ACNewWindow" }, { 0x0c, 0x023a, "ACTileHorizontally" }, { 0x0c, 0x023b, "ACTileVertically" }, { 0x0c, 0x023c, "ACFormat" }, { 0x0c, 0x023d, "ACEdit" }, { 0x0c, 0x023e, "ACBold" }, { 0x0c, 0x023f, "ACItalics" }, { 0x0c, 0x0240, "ACUnderline" }, { 0x0c, 0x0241, "ACStrikethrough" }, { 0x0c, 0x0242, "ACSubscript" }, { 0x0c, 0x0243, "ACSuperscript" }, { 0x0c, 0x0244, "ACAllCaps" }, { 0x0c, 0x0245, "ACRotate" }, { 0x0c, 0x0246, "ACResize" }, { 0x0c, 0x0247, "ACFlipHorizontal" }, { 0x0c, 0x0248, "ACFlipVertical" }, { 0x0c, 0x0249, "ACMirrorHorizontal" }, { 0x0c, 0x024a, "ACMirrorVertical" }, { 0x0c, 0x024b, "ACFontSelect" }, { 0x0c, 0x024c, "ACFontColor" }, { 0x0c, 0x024d, "ACFontSize" }, { 0x0c, 0x024e, "ACJustifyLeft" }, { 0x0c, 0x024f, "ACJustifyCenterH" }, { 0x0c, 0x0250, "ACJustifyRight" }, { 0x0c, 0x0251, "ACJustifyBlockH" }, { 0x0c, 0x0252, "ACJustifyTop" }, { 0x0c, 0x0253, "ACJustifyCenterV" }, { 0x0c, 0x0254, "ACJustifyBottom" }, { 0x0c, 0x0255, "ACJustifyBlockV" }, { 0x0c, 0x0256, "ACIndentDecrease" }, { 0x0c, 0x0257, "ACIndentIncrease" }, { 0x0c, 0x0258, "ACNumberedList" }, { 0x0c, 0x0259, "ACRestartNumbering" }, { 0x0c, 0x025a, "ACBulletedList" }, { 0x0c, 0x025b, "ACPromote" }, { 0x0c, 0x025c, "ACDemote" }, { 0x0c, 0x025d, "ACYes" }, { 0x0c, 0x025e, "ACNo" }, { 0x0c, 0x025f, "ACCancel" }, { 0x0c, 0x0260, "ACCatalog" }, { 0x0c, 0x0261, "ACBuyCheckout" }, { 0x0c, 0x0262, "ACAddtoCart" }, { 0x0c, 0x0263, "ACExpand" }, { 0x0c, 0x0264, "ACExpandAll" }, { 0x0c, 0x0265, "ACCollapse" }, { 0x0c, 0x0266, "ACCollapseAll" }, { 0x0c, 0x0267, "ACPrintPreview" }, { 0x0c, 0x0268, "ACPasteSpecial" }, { 0x0c, 0x0269, "ACInsertMode" }, { 0x0c, 0x026a, "ACDelete" }, { 0x0c, 0x026b, "ACLock" }, { 0x0c, 0x026c, "ACUnlock" }, { 0x0c, 0x026d, "ACProtect" }, { 0x0c, 0x026e, "ACUnprotect" }, { 0x0c, 0x026f, "ACAttachComment" }, { 0x0c, 0x0270, "ACDeleteComment" }, { 0x0c, 0x0271, "ACViewComment" }, { 0x0c, 0x0272, "ACSelectWord" }, { 0x0c, 0x0273, "ACSelectSentence" }, { 0x0c, 0x0274, "ACSelectParagraph" }, { 0x0c, 0x0275, "ACSelectColumn" }, { 0x0c, 0x0276, "ACSelectRow" }, { 0x0c, 0x0277, "ACSelectTable" }, { 0x0c, 0x0278, "ACSelectObject" }, { 0x0c, 0x0279, "ACRedoRepeat" }, { 0x0c, 0x027a, "ACSort" }, { 0x0c, 0x027b, "ACSortAscending" }, { 0x0c, 0x027c, "ACSortDescending" }, { 0x0c, 0x027d, "ACFilter" }, { 0x0c, 0x027e, "ACSetClock" }, { 0x0c, 0x027f, "ACViewClock" }, { 0x0c, 0x0280, "ACSelectTimeZone" }, { 0x0c, 0x0281, "ACEditTimeZones" }, { 0x0c, 0x0282, "ACSetAlarm" }, { 0x0c, 0x0283, "ACClearAlarm" }, { 0x0c, 0x0284, "ACSnoozeAlarm" }, { 0x0c, 0x0285, "ACResetAlarm" }, { 0x0c, 0x0286, "ACSynchronize" }, { 0x0c, 0x0287, "ACSendReceive" }, { 0x0c, 0x0288, "ACSendTo" }, { 0x0c, 0x0289, "ACReply" }, { 0x0c, 0x028a, "ACReplyAll" }, { 0x0c, 0x028b, "ACForwardMsg" }, { 0x0c, 0x028c, "ACSend" }, { 0x0c, 0x028d, "ACAttachFile" 
}, { 0x0c, 0x028e, "ACUpload" }, { 0x0c, 0x028f, "ACDownloadSaveTargetAs" }, { 0x0c, 0x0290, "ACSetBorders" }, { 0x0c, 0x0291, "ACInsertRow" }, { 0x0c, 0x0292, "ACInsertColumn" }, { 0x0c, 0x0293, "ACInsertFile" }, { 0x0c, 0x0294, "ACInsertPicture" }, { 0x0c, 0x0295, "ACInsertObject" }, { 0x0c, 0x0296, "ACInsertSymbol" }, { 0x0c, 0x0297, "ACSaveandClose" }, { 0x0c, 0x0298, "ACRename" }, { 0x0c, 0x0299, "ACMerge" }, { 0x0c, 0x029a, "ACSplit" }, { 0x0c, 0x029b, "ACDisributeHorizontally" }, { 0x0c, 0x029c, "ACDistributeVertically" }, { 0x0c, 0x029d, "ACNextKeyboardLayoutSelect" }, { 0x0c, 0x029e, "ACNavigationGuidance" }, { 0x0c, 0x029f, "ACDesktopShowAllWindows" }, { 0x0c, 0x02a0, "ACSoftKeyLeft" }, { 0x0c, 0x02a1, "ACSoftKeyRight" }, { 0x0c, 0x02a2, "ACDesktopShowAllApplications" }, { 0x0c, 0x02b0, "ACIdleKeepAlive" }, { 0x0c, 0x02c0, "ExtendedKeyboardAttributesCollection" }, { 0x0c, 0x02c1, "KeyboardFormFactor" }, { 0x0c, 0x02c2, "KeyboardKeyType" }, { 0x0c, 0x02c3, "KeyboardPhysicalLayout" }, { 0x0c, 0x02c4, "VendorSpecificKeyboardPhysicalLayout" }, { 0x0c, 0x02c5, "KeyboardIETFLanguageTagIndex" }, { 0x0c, 0x02c6, "ImplementedKeyboardInputAssistControls" }, { 0x0c, 0x02c7, "KeyboardInputAssistPrevious" }, { 0x0c, 0x02c8, "KeyboardInputAssistNext" }, { 0x0c, 0x02c9, "KeyboardInputAssistPreviousGroup" }, { 0x0c, 0x02ca, "KeyboardInputAssistNextGroup" }, { 0x0c, 0x02cb, "KeyboardInputAssistAccept" }, { 0x0c, 0x02cc, "KeyboardInputAssistCancel" }, { 0x0c, 0x02d0, "PrivacyScreenToggle" }, { 0x0c, 0x02d1, "PrivacyScreenLevelDecrement" }, { 0x0c, 0x02d2, "PrivacyScreenLevelIncrement" }, { 0x0c, 0x02d3, "PrivacyScreenLevelMinimum" }, { 0x0c, 0x02d4, "PrivacyScreenLevelMaximum" }, { 0x0c, 0x0500, "ContactEdited" }, { 0x0c, 0x0501, "ContactAdded" }, { 0x0c, 0x0502, "ContactRecordActive" }, { 0x0c, 0x0503, "ContactIndex" }, { 0x0c, 0x0504, "ContactNickname" }, { 0x0c, 0x0505, "ContactFirstName" }, { 0x0c, 0x0506, "ContactLastName" }, { 0x0c, 0x0507, "ContactFullName" }, { 0x0c, 0x0508, "ContactPhoneNumberPersonal" }, { 0x0c, 0x0509, "ContactPhoneNumberBusiness" }, { 0x0c, 0x050a, "ContactPhoneNumberMobile" }, { 0x0c, 0x050b, "ContactPhoneNumberPager" }, { 0x0c, 0x050c, "ContactPhoneNumberFax" }, { 0x0c, 0x050d, "ContactPhoneNumberOther" }, { 0x0c, 0x050e, "ContactEmailPersonal" }, { 0x0c, 0x050f, "ContactEmailBusiness" }, { 0x0c, 0x0510, "ContactEmailOther" }, { 0x0c, 0x0511, "ContactEmailMain" }, { 0x0c, 0x0512, "ContactSpeedDialNumber" }, { 0x0c, 0x0513, "ContactStatusFlag" }, { 0x0c, 0x0514, "ContactMisc" }, { 0x0d, 0, "Digitizers" }, { 0x0d, 0x0001, "Digitizer" }, { 0x0d, 0x0002, "Pen" }, { 0x0d, 0x0003, "LightPen" }, { 0x0d, 0x0004, "TouchScreen" }, { 0x0d, 0x0005, "TouchPad" }, { 0x0d, 0x0006, "Whiteboard" }, { 0x0d, 0x0007, "CoordinateMeasuringMachine" }, { 0x0d, 0x0008, "3DDigitizer" }, { 0x0d, 0x0009, "StereoPlotter" }, { 0x0d, 0x000a, "ArticulatedArm" }, { 0x0d, 0x000b, "Armature" }, { 0x0d, 0x000c, "MultiplePointDigitizer" }, { 0x0d, 0x000d, "FreeSpaceWand" }, { 0x0d, 0x000e, "DeviceConfiguration" }, { 0x0d, 0x000f, "CapacitiveHeatMapDigitizer" }, { 0x0d, 0x0020, "Stylus" }, { 0x0d, 0x0021, "Puck" }, { 0x0d, 0x0022, "Finger" }, { 0x0d, 0x0023, "Devicesettings" }, { 0x0d, 0x0024, "CharacterGesture" }, { 0x0d, 0x0030, "TipPressure" }, { 0x0d, 0x0031, "BarrelPressure" }, { 0x0d, 0x0032, "InRange" }, { 0x0d, 0x0033, "Touch" }, { 0x0d, 0x0034, "Untouch" }, { 0x0d, 0x0035, "Tap" }, { 0x0d, 0x0036, "Quality" }, { 0x0d, 0x0037, "DataValid" }, { 0x0d, 0x0038, "TransducerIndex" }, { 0x0d, 0x0039, 
"TabletFunctionKeys" }, { 0x0d, 0x003a, "ProgramChangeKeys" }, { 0x0d, 0x003b, "BatteryStrength" }, { 0x0d, 0x003c, "Invert" }, { 0x0d, 0x003d, "XTilt" }, { 0x0d, 0x003e, "YTilt" }, { 0x0d, 0x003f, "Azimuth" }, { 0x0d, 0x0040, "Altitude" }, { 0x0d, 0x0041, "Twist" }, { 0x0d, 0x0042, "TipSwitch" }, { 0x0d, 0x0043, "SecondaryTipSwitch" }, { 0x0d, 0x0044, "BarrelSwitch" }, { 0x0d, 0x0045, "Eraser" }, { 0x0d, 0x0046, "TabletPick" }, { 0x0d, 0x0047, "TouchValid" }, { 0x0d, 0x0048, "Width" }, { 0x0d, 0x0049, "Height" }, { 0x0d, 0x0051, "ContactIdentifier" }, { 0x0d, 0x0052, "DeviceMode" }, { 0x0d, 0x0053, "DeviceIdentifier" }, { 0x0d, 0x0054, "ContactCount" }, { 0x0d, 0x0055, "ContactCountMaximum" }, { 0x0d, 0x0056, "ScanTime" }, { 0x0d, 0x0057, "SurfaceSwitch" }, { 0x0d, 0x0058, "ButtonSwitch" }, { 0x0d, 0x0059, "PadType" }, { 0x0d, 0x005a, "SecondaryBarrelSwitch" }, { 0x0d, 0x005b, "TransducerSerialNumber" }, { 0x0d, 0x005c, "PreferredColor" }, { 0x0d, 0x005d, "PreferredColorisLocked" }, { 0x0d, 0x005e, "PreferredLineWidth" }, { 0x0d, 0x005f, "PreferredLineWidthisLocked" }, { 0x0d, 0x0060, "LatencyMode" }, { 0x0d, 0x0061, "GestureCharacterQuality" }, { 0x0d, 0x0062, "CharacterGestureDataLength" }, { 0x0d, 0x0063, "CharacterGestureData" }, { 0x0d, 0x0064, "GestureCharacterEncoding" }, { 0x0d, 0x0065, "UTF8CharacterGestureEncoding" }, { 0x0d, 0x0066, "UTF16LittleEndianCharacterGestureEncoding" }, { 0x0d, 0x0067, "UTF16BigEndianCharacterGestureEncoding" }, { 0x0d, 0x0068, "UTF32LittleEndianCharacterGestureEncoding" }, { 0x0d, 0x0069, "UTF32BigEndianCharacterGestureEncoding" }, { 0x0d, 0x006a, "CapacitiveHeatMapProtocolVendorID" }, { 0x0d, 0x006b, "CapacitiveHeatMapProtocolVersion" }, { 0x0d, 0x006c, "CapacitiveHeatMapFrameData" }, { 0x0d, 0x006d, "GestureCharacterEnable" }, { 0x0d, 0x006e, "TransducerSerialNumberPart2" }, { 0x0d, 0x006f, "NoPreferredColor" }, { 0x0d, 0x0070, "PreferredLineStyle" }, { 0x0d, 0x0071, "PreferredLineStyleisLocked" }, { 0x0d, 0x0072, "Ink" }, { 0x0d, 0x0073, "Pencil" }, { 0x0d, 0x0074, "Highlighter" }, { 0x0d, 0x0075, "ChiselMarker" }, { 0x0d, 0x0076, "Brush" }, { 0x0d, 0x0077, "NoPreference" }, { 0x0d, 0x0080, "DigitizerDiagnostic" }, { 0x0d, 0x0081, "DigitizerError" }, { 0x0d, 0x0082, "ErrNormalStatus" }, { 0x0d, 0x0083, "ErrTransducersExceeded" }, { 0x0d, 0x0084, "ErrFullTransFeaturesUnavailable" }, { 0x0d, 0x0085, "ErrChargeLow" }, { 0x0d, 0x0090, "TransducerSoftwareInfo" }, { 0x0d, 0x0091, "TransducerVendorId" }, { 0x0d, 0x0092, "TransducerProductId" }, { 0x0d, 0x0093, "DeviceSupportedProtocols" }, { 0x0d, 0x0094, "TransducerSupportedProtocols" }, { 0x0d, 0x0095, "NoProtocol" }, { 0x0d, 0x0096, "WacomAESProtocol" }, { 0x0d, 0x0097, "USIProtocol" }, { 0x0d, 0x0098, "MicrosoftPenProtocol" }, { 0x0d, 0x00a0, "SupportedReportRates" }, { 0x0d, 0x00a1, "ReportRate" }, { 0x0d, 0x00a2, "TransducerConnected" }, { 0x0d, 0x00a3, "SwitchDisabled" }, { 0x0d, 0x00a4, "SwitchUnimplemented" }, { 0x0d, 0x00a5, "TransducerSwitches" }, { 0x0d, 0x00a6, "TransducerIndexSelector" }, { 0x0d, 0x00b0, "ButtonPressThreshold" }, { 0x0e, 0, "Haptics" }, { 0x0e, 0x0001, "SimpleHapticController" }, { 0x0e, 0x0010, "WaveformList" }, { 0x0e, 0x0011, "DurationList" }, { 0x0e, 0x0020, "AutoTrigger" }, { 0x0e, 0x0021, "ManualTrigger" }, { 0x0e, 0x0022, "AutoTriggerAssociatedControl" }, { 0x0e, 0x0023, "Intensity" }, { 0x0e, 0x0024, "RepeatCount" }, { 0x0e, 0x0025, "RetriggerPeriod" }, { 0x0e, 0x0026, "WaveformVendorPage" }, { 0x0e, 0x0027, "WaveformVendorID" }, { 0x0e, 0x0028, "WaveformCutoffTime" 
}, { 0x0e, 0x1001, "WaveformNone" }, { 0x0e, 0x1002, "WaveformStop" }, { 0x0e, 0x1003, "WaveformClick" }, { 0x0e, 0x1004, "WaveformBuzzContinuous" }, { 0x0e, 0x1005, "WaveformRumbleContinuous" }, { 0x0e, 0x1006, "WaveformPress" }, { 0x0e, 0x1007, "WaveformRelease" }, { 0x0e, 0x1008, "WaveformHover" }, { 0x0e, 0x1009, "WaveformSuccess" }, { 0x0e, 0x100a, "WaveformError" }, { 0x0e, 0x100b, "WaveformInkContinuous" }, { 0x0e, 0x100c, "WaveformPencilContinuous" }, { 0x0e, 0x100d, "WaveformMarkerContinuous" }, { 0x0e, 0x100e, "WaveformChiselMarkerContinuous" }, { 0x0e, 0x100f, "WaveformBrushContinuous" }, { 0x0e, 0x1010, "WaveformEraserContinuous" }, { 0x0e, 0x1011, "WaveformSparkleContinuous" }, { 0x0f, 0, "PhysicalInputDevice" }, { 0x0f, 0x0001, "PhysicalInputDevice" }, { 0x0f, 0x0020, "Normal" }, { 0x0f, 0x0021, "SetEffectReport" }, { 0x0f, 0x0022, "EffectParameterBlockIndex" }, { 0x0f, 0x0023, "ParameterBlockOffset" }, { 0x0f, 0x0024, "ROMFlag" }, { 0x0f, 0x0025, "EffectType" }, { 0x0f, 0x0026, "ETConstantForce" }, { 0x0f, 0x0027, "ETRamp" }, { 0x0f, 0x0028, "ETCustomForce" }, { 0x0f, 0x0030, "ETSquare" }, { 0x0f, 0x0031, "ETSine" }, { 0x0f, 0x0032, "ETTriangle" }, { 0x0f, 0x0033, "ETSawtoothUp" }, { 0x0f, 0x0034, "ETSawtoothDown" }, { 0x0f, 0x0040, "ETSpring" }, { 0x0f, 0x0041, "ETDamper" }, { 0x0f, 0x0042, "ETInertia" }, { 0x0f, 0x0043, "ETFriction" }, { 0x0f, 0x0050, "Duration" }, { 0x0f, 0x0051, "SamplePeriod" }, { 0x0f, 0x0052, "Gain" }, { 0x0f, 0x0053, "TriggerButton" }, { 0x0f, 0x0054, "TriggerRepeatInterval" }, { 0x0f, 0x0055, "AxesEnable" }, { 0x0f, 0x0056, "DirectionEnable" }, { 0x0f, 0x0057, "Direction" }, { 0x0f, 0x0058, "TypeSpecificBlockOffset" }, { 0x0f, 0x0059, "BlockType" }, { 0x0f, 0x005a, "SetEnvelopeReport" }, { 0x0f, 0x005b, "AttackLevel" }, { 0x0f, 0x005c, "AttackTime" }, { 0x0f, 0x005d, "FadeLevel" }, { 0x0f, 0x005e, "FadeTime" }, { 0x0f, 0x005f, "SetConditionReport" }, { 0x0f, 0x0060, "CenterPointOffset" }, { 0x0f, 0x0061, "PositiveCoefficient" }, { 0x0f, 0x0062, "NegativeCoefficient" }, { 0x0f, 0x0063, "PositiveSaturation" }, { 0x0f, 0x0064, "NegativeSaturation" }, { 0x0f, 0x0065, "DeadBand" }, { 0x0f, 0x0066, "DownloadForceSample" }, { 0x0f, 0x0067, "IsochCustomForceEnable" }, { 0x0f, 0x0068, "CustomForceDataReport" }, { 0x0f, 0x0069, "CustomForceData" }, { 0x0f, 0x006a, "CustomForceVendorDefinedData" }, { 0x0f, 0x006b, "SetCustomForceReport" }, { 0x0f, 0x006c, "CustomForceDataOffset" }, { 0x0f, 0x006d, "SampleCount" }, { 0x0f, 0x006e, "SetPeriodicReport" }, { 0x0f, 0x006f, "Offset" }, { 0x0f, 0x0070, "Magnitude" }, { 0x0f, 0x0071, "Phase" }, { 0x0f, 0x0072, "Period" }, { 0x0f, 0x0073, "SetConstantForceReport" }, { 0x0f, 0x0074, "SetRampForceReport" }, { 0x0f, 0x0075, "RampStart" }, { 0x0f, 0x0076, "RampEnd" }, { 0x0f, 0x0077, "EffectOperationReport" }, { 0x0f, 0x0078, "EffectOperation" }, { 0x0f, 0x0079, "OpEffectStart" }, { 0x0f, 0x007a, "OpEffectStartSolo" }, { 0x0f, 0x007b, "OpEffectStop" }, { 0x0f, 0x007c, "LoopCount" }, { 0x0f, 0x007d, "DeviceGainReport" }, { 0x0f, 0x007e, "DeviceGain" }, { 0x0f, 0x007f, "ParameterBlockPoolsReport" }, { 0x0f, 0x0080, "RAMPoolSize" }, { 0x0f, 0x0081, "ROMPoolSize" }, { 0x0f, 0x0082, "ROMEffectBlockCount" }, { 0x0f, 0x0083, "SimultaneousEffectsMax" }, { 0x0f, 0x0084, "PoolAlignment" }, { 0x0f, 0x0085, "ParameterBlockMoveReport" }, { 0x0f, 0x0086, "MoveSource" }, { 0x0f, 0x0087, "MoveDestination" }, { 0x0f, 0x0088, "MoveLength" }, { 0x0f, 0x0089, "EffectParameterBlockLoadReport" }, { 0x0f, 0x008b, "EffectParameterBlockLoadStatus" 
}, { 0x0f, 0x008c, "BlockLoadSuccess" }, { 0x0f, 0x008d, "BlockLoadFull" }, { 0x0f, 0x008e, "BlockLoadError" }, { 0x0f, 0x008f, "BlockHandle" }, { 0x0f, 0x0090, "EffectParameterBlockFreeReport" }, { 0x0f, 0x0091, "TypeSpecificBlockHandle" }, { 0x0f, 0x0092, "PIDStateReport" }, { 0x0f, 0x0094, "EffectPlaying" }, { 0x0f, 0x0095, "PIDDeviceControlReport" }, { 0x0f, 0x0096, "PIDDeviceControl" }, { 0x0f, 0x0097, "DCEnableActuators" }, { 0x0f, 0x0098, "DCDisableActuators" }, { 0x0f, 0x0099, "DCStopAllEffects" }, { 0x0f, 0x009a, "DCReset" }, { 0x0f, 0x009b, "DCPause" }, { 0x0f, 0x009c, "DCContinue" }, { 0x0f, 0x009f, "DevicePaused" }, { 0x0f, 0x00a0, "ActuatorsEnabled" }, { 0x0f, 0x00a4, "SafetySwitch" }, { 0x0f, 0x00a5, "ActuatorOverrideSwitch" }, { 0x0f, 0x00a6, "ActuatorPower" }, { 0x0f, 0x00a7, "StartDelay" }, { 0x0f, 0x00a8, "ParameterBlockSize" }, { 0x0f, 0x00a9, "DeviceManagedPool" }, { 0x0f, 0x00aa, "SharedParameterBlocks" }, { 0x0f, 0x00ab, "CreateNewEffectParameterBlockReport" }, { 0x0f, 0x00ac, "RAMPoolAvailable" }, { 0x11, 0, "SoC" }, { 0x11, 0x0001, "SocControl" }, { 0x11, 0x0002, "FirmwareTransfer" }, { 0x11, 0x0003, "FirmwareFileId" }, { 0x11, 0x0004, "FileOffsetInBytes" }, { 0x11, 0x0005, "FileTransferSizeMaxInBytes" }, { 0x11, 0x0006, "FilePayload" }, { 0x11, 0x0007, "FilePayloadSizeInBytes" }, { 0x11, 0x0008, "FilePayloadContainsLastBytes" }, { 0x11, 0x0009, "FileTransferStop" }, { 0x11, 0x000a, "FileTransferTillEnd" }, { 0x12, 0, "EyeandHeadTrackers" }, { 0x12, 0x0001, "EyeTracker" }, { 0x12, 0x0002, "HeadTracker" }, { 0x12, 0x0010, "TrackingData" }, { 0x12, 0x0011, "Capabilities" }, { 0x12, 0x0012, "Configuration" }, { 0x12, 0x0013, "Status" }, { 0x12, 0x0014, "Control" }, { 0x12, 0x0020, "SensorTimestamp" }, { 0x12, 0x0021, "PositionX" }, { 0x12, 0x0022, "PositionY" }, { 0x12, 0x0023, "PositionZ" }, { 0x12, 0x0024, "GazePoint" }, { 0x12, 0x0025, "LeftEyePosition" }, { 0x12, 0x0026, "RightEyePosition" }, { 0x12, 0x0027, "HeadPosition" }, { 0x12, 0x0028, "HeadDirectionPoint" }, { 0x12, 0x0029, "RotationaboutXaxis" }, { 0x12, 0x002a, "RotationaboutYaxis" }, { 0x12, 0x002b, "RotationaboutZaxis" }, { 0x12, 0x0100, "TrackerQuality" }, { 0x12, 0x0101, "MinimumTrackingDistance" }, { 0x12, 0x0102, "OptimumTrackingDistance" }, { 0x12, 0x0103, "MaximumTrackingDistance" }, { 0x12, 0x0104, "MaximumScreenPlaneWidth" }, { 0x12, 0x0105, "MaximumScreenPlaneHeight" }, { 0x12, 0x0200, "DisplayManufacturerID" }, { 0x12, 0x0201, "DisplayProductID" }, { 0x12, 0x0202, "DisplaySerialNumber" }, { 0x12, 0x0203, "DisplayManufacturerDate" }, { 0x12, 0x0204, "CalibratedScreenWidth" }, { 0x12, 0x0205, "CalibratedScreenHeight" }, { 0x12, 0x0300, "SamplingFrequency" }, { 0x12, 0x0301, "ConfigurationStatus" }, { 0x12, 0x0400, "DeviceModeRequest" }, { 0x14, 0, "AuxiliaryDisplay" }, { 0x14, 0x0001, "AlphanumericDisplay" }, { 0x14, 0x0002, "AuxiliaryDisplay" }, { 0x14, 0x0020, "DisplayAttributesReport" }, { 0x14, 0x0021, "ASCIICharacterSet" }, { 0x14, 0x0022, "DataReadBack" }, { 0x14, 0x0023, "FontReadBack" }, { 0x14, 0x0024, "DisplayControlReport" }, { 0x14, 0x0025, "ClearDisplay" }, { 0x14, 0x0026, "DisplayEnable" }, { 0x14, 0x0027, "ScreenSaverDelay" }, { 0x14, 0x0028, "ScreenSaverEnable" }, { 0x14, 0x0029, "VerticalScroll" }, { 0x14, 0x002a, "HorizontalScroll" }, { 0x14, 0x002b, "CharacterReport" }, { 0x14, 0x002c, "DisplayData" }, { 0x14, 0x002d, "DisplayStatus" }, { 0x14, 0x002e, "StatNotReady" }, { 0x14, 0x002f, "StatReady" }, { 0x14, 0x0030, "ErrNotaloadablecharacter" }, { 0x14, 0x0031, 
"ErrFontdatacannotberead" }, { 0x14, 0x0032, "CursorPositionReport" }, { 0x14, 0x0033, "Row" }, { 0x14, 0x0034, "Column" }, { 0x14, 0x0035, "Rows" }, { 0x14, 0x0036, "Columns" }, { 0x14, 0x0037, "CursorPixelPositioning" }, { 0x14, 0x0038, "CursorMode" }, { 0x14, 0x0039, "CursorEnable" }, { 0x14, 0x003a, "CursorBlink" }, { 0x14, 0x003b, "FontReport" }, { 0x14, 0x003c, "FontData" }, { 0x14, 0x003d, "CharacterWidth" }, { 0x14, 0x003e, "CharacterHeight" }, { 0x14, 0x003f, "CharacterSpacingHorizontal" }, { 0x14, 0x0040, "CharacterSpacingVertical" }, { 0x14, 0x0041, "UnicodeCharacterSet" }, { 0x14, 0x0042, "Font7Segment" }, { 0x14, 0x0043, "7SegmentDirectMap" }, { 0x14, 0x0044, "Font14Segment" }, { 0x14, 0x0045, "14SegmentDirectMap" }, { 0x14, 0x0046, "DisplayBrightness" }, { 0x14, 0x0047, "DisplayContrast" }, { 0x14, 0x0048, "CharacterAttribute" }, { 0x14, 0x0049, "AttributeReadback" }, { 0x14, 0x004a, "AttributeData" }, { 0x14, 0x004b, "CharAttrEnhance" }, { 0x14, 0x004c, "CharAttrUnderline" }, { 0x14, 0x004d, "CharAttrBlink" }, { 0x14, 0x0080, "BitmapSizeX" }, { 0x14, 0x0081, "BitmapSizeY" }, { 0x14, 0x0082, "MaxBlitSize" }, { 0x14, 0x0083, "BitDepthFormat" }, { 0x14, 0x0084, "DisplayOrientation" }, { 0x14, 0x0085, "PaletteReport" }, { 0x14, 0x0086, "PaletteDataSize" }, { 0x14, 0x0087, "PaletteDataOffset" }, { 0x14, 0x0088, "PaletteData" }, { 0x14, 0x008a, "BlitReport" }, { 0x14, 0x008b, "BlitRectangleX1" }, { 0x14, 0x008c, "BlitRectangleY1" }, { 0x14, 0x008d, "BlitRectangleX2" }, { 0x14, 0x008e, "BlitRectangleY2" }, { 0x14, 0x008f, "BlitData" }, { 0x14, 0x0090, "SoftButton" }, { 0x14, 0x0091, "SoftButtonID" }, { 0x14, 0x0092, "SoftButtonSide" }, { 0x14, 0x0093, "SoftButtonOffset1" }, { 0x14, 0x0094, "SoftButtonOffset2" }, { 0x14, 0x0095, "SoftButtonReport" }, { 0x14, 0x00c2, "SoftKeys" }, { 0x14, 0x00cc, "DisplayDataExtensions" }, { 0x14, 0x00cf, "CharacterMapping" }, { 0x14, 0x00dd, "UnicodeEquivalent" }, { 0x14, 0x00df, "CharacterPageMapping" }, { 0x14, 0x00ff, "RequestReport" }, { 0x20, 0, "Sensors" }, { 0x20, 0x0001, "Sensor" }, { 0x20, 0x0010, "Biometric" }, { 0x20, 0x0011, "BiometricHumanPresence" }, { 0x20, 0x0012, "BiometricHumanProximity" }, { 0x20, 0x0013, "BiometricHumanTouch" }, { 0x20, 0x0014, "BiometricBloodPressure" }, { 0x20, 0x0015, "BiometricBodyTemperature" }, { 0x20, 0x0016, "BiometricHeartRate" }, { 0x20, 0x0017, "BiometricHeartRateVariability" }, { 0x20, 0x0018, "BiometricPeripheralOxygenSaturation" }, { 0x20, 0x0019, "BiometricRespiratoryRate" }, { 0x20, 0x0020, "Electrical" }, { 0x20, 0x0021, "ElectricalCapacitance" }, { 0x20, 0x0022, "ElectricalCurrent" }, { 0x20, 0x0023, "ElectricalPower" }, { 0x20, 0x0024, "ElectricalInductance" }, { 0x20, 0x0025, "ElectricalResistance" }, { 0x20, 0x0026, "ElectricalVoltage" }, { 0x20, 0x0027, "ElectricalPotentiometer" }, { 0x20, 0x0028, "ElectricalFrequency" }, { 0x20, 0x0029, "ElectricalPeriod" }, { 0x20, 0x0030, "Environmental" }, { 0x20, 0x0031, "EnvironmentalAtmosphericPressure" }, { 0x20, 0x0032, "EnvironmentalHumidity" }, { 0x20, 0x0033, "EnvironmentalTemperature" }, { 0x20, 0x0034, "EnvironmentalWindDirection" }, { 0x20, 0x0035, "EnvironmentalWindSpeed" }, { 0x20, 0x0036, "EnvironmentalAirQuality" }, { 0x20, 0x0037, "EnvironmentalHeatIndex" }, { 0x20, 0x0038, "EnvironmentalSurfaceTemperature" }, { 0x20, 0x0039, "EnvironmentalVolatileOrganicCompounds" }, { 0x20, 0x003a, "EnvironmentalObjectPresence" }, { 0x20, 0x003b, "EnvironmentalObjectProximity" }, { 0x20, 0x0040, "Light" }, { 0x20, 0x0041, "LightAmbientLight" }, { 0x20, 
0x0042, "LightConsumerInfrared" }, { 0x20, 0x0043, "LightInfraredLight" }, { 0x20, 0x0044, "LightVisibleLight" }, { 0x20, 0x0045, "LightUltravioletLight" }, { 0x20, 0x0050, "Location" }, { 0x20, 0x0051, "LocationBroadcast" }, { 0x20, 0x0052, "LocationDeadReckoning" }, { 0x20, 0x0053, "LocationGPSGlobalPositioningSystem" }, { 0x20, 0x0054, "LocationLookup" }, { 0x20, 0x0055, "LocationOther" }, { 0x20, 0x0056, "LocationStatic" }, { 0x20, 0x0057, "LocationTriangulation" }, { 0x20, 0x0060, "Mechanical" }, { 0x20, 0x0061, "MechanicalBooleanSwitch" }, { 0x20, 0x0062, "MechanicalBooleanSwitchArray" }, { 0x20, 0x0063, "MechanicalMultivalueSwitch" }, { 0x20, 0x0064, "MechanicalForce" }, { 0x20, 0x0065, "MechanicalPressure" }, { 0x20, 0x0066, "MechanicalStrain" }, { 0x20, 0x0067, "MechanicalWeight" }, { 0x20, 0x0068, "MechanicalHapticVibrator" }, { 0x20, 0x0069, "MechanicalHallEffectSwitch" }, { 0x20, 0x0070, "Motion" }, { 0x20, 0x0071, "MotionAccelerometer1D" }, { 0x20, 0x0072, "MotionAccelerometer2D" }, { 0x20, 0x0073, "MotionAccelerometer3D" }, { 0x20, 0x0074, "MotionGyrometer1D" }, { 0x20, 0x0075, "MotionGyrometer2D" }, { 0x20, 0x0076, "MotionGyrometer3D" }, { 0x20, 0x0077, "MotionMotionDetector" }, { 0x20, 0x0078, "MotionSpeedometer" }, { 0x20, 0x0079, "MotionAccelerometer" }, { 0x20, 0x007a, "MotionGyrometer" }, { 0x20, 0x007b, "MotionGravityVector" }, { 0x20, 0x007c, "MotionLinearAccelerometer" }, { 0x20, 0x0080, "Orientation" }, { 0x20, 0x0081, "OrientationCompass1D" }, { 0x20, 0x0082, "OrientationCompass2D" }, { 0x20, 0x0083, "OrientationCompass3D" }, { 0x20, 0x0084, "OrientationInclinometer1D" }, { 0x20, 0x0085, "OrientationInclinometer2D" }, { 0x20, 0x0086, "OrientationInclinometer3D" }, { 0x20, 0x0087, "OrientationDistance1D" }, { 0x20, 0x0088, "OrientationDistance2D" }, { 0x20, 0x0089, "OrientationDistance3D" }, { 0x20, 0x008a, "OrientationDeviceOrientation" }, { 0x20, 0x008b, "OrientationCompass" }, { 0x20, 0x008c, "OrientationInclinometer" }, { 0x20, 0x008d, "OrientationDistance" }, { 0x20, 0x008e, "OrientationRelativeOrientation" }, { 0x20, 0x008f, "OrientationSimpleOrientation" }, { 0x20, 0x0090, "Scanner" }, { 0x20, 0x0091, "ScannerBarcode" }, { 0x20, 0x0092, "ScannerRFID" }, { 0x20, 0x0093, "ScannerNFC" }, { 0x20, 0x00a0, "Time" }, { 0x20, 0x00a1, "TimeAlarmTimer" }, { 0x20, 0x00a2, "TimeRealTimeClock" }, { 0x20, 0x00b0, "PersonalActivity" }, { 0x20, 0x00b1, "PersonalActivityActivityDetection" }, { 0x20, 0x00b2, "PersonalActivityDevicePosition" }, { 0x20, 0x00b3, "PersonalActivityFloorTracker" }, { 0x20, 0x00b4, "PersonalActivityPedometer" }, { 0x20, 0x00b5, "PersonalActivityStepDetection" }, { 0x20, 0x00c0, "OrientationExtended" }, { 0x20, 0x00c1, "OrientationExtendedGeomagneticOrientation" }, { 0x20, 0x00c2, "OrientationExtendedMagnetometer" }, { 0x20, 0x00d0, "Gesture" }, { 0x20, 0x00d1, "GestureChassisFlipGesture" }, { 0x20, 0x00d2, "GestureHingeFoldGesture" }, { 0x20, 0x00e0, "Other" }, { 0x20, 0x00e1, "OtherCustom" }, { 0x20, 0x00e2, "OtherGeneric" }, { 0x20, 0x00e3, "OtherGenericEnumerator" }, { 0x20, 0x00e4, "OtherHingeAngle" }, { 0x20, 0x00f0, "VendorReserved1" }, { 0x20, 0x00f1, "VendorReserved2" }, { 0x20, 0x00f2, "VendorReserved3" }, { 0x20, 0x00f3, "VendorReserved4" }, { 0x20, 0x00f4, "VendorReserved5" }, { 0x20, 0x00f5, "VendorReserved6" }, { 0x20, 0x00f6, "VendorReserved7" }, { 0x20, 0x00f7, "VendorReserved8" }, { 0x20, 0x00f8, "VendorReserved9" }, { 0x20, 0x00f9, "VendorReserved10" }, { 0x20, 0x00fa, "VendorReserved11" }, { 0x20, 0x00fb, "VendorReserved12" }, { 
0x20, 0x00fc, "VendorReserved13" }, { 0x20, 0x00fd, "VendorReserved14" }, { 0x20, 0x00fe, "VendorReserved15" }, { 0x20, 0x00ff, "VendorReserved16" }, { 0x20, 0x0200, "Event" }, { 0x20, 0x0201, "EventSensorState" }, { 0x20, 0x0202, "EventSensorEvent" }, { 0x20, 0x0300, "Property" }, { 0x20, 0x0301, "PropertyFriendlyName" }, { 0x20, 0x0302, "PropertyPersistentUniqueID" }, { 0x20, 0x0303, "PropertySensorStatus" }, { 0x20, 0x0304, "PropertyMinimumReportInterval" }, { 0x20, 0x0305, "PropertySensorManufacturer" }, { 0x20, 0x0306, "PropertySensorModel" }, { 0x20, 0x0307, "PropertySensorSerialNumber" }, { 0x20, 0x0308, "PropertySensorDescription" }, { 0x20, 0x0309, "PropertySensorConnectionType" }, { 0x20, 0x030a, "PropertySensorDevicePath" }, { 0x20, 0x030b, "PropertyHardwareRevision" }, { 0x20, 0x030c, "PropertyFirmwareVersion" }, { 0x20, 0x030d, "PropertyReleaseDate" }, { 0x20, 0x030e, "PropertyReportInterval" }, { 0x20, 0x030f, "PropertyChangeSensitivityAbsolute" }, { 0x20, 0x0310, "PropertyChangeSensitivityPercentofRange" }, { 0x20, 0x0311, "PropertyChangeSensitivityPercentRelative" }, { 0x20, 0x0312, "PropertyAccuracy" }, { 0x20, 0x0313, "PropertyResolution" }, { 0x20, 0x0314, "PropertyMaximum" }, { 0x20, 0x0315, "PropertyMinimum" }, { 0x20, 0x0316, "PropertyReportingState" }, { 0x20, 0x0317, "PropertySamplingRate" }, { 0x20, 0x0318, "PropertyResponseCurve" }, { 0x20, 0x0319, "PropertyPowerState" }, { 0x20, 0x031a, "PropertyMaximumFIFOEvents" }, { 0x20, 0x031b, "PropertyReportLatency" }, { 0x20, 0x031c, "PropertyFlushFIFOEvents" }, { 0x20, 0x031d, "PropertyMaximumPowerConsumption" }, { 0x20, 0x031e, "PropertyIsPrimary" }, { 0x20, 0x031f, "PropertyHumanPresenceDetectionType" }, { 0x20, 0x0400, "DataFieldLocation" }, { 0x20, 0x0402, "DataFieldAltitudeAntennaSeaLevel" }, { 0x20, 0x0403, "DataFieldDifferentialReferenceStationID" }, { 0x20, 0x0404, "DataFieldAltitudeEllipsoidError" }, { 0x20, 0x0405, "DataFieldAltitudeEllipsoid" }, { 0x20, 0x0406, "DataFieldAltitudeSeaLevelError" }, { 0x20, 0x0407, "DataFieldAltitudeSeaLevel" }, { 0x20, 0x0408, "DataFieldDifferentialGPSDataAge" }, { 0x20, 0x0409, "DataFieldErrorRadius" }, { 0x20, 0x040a, "DataFieldFixQuality" }, { 0x20, 0x040b, "DataFieldFixType" }, { 0x20, 0x040c, "DataFieldGeoidalSeparation" }, { 0x20, 0x040d, "DataFieldGPSOperationMode" }, { 0x20, 0x040e, "DataFieldGPSSelectionMode" }, { 0x20, 0x040f, "DataFieldGPSStatus" }, { 0x20, 0x0410, "DataFieldPositionDilutionofPrecision" }, { 0x20, 0x0411, "DataFieldHorizontalDilutionofPrecision" }, { 0x20, 0x0412, "DataFieldVerticalDilutionofPrecision" }, { 0x20, 0x0413, "DataFieldLatitude" }, { 0x20, 0x0414, "DataFieldLongitude" }, { 0x20, 0x0415, "DataFieldTrueHeading" }, { 0x20, 0x0416, "DataFieldMagneticHeading" }, { 0x20, 0x0417, "DataFieldMagneticVariation" }, { 0x20, 0x0418, "DataFieldSpeed" }, { 0x20, 0x0419, "DataFieldSatellitesinView" }, { 0x20, 0x041a, "DataFieldSatellitesinViewAzimuth" }, { 0x20, 0x041b, "DataFieldSatellitesinViewElevation" }, { 0x20, 0x041c, "DataFieldSatellitesinViewIDs" }, { 0x20, 0x041d, "DataFieldSatellitesinViewPRNs" }, { 0x20, 0x041e, "DataFieldSatellitesinViewSNRatios" }, { 0x20, 0x041f, "DataFieldSatellitesUsedCount" }, { 0x20, 0x0420, "DataFieldSatellitesUsedPRNs" }, { 0x20, 0x0421, "DataFieldNMEASentence" }, { 0x20, 0x0422, "DataFieldAddressLine1" }, { 0x20, 0x0423, "DataFieldAddressLine2" }, { 0x20, 0x0424, "DataFieldCity" }, { 0x20, 0x0425, "DataFieldStateorProvince" }, { 0x20, 0x0426, "DataFieldCountryorRegion" }, { 0x20, 0x0427, "DataFieldPostalCode" }, { 
0x20, 0x042a, "PropertyLocation" }, { 0x20, 0x042b, "PropertyLocationDesiredAccuracy" }, { 0x20, 0x0430, "DataFieldEnvironmental" }, { 0x20, 0x0431, "DataFieldAtmosphericPressure" }, { 0x20, 0x0433, "DataFieldRelativeHumidity" }, { 0x20, 0x0434, "DataFieldTemperature" }, { 0x20, 0x0435, "DataFieldWindDirection" }, { 0x20, 0x0436, "DataFieldWindSpeed" }, { 0x20, 0x0437, "DataFieldAirQualityIndex" }, { 0x20, 0x0438, "DataFieldEquivalentCO2" }, { 0x20, 0x0439, "DataFieldVolatileOrganicCompoundConcentration" }, { 0x20, 0x043a, "DataFieldObjectPresence" }, { 0x20, 0x043b, "DataFieldObjectProximityRange" }, { 0x20, 0x043c, "DataFieldObjectProximityOutofRange" }, { 0x20, 0x0440, "PropertyEnvironmental" }, { 0x20, 0x0441, "PropertyReferencePressure" }, { 0x20, 0x0450, "DataFieldMotion" }, { 0x20, 0x0451, "DataFieldMotionState" }, { 0x20, 0x0452, "DataFieldAcceleration" }, { 0x20, 0x0453, "DataFieldAccelerationAxisX" }, { 0x20, 0x0454, "DataFieldAccelerationAxisY" }, { 0x20, 0x0455, "DataFieldAccelerationAxisZ" }, { 0x20, 0x0456, "DataFieldAngularVelocity" }, { 0x20, 0x0457, "DataFieldAngularVelocityaboutXAxis" }, { 0x20, 0x0458, "DataFieldAngularVelocityaboutYAxis" }, { 0x20, 0x0459, "DataFieldAngularVelocityaboutZAxis" }, { 0x20, 0x045a, "DataFieldAngularPosition" }, { 0x20, 0x045b, "DataFieldAngularPositionaboutXAxis" }, { 0x20, 0x045c, "DataFieldAngularPositionaboutYAxis" }, { 0x20, 0x045d, "DataFieldAngularPositionaboutZAxis" }, { 0x20, 0x045e, "DataFieldMotionSpeed" }, { 0x20, 0x045f, "DataFieldMotionIntensity" }, { 0x20, 0x0470, "DataFieldOrientation" }, { 0x20, 0x0471, "DataFieldHeading" }, { 0x20, 0x0472, "DataFieldHeadingXAxis" }, { 0x20, 0x0473, "DataFieldHeadingYAxis" }, { 0x20, 0x0474, "DataFieldHeadingZAxis" }, { 0x20, 0x0475, "DataFieldHeadingCompensatedMagneticNorth" }, { 0x20, 0x0476, "DataFieldHeadingCompensatedTrueNorth" }, { 0x20, 0x0477, "DataFieldHeadingMagneticNorth" }, { 0x20, 0x0478, "DataFieldHeadingTrueNorth" }, { 0x20, 0x0479, "DataFieldDistance" }, { 0x20, 0x047a, "DataFieldDistanceXAxis" }, { 0x20, 0x047b, "DataFieldDistanceYAxis" }, { 0x20, 0x047c, "DataFieldDistanceZAxis" }, { 0x20, 0x047d, "DataFieldDistanceOutofRange" }, { 0x20, 0x047e, "DataFieldTilt" }, { 0x20, 0x047f, "DataFieldTiltXAxis" }, { 0x20, 0x0480, "DataFieldTiltYAxis" }, { 0x20, 0x0481, "DataFieldTiltZAxis" }, { 0x20, 0x0482, "DataFieldRotationMatrix" }, { 0x20, 0x0483, "DataFieldQuaternion" }, { 0x20, 0x0484, "DataFieldMagneticFlux" }, { 0x20, 0x0485, "DataFieldMagneticFluxXAxis" }, { 0x20, 0x0486, "DataFieldMagneticFluxYAxis" }, { 0x20, 0x0487, "DataFieldMagneticFluxZAxis" }, { 0x20, 0x0488, "DataFieldMagnetometerAccuracy" }, { 0x20, 0x0489, "DataFieldSimpleOrientationDirection" }, { 0x20, 0x0490, "DataFieldMechanical" }, { 0x20, 0x0491, "DataFieldBooleanSwitchState" }, { 0x20, 0x0492, "DataFieldBooleanSwitchArrayStates" }, { 0x20, 0x0493, "DataFieldMultivalueSwitchValue" }, { 0x20, 0x0494, "DataFieldForce" }, { 0x20, 0x0495, "DataFieldAbsolutePressure" }, { 0x20, 0x0496, "DataFieldGaugePressure" }, { 0x20, 0x0497, "DataFieldStrain" }, { 0x20, 0x0498, "DataFieldWeight" }, { 0x20, 0x04a0, "PropertyMechanical" }, { 0x20, 0x04a1, "PropertyVibrationState" }, { 0x20, 0x04a2, "PropertyForwardVibrationSpeed" }, { 0x20, 0x04a3, "PropertyBackwardVibrationSpeed" }, { 0x20, 0x04b0, "DataFieldBiometric" }, { 0x20, 0x04b1, "DataFieldHumanPresence" }, { 0x20, 0x04b2, "DataFieldHumanProximityRange" }, { 0x20, 0x04b3, "DataFieldHumanProximityOutofRange" }, { 0x20, 0x04b4, "DataFieldHumanTouchState" }, { 0x20, 
0x04b5, "DataFieldBloodPressure" }, { 0x20, 0x04b6, "DataFieldBloodPressureDiastolic" }, { 0x20, 0x04b7, "DataFieldBloodPressureSystolic" }, { 0x20, 0x04b8, "DataFieldHeartRate" }, { 0x20, 0x04b9, "DataFieldRestingHeartRate" }, { 0x20, 0x04ba, "DataFieldHeartbeatInterval" }, { 0x20, 0x04bb, "DataFieldRespiratoryRate" }, { 0x20, 0x04bc, "DataFieldSpO2" }, { 0x20, 0x04bd, "DataFieldHumanAttentionDetected" }, { 0x20, 0x04be, "DataFieldHumanHeadAzimuth" }, { 0x20, 0x04bf, "DataFieldHumanHeadAltitude" }, { 0x20, 0x04c0, "DataFieldHumanHeadRoll" }, { 0x20, 0x04c1, "DataFieldHumanHeadPitch" }, { 0x20, 0x04c2, "DataFieldHumanHeadYaw" }, { 0x20, 0x04c3, "DataFieldHumanCorrelationId" }, { 0x20, 0x04d0, "DataFieldLight" }, { 0x20, 0x04d1, "DataFieldIlluminance" }, { 0x20, 0x04d2, "DataFieldColorTemperature" }, { 0x20, 0x04d3, "DataFieldChromaticity" }, { 0x20, 0x04d4, "DataFieldChromaticityX" }, { 0x20, 0x04d5, "DataFieldChromaticityY" }, { 0x20, 0x04d6, "DataFieldConsumerIRSentenceReceive" }, { 0x20, 0x04d7, "DataFieldInfraredLight" }, { 0x20, 0x04d8, "DataFieldRedLight" }, { 0x20, 0x04d9, "DataFieldGreenLight" }, { 0x20, 0x04da, "DataFieldBlueLight" }, { 0x20, 0x04db, "DataFieldUltravioletALight" }, { 0x20, 0x04dc, "DataFieldUltravioletBLight" }, { 0x20, 0x04dd, "DataFieldUltravioletIndex" }, { 0x20, 0x04de, "DataFieldNearInfraredLight" }, { 0x20, 0x04df, "PropertyLight" }, { 0x20, 0x04e0, "PropertyConsumerIRSentenceSend" }, { 0x20, 0x04e2, "PropertyAutoBrightnessPreferred" }, { 0x20, 0x04e3, "PropertyAutoColorPreferred" }, { 0x20, 0x04f0, "DataFieldScanner" }, { 0x20, 0x04f1, "DataFieldRFIDTag40Bit" }, { 0x20, 0x04f2, "DataFieldNFCSentenceReceive" }, { 0x20, 0x04f8, "PropertyScanner" }, { 0x20, 0x04f9, "PropertyNFCSentenceSend" }, { 0x20, 0x0500, "DataFieldElectrical" }, { 0x20, 0x0501, "DataFieldCapacitance" }, { 0x20, 0x0502, "DataFieldCurrent" }, { 0x20, 0x0503, "DataFieldElectricalPower" }, { 0x20, 0x0504, "DataFieldInductance" }, { 0x20, 0x0505, "DataFieldResistance" }, { 0x20, 0x0506, "DataFieldVoltage" }, { 0x20, 0x0507, "DataFieldFrequency" }, { 0x20, 0x0508, "DataFieldPeriod" }, { 0x20, 0x0509, "DataFieldPercentofRange" }, { 0x20, 0x0520, "DataFieldTime" }, { 0x20, 0x0521, "DataFieldYear" }, { 0x20, 0x0522, "DataFieldMonth" }, { 0x20, 0x0523, "DataFieldDay" }, { 0x20, 0x0524, "DataFieldDayofWeek" }, { 0x20, 0x0525, "DataFieldHour" }, { 0x20, 0x0526, "DataFieldMinute" }, { 0x20, 0x0527, "DataFieldSecond" }, { 0x20, 0x0528, "DataFieldMillisecond" }, { 0x20, 0x0529, "DataFieldTimestamp" }, { 0x20, 0x052a, "DataFieldJulianDayofYear" }, { 0x20, 0x052b, "DataFieldTimeSinceSystemBoot" }, { 0x20, 0x0530, "PropertyTime" }, { 0x20, 0x0531, "PropertyTimeZoneOffsetfromUTC" }, { 0x20, 0x0532, "PropertyTimeZoneName" }, { 0x20, 0x0533, "PropertyDaylightSavingsTimeObserved" }, { 0x20, 0x0534, "PropertyTimeTrimAdjustment" }, { 0x20, 0x0535, "PropertyArmAlarm" }, { 0x20, 0x0540, "DataFieldCustom" }, { 0x20, 0x0541, "DataFieldCustomUsage" }, { 0x20, 0x0542, "DataFieldCustomBooleanArray" }, { 0x20, 0x0543, "DataFieldCustomValue" }, { 0x20, 0x0544, "DataFieldCustomValue1" }, { 0x20, 0x0545, "DataFieldCustomValue2" }, { 0x20, 0x0546, "DataFieldCustomValue3" }, { 0x20, 0x0547, "DataFieldCustomValue4" }, { 0x20, 0x0548, "DataFieldCustomValue5" }, { 0x20, 0x0549, "DataFieldCustomValue6" }, { 0x20, 0x054a, "DataFieldCustomValue7" }, { 0x20, 0x054b, "DataFieldCustomValue8" }, { 0x20, 0x054c, "DataFieldCustomValue9" }, { 0x20, 0x054d, "DataFieldCustomValue10" }, { 0x20, 0x054e, "DataFieldCustomValue11" }, { 0x20, 
0x054f, "DataFieldCustomValue12" }, { 0x20, 0x0550, "DataFieldCustomValue13" }, { 0x20, 0x0551, "DataFieldCustomValue14" }, { 0x20, 0x0552, "DataFieldCustomValue15" }, { 0x20, 0x0553, "DataFieldCustomValue16" }, { 0x20, 0x0554, "DataFieldCustomValue17" }, { 0x20, 0x0555, "DataFieldCustomValue18" }, { 0x20, 0x0556, "DataFieldCustomValue19" }, { 0x20, 0x0557, "DataFieldCustomValue20" }, { 0x20, 0x0558, "DataFieldCustomValue21" }, { 0x20, 0x0559, "DataFieldCustomValue22" }, { 0x20, 0x055a, "DataFieldCustomValue23" }, { 0x20, 0x055b, "DataFieldCustomValue24" }, { 0x20, 0x055c, "DataFieldCustomValue25" }, { 0x20, 0x055d, "DataFieldCustomValue26" }, { 0x20, 0x055e, "DataFieldCustomValue27" }, { 0x20, 0x055f, "DataFieldCustomValue28" }, { 0x20, 0x0560, "DataFieldGeneric" }, { 0x20, 0x0561, "DataFieldGenericGUIDorPROPERTYKEY" }, { 0x20, 0x0562, "DataFieldGenericCategoryGUID" }, { 0x20, 0x0563, "DataFieldGenericTypeGUID" }, { 0x20, 0x0564, "DataFieldGenericEventPROPERTYKEY" }, { 0x20, 0x0565, "DataFieldGenericPropertyPROPERTYKEY" }, { 0x20, 0x0566, "DataFieldGenericDataFieldPROPERTYKEY" }, { 0x20, 0x0567, "DataFieldGenericEvent" }, { 0x20, 0x0568, "DataFieldGenericProperty" }, { 0x20, 0x0569, "DataFieldGenericDataField" }, { 0x20, 0x056a, "DataFieldEnumeratorTableRowIndex" }, { 0x20, 0x056b, "DataFieldEnumeratorTableRowCount" }, { 0x20, 0x056c, "DataFieldGenericGUIDorPROPERTYKEYkind" }, { 0x20, 0x056d, "DataFieldGenericGUID" }, { 0x20, 0x056e, "DataFieldGenericPROPERTYKEY" }, { 0x20, 0x056f, "DataFieldGenericTopLevelCollectionID" }, { 0x20, 0x0570, "DataFieldGenericReportID" }, { 0x20, 0x0571, "DataFieldGenericReportItemPositionIndex" }, { 0x20, 0x0572, "DataFieldGenericFirmwareVARTYPE" }, { 0x20, 0x0573, "DataFieldGenericUnitofMeasure" }, { 0x20, 0x0574, "DataFieldGenericUnitExponent" }, { 0x20, 0x0575, "DataFieldGenericReportSize" }, { 0x20, 0x0576, "DataFieldGenericReportCount" }, { 0x20, 0x0580, "PropertyGeneric" }, { 0x20, 0x0581, "PropertyEnumeratorTableRowIndex" }, { 0x20, 0x0582, "PropertyEnumeratorTableRowCount" }, { 0x20, 0x0590, "DataFieldPersonalActivity" }, { 0x20, 0x0591, "DataFieldActivityType" }, { 0x20, 0x0592, "DataFieldActivityState" }, { 0x20, 0x0593, "DataFieldDevicePosition" }, { 0x20, 0x0594, "DataFieldStepCount" }, { 0x20, 0x0595, "DataFieldStepCountReset" }, { 0x20, 0x0596, "DataFieldStepDuration" }, { 0x20, 0x0597, "DataFieldStepType" }, { 0x20, 0x05a0, "PropertyMinimumActivityDetectionInterval" }, { 0x20, 0x05a1, "PropertySupportedActivityTypes" }, { 0x20, 0x05a2, "PropertySubscribedActivityTypes" }, { 0x20, 0x05a3, "PropertySupportedStepTypes" }, { 0x20, 0x05a4, "PropertySubscribedStepTypes" }, { 0x20, 0x05a5, "PropertyFloorHeight" }, { 0x20, 0x05b0, "DataFieldCustomTypeID" }, { 0x20, 0x05c0, "PropertyCustom" }, { 0x20, 0x05c1, "PropertyCustomValue1" }, { 0x20, 0x05c2, "PropertyCustomValue2" }, { 0x20, 0x05c3, "PropertyCustomValue3" }, { 0x20, 0x05c4, "PropertyCustomValue4" }, { 0x20, 0x05c5, "PropertyCustomValue5" }, { 0x20, 0x05c6, "PropertyCustomValue6" }, { 0x20, 0x05c7, "PropertyCustomValue7" }, { 0x20, 0x05c8, "PropertyCustomValue8" }, { 0x20, 0x05c9, "PropertyCustomValue9" }, { 0x20, 0x05ca, "PropertyCustomValue10" }, { 0x20, 0x05cb, "PropertyCustomValue11" }, { 0x20, 0x05cc, "PropertyCustomValue12" }, { 0x20, 0x05cd, "PropertyCustomValue13" }, { 0x20, 0x05ce, "PropertyCustomValue14" }, { 0x20, 0x05cf, "PropertyCustomValue15" }, { 0x20, 0x05d0, "PropertyCustomValue16" }, { 0x20, 0x05e0, "DataFieldHinge" }, { 0x20, 0x05e1, "DataFieldHingeAngle" }, { 0x20, 0x05f0, 
"DataFieldGestureSensor" }, { 0x20, 0x05f1, "DataFieldGestureState" }, { 0x20, 0x05f2, "DataFieldHingeFoldInitialAngle" }, { 0x20, 0x05f3, "DataFieldHingeFoldFinalAngle" }, { 0x20, 0x05f4, "DataFieldHingeFoldContributingPanel" }, { 0x20, 0x05f5, "DataFieldHingeFoldType" }, { 0x20, 0x0800, "SensorStateUndefined" }, { 0x20, 0x0801, "SensorStateReady" }, { 0x20, 0x0802, "SensorStateNotAvailable" }, { 0x20, 0x0803, "SensorStateNoData" }, { 0x20, 0x0804, "SensorStateInitializing" }, { 0x20, 0x0805, "SensorStateAccessDenied" }, { 0x20, 0x0806, "SensorStateError" }, { 0x20, 0x0810, "SensorEventUnknown" }, { 0x20, 0x0811, "SensorEventStateChanged" }, { 0x20, 0x0812, "SensorEventPropertyChanged" }, { 0x20, 0x0813, "SensorEventDataUpdated" }, { 0x20, 0x0814, "SensorEventPollResponse" }, { 0x20, 0x0815, "SensorEventChangeSensitivity" }, { 0x20, 0x0816, "SensorEventRangeMaximumReached" }, { 0x20, 0x0817, "SensorEventRangeMinimumReached" }, { 0x20, 0x0818, "SensorEventHighThresholdCrossUpward" }, { 0x20, 0x0819, "SensorEventHighThresholdCrossDownward" }, { 0x20, 0x081a, "SensorEventLowThresholdCrossUpward" }, { 0x20, 0x081b, "SensorEventLowThresholdCrossDownward" }, { 0x20, 0x081c, "SensorEventZeroThresholdCrossUpward" }, { 0x20, 0x081d, "SensorEventZeroThresholdCrossDownward" }, { 0x20, 0x081e, "SensorEventPeriodExceeded" }, { 0x20, 0x081f, "SensorEventFrequencyExceeded" }, { 0x20, 0x0820, "SensorEventComplexTrigger" }, { 0x20, 0x0830, "ConnectionTypePCIntegrated" }, { 0x20, 0x0831, "ConnectionTypePCAttached" }, { 0x20, 0x0832, "ConnectionTypePCExternal" }, { 0x20, 0x0840, "ReportingStateReportNoEvents" }, { 0x20, 0x0841, "ReportingStateReportAllEvents" }, { 0x20, 0x0842, "ReportingStateReportThresholdEvents" }, { 0x20, 0x0843, "ReportingStateWakeOnNoEvents" }, { 0x20, 0x0844, "ReportingStateWakeOnAllEvents" }, { 0x20, 0x0845, "ReportingStateWakeOnThresholdEvents" }, { 0x20, 0x0846, "ReportingStateAnytime" }, { 0x20, 0x0850, "PowerStateUndefined" }, { 0x20, 0x0851, "PowerStateD0FullPower" }, { 0x20, 0x0852, "PowerStateD1LowPower" }, { 0x20, 0x0853, "PowerStateD2StandbyPowerwithWakeup" }, { 0x20, 0x0854, "PowerStateD3SleepwithWakeup" }, { 0x20, 0x0855, "PowerStateD4PowerOff" }, { 0x20, 0x0860, "AccuracyDefault" }, { 0x20, 0x0861, "AccuracyHigh" }, { 0x20, 0x0862, "AccuracyMedium" }, { 0x20, 0x0863, "AccuracyLow" }, { 0x20, 0x0870, "FixQualityNoFix" }, { 0x20, 0x0871, "FixQualityGPS" }, { 0x20, 0x0872, "FixQualityDGPS" }, { 0x20, 0x0880, "FixTypeNoFix" }, { 0x20, 0x0881, "FixTypeGPSSPSModeFixValid" }, { 0x20, 0x0882, "FixTypeDGPSSPSModeFixValid" }, { 0x20, 0x0883, "FixTypeGPSPPSModeFixValid" }, { 0x20, 0x0884, "FixTypeRealTimeKinematic" }, { 0x20, 0x0885, "FixTypeFloatRTK" }, { 0x20, 0x0886, "FixTypeEstimateddeadreckoned" }, { 0x20, 0x0887, "FixTypeManualInputMode" }, { 0x20, 0x0888, "FixTypeSimulatorMode" }, { 0x20, 0x0890, "GPSOperationModeManual" }, { 0x20, 0x0891, "GPSOperationModeAutomatic" }, { 0x20, 0x08a0, "GPSSelectionModeAutonomous" }, { 0x20, 0x08a1, "GPSSelectionModeDGPS" }, { 0x20, 0x08a2, "GPSSelectionModeEstimateddeadreckoned" }, { 0x20, 0x08a3, "GPSSelectionModeManualInput" }, { 0x20, 0x08a4, "GPSSelectionModeSimulator" }, { 0x20, 0x08a5, "GPSSelectionModeDataNotValid" }, { 0x20, 0x08b0, "GPSStatusDataValid" }, { 0x20, 0x08b1, "GPSStatusDataNotValid" }, { 0x20, 0x08c0, "DayofWeekSunday" }, { 0x20, 0x08c1, "DayofWeekMonday" }, { 0x20, 0x08c2, "DayofWeekTuesday" }, { 0x20, 0x08c3, "DayofWeekWednesday" }, { 0x20, 0x08c4, "DayofWeekThursday" }, { 0x20, 0x08c5, "DayofWeekFriday" }, { 0x20, 
0x08c6, "DayofWeekSaturday" }, { 0x20, 0x08d0, "KindCategory" }, { 0x20, 0x08d1, "KindType" }, { 0x20, 0x08d2, "KindEvent" }, { 0x20, 0x08d3, "KindProperty" }, { 0x20, 0x08d4, "KindDataField" }, { 0x20, 0x08e0, "MagnetometerAccuracyLow" }, { 0x20, 0x08e1, "MagnetometerAccuracyMedium" }, { 0x20, 0x08e2, "MagnetometerAccuracyHigh" }, { 0x20, 0x08f0, "SimpleOrientationDirectionNotRotated" }, { 0x20, 0x08f1, "SimpleOrientationDirectionRotated90DegreesCCW" }, { 0x20, 0x08f2, "SimpleOrientationDirectionRotated180DegreesCCW" }, { 0x20, 0x08f3, "SimpleOrientationDirectionRotated270DegreesCCW" }, { 0x20, 0x08f4, "SimpleOrientationDirectionFaceUp" }, { 0x20, 0x08f5, "SimpleOrientationDirectionFaceDown" }, { 0x20, 0x0900, "VT_NULL" }, { 0x20, 0x0901, "VT_BOOL" }, { 0x20, 0x0902, "VT_UI1" }, { 0x20, 0x0903, "VT_I1" }, { 0x20, 0x0904, "VT_UI2" }, { 0x20, 0x0905, "VT_I2" }, { 0x20, 0x0906, "VT_UI4" }, { 0x20, 0x0907, "VT_I4" }, { 0x20, 0x0908, "VT_UI8" }, { 0x20, 0x0909, "VT_I8" }, { 0x20, 0x090a, "VT_R4" }, { 0x20, 0x090b, "VT_R8" }, { 0x20, 0x090c, "VT_WSTR" }, { 0x20, 0x090d, "VT_STR" }, { 0x20, 0x090e, "VT_CLSID" }, { 0x20, 0x090f, "VT_VECTORVT_UI1" }, { 0x20, 0x0910, "VT_F16E0" }, { 0x20, 0x0911, "VT_F16E1" }, { 0x20, 0x0912, "VT_F16E2" }, { 0x20, 0x0913, "VT_F16E3" }, { 0x20, 0x0914, "VT_F16E4" }, { 0x20, 0x0915, "VT_F16E5" }, { 0x20, 0x0916, "VT_F16E6" }, { 0x20, 0x0917, "VT_F16E7" }, { 0x20, 0x0918, "VT_F16E8" }, { 0x20, 0x0919, "VT_F16E9" }, { 0x20, 0x091a, "VT_F16EA" }, { 0x20, 0x091b, "VT_F16EB" }, { 0x20, 0x091c, "VT_F16EC" }, { 0x20, 0x091d, "VT_F16ED" }, { 0x20, 0x091e, "VT_F16EE" }, { 0x20, 0x091f, "VT_F16EF" }, { 0x20, 0x0920, "VT_F32E0" }, { 0x20, 0x0921, "VT_F32E1" }, { 0x20, 0x0922, "VT_F32E2" }, { 0x20, 0x0923, "VT_F32E3" }, { 0x20, 0x0924, "VT_F32E4" }, { 0x20, 0x0925, "VT_F32E5" }, { 0x20, 0x0926, "VT_F32E6" }, { 0x20, 0x0927, "VT_F32E7" }, { 0x20, 0x0928, "VT_F32E8" }, { 0x20, 0x0929, "VT_F32E9" }, { 0x20, 0x092a, "VT_F32EA" }, { 0x20, 0x092b, "VT_F32EB" }, { 0x20, 0x092c, "VT_F32EC" }, { 0x20, 0x092d, "VT_F32ED" }, { 0x20, 0x092e, "VT_F32EE" }, { 0x20, 0x092f, "VT_F32EF" }, { 0x20, 0x0930, "ActivityTypeUnknown" }, { 0x20, 0x0931, "ActivityTypeStationary" }, { 0x20, 0x0932, "ActivityTypeFidgeting" }, { 0x20, 0x0933, "ActivityTypeWalking" }, { 0x20, 0x0934, "ActivityTypeRunning" }, { 0x20, 0x0935, "ActivityTypeInVehicle" }, { 0x20, 0x0936, "ActivityTypeBiking" }, { 0x20, 0x0937, "ActivityTypeIdle" }, { 0x20, 0x0940, "UnitNotSpecified" }, { 0x20, 0x0941, "UnitLux" }, { 0x20, 0x0942, "UnitDegreesKelvin" }, { 0x20, 0x0943, "UnitDegreesCelsius" }, { 0x20, 0x0944, "UnitPascal" }, { 0x20, 0x0945, "UnitNewton" }, { 0x20, 0x0946, "UnitMetersSecond" }, { 0x20, 0x0947, "UnitKilogram" }, { 0x20, 0x0948, "UnitMeter" }, { 0x20, 0x0949, "UnitMetersSecondSecond" }, { 0x20, 0x094a, "UnitFarad" }, { 0x20, 0x094b, "UnitAmpere" }, { 0x20, 0x094c, "UnitWatt" }, { 0x20, 0x094d, "UnitHenry" }, { 0x20, 0x094e, "UnitOhm" }, { 0x20, 0x094f, "UnitVolt" }, { 0x20, 0x0950, "UnitHertz" }, { 0x20, 0x0951, "UnitBar" }, { 0x20, 0x0952, "UnitDegreesAnticlockwise" }, { 0x20, 0x0953, "UnitDegreesClockwise" }, { 0x20, 0x0954, "UnitDegrees" }, { 0x20, 0x0955, "UnitDegreesSecond" }, { 0x20, 0x0956, "UnitDegreesSecondSecond" }, { 0x20, 0x0957, "UnitKnot" }, { 0x20, 0x0958, "UnitPercent" }, { 0x20, 0x0959, "UnitSecond" }, { 0x20, 0x095a, "UnitMillisecond" }, { 0x20, 0x095b, "UnitG" }, { 0x20, 0x095c, "UnitBytes" }, { 0x20, 0x095d, "UnitMilligauss" }, { 0x20, 0x095e, "UnitBits" }, { 0x20, 0x0960, 
"ActivityStateNoStateChange" }, { 0x20, 0x0961, "ActivityStateStartActivity" }, { 0x20, 0x0962, "ActivityStateEndActivity" }, { 0x20, 0x0970, "Exponent0" }, { 0x20, 0x0971, "Exponent1" }, { 0x20, 0x0972, "Exponent2" }, { 0x20, 0x0973, "Exponent3" }, { 0x20, 0x0974, "Exponent4" }, { 0x20, 0x0975, "Exponent5" }, { 0x20, 0x0976, "Exponent6" }, { 0x20, 0x0977, "Exponent7" }, { 0x20, 0x0978, "Exponent8" }, { 0x20, 0x0979, "Exponent9" }, { 0x20, 0x097a, "ExponentA" }, { 0x20, 0x097b, "ExponentB" }, { 0x20, 0x097c, "ExponentC" }, { 0x20, 0x097d, "ExponentD" }, { 0x20, 0x097e, "ExponentE" }, { 0x20, 0x097f, "ExponentF" }, { 0x20, 0x0980, "DevicePositionUnknown" }, { 0x20, 0x0981, "DevicePositionUnchanged" }, { 0x20, 0x0982, "DevicePositionOnDesk" }, { 0x20, 0x0983, "DevicePositionInHand" }, { 0x20, 0x0984, "DevicePositionMovinginBag" }, { 0x20, 0x0985, "DevicePositionStationaryinBag" }, { 0x20, 0x0990, "StepTypeUnknown" }, { 0x20, 0x0991, "StepTypeWalking" }, { 0x20, 0x0992, "StepTypeRunning" }, { 0x20, 0x09a0, "GestureStateUnknown" }, { 0x20, 0x09a1, "GestureStateStarted" }, { 0x20, 0x09a2, "GestureStateCompleted" }, { 0x20, 0x09a3, "GestureStateCancelled" }, { 0x20, 0x09b0, "HingeFoldContributingPanelUnknown" }, { 0x20, 0x09b1, "HingeFoldContributingPanelPanel1" }, { 0x20, 0x09b2, "HingeFoldContributingPanelPanel2" }, { 0x20, 0x09b3, "HingeFoldContributingPanelBoth" }, { 0x20, 0x09b4, "HingeFoldTypeUnknown" }, { 0x20, 0x09b5, "HingeFoldTypeIncreasing" }, { 0x20, 0x09b6, "HingeFoldTypeDecreasing" }, { 0x20, 0x09c0, "HumanPresenceDetectionTypeVendorDefinedNonBiometric" }, { 0x20, 0x09c1, "HumanPresenceDetectionTypeVendorDefinedBiometric" }, { 0x20, 0x09c2, "HumanPresenceDetectionTypeFacialBiometric" }, { 0x20, 0x09c3, "HumanPresenceDetectionTypeAudioBiometric" }, { 0x20, 0x1000, "ModifierChangeSensitivityAbsolute" }, { 0x20, 0x2000, "ModifierMaximum" }, { 0x20, 0x3000, "ModifierMinimum" }, { 0x20, 0x4000, "ModifierAccuracy" }, { 0x20, 0x5000, "ModifierResolution" }, { 0x20, 0x6000, "ModifierThresholdHigh" }, { 0x20, 0x7000, "ModifierThresholdLow" }, { 0x20, 0x8000, "ModifierCalibrationOffset" }, { 0x20, 0x9000, "ModifierCalibrationMultiplier" }, { 0x20, 0xa000, "ModifierReportInterval" }, { 0x20, 0xb000, "ModifierFrequencyMax" }, { 0x20, 0xc000, "ModifierPeriodMax" }, { 0x20, 0xd000, "ModifierChangeSensitivityPercentofRange" }, { 0x20, 0xe000, "ModifierChangeSensitivityPercentRelative" }, { 0x20, 0xf000, "ModifierVendorReserved" }, { 0x40, 0, "MedicalInstrument" }, { 0x40, 0x0001, "MedicalUltrasound" }, { 0x40, 0x0020, "VCRAcquisition" }, { 0x40, 0x0021, "FreezeThaw" }, { 0x40, 0x0022, "ClipStore" }, { 0x40, 0x0023, "Update" }, { 0x40, 0x0024, "Next" }, { 0x40, 0x0025, "Save" }, { 0x40, 0x0026, "Print" }, { 0x40, 0x0027, "MicrophoneEnable" }, { 0x40, 0x0040, "Cine" }, { 0x40, 0x0041, "TransmitPower" }, { 0x40, 0x0042, "Volume" }, { 0x40, 0x0043, "Focus" }, { 0x40, 0x0044, "Depth" }, { 0x40, 0x0060, "SoftStepPrimary" }, { 0x40, 0x0061, "SoftStepSecondary" }, { 0x40, 0x0070, "DepthGainCompensation" }, { 0x40, 0x0080, "ZoomSelect" }, { 0x40, 0x0081, "ZoomAdjust" }, { 0x40, 0x0082, "SpectralDopplerModeSelect" }, { 0x40, 0x0083, "SpectralDopplerAdjust" }, { 0x40, 0x0084, "ColorDopplerModeSelect" }, { 0x40, 0x0085, "ColorDopplerAdjust" }, { 0x40, 0x0086, "MotionModeSelect" }, { 0x40, 0x0087, "MotionModeAdjust" }, { 0x40, 0x0088, "2DModeSelect" }, { 0x40, 0x0089, "2DModeAdjust" }, { 0x40, 0x00a0, "SoftControlSelect" }, { 0x40, 0x00a1, "SoftControlAdjust" }, { 0x41, 0, "BrailleDisplay" }, { 0x41, 0x0001, 
"BrailleDisplay" }, { 0x41, 0x0002, "BrailleRow" }, { 0x41, 0x0003, "8DotBrailleCell" }, { 0x41, 0x0004, "6DotBrailleCell" }, { 0x41, 0x0005, "NumberofBrailleCells" }, { 0x41, 0x0006, "ScreenReaderControl" }, { 0x41, 0x0007, "ScreenReaderIdentifier" }, { 0x41, 0x00fa, "RouterSet1" }, { 0x41, 0x00fb, "RouterSet2" }, { 0x41, 0x00fc, "RouterSet3" }, { 0x41, 0x0100, "RouterKey" }, { 0x41, 0x0101, "RowRouterKey" }, { 0x41, 0x0200, "BrailleButtons" }, { 0x41, 0x0201, "BrailleKeyboardDot1" }, { 0x41, 0x0202, "BrailleKeyboardDot2" }, { 0x41, 0x0203, "BrailleKeyboardDot3" }, { 0x41, 0x0204, "BrailleKeyboardDot4" }, { 0x41, 0x0205, "BrailleKeyboardDot5" }, { 0x41, 0x0206, "BrailleKeyboardDot6" }, { 0x41, 0x0207, "BrailleKeyboardDot7" }, { 0x41, 0x0208, "BrailleKeyboardDot8" }, { 0x41, 0x0209, "BrailleKeyboardSpace" }, { 0x41, 0x020a, "BrailleKeyboardLeftSpace" }, { 0x41, 0x020b, "BrailleKeyboardRightSpace" }, { 0x41, 0x020c, "BrailleFaceControls" }, { 0x41, 0x020d, "BrailleLeftControls" }, { 0x41, 0x020e, "BrailleRightControls" }, { 0x41, 0x020f, "BrailleTopControls" }, { 0x41, 0x0210, "BrailleJoystickCenter" }, { 0x41, 0x0211, "BrailleJoystickUp" }, { 0x41, 0x0212, "BrailleJoystickDown" }, { 0x41, 0x0213, "BrailleJoystickLeft" }, { 0x41, 0x0214, "BrailleJoystickRight" }, { 0x41, 0x0215, "BrailleDPadCenter" }, { 0x41, 0x0216, "BrailleDPadUp" }, { 0x41, 0x0217, "BrailleDPadDown" }, { 0x41, 0x0218, "BrailleDPadLeft" }, { 0x41, 0x0219, "BrailleDPadRight" }, { 0x41, 0x021a, "BraillePanLeft" }, { 0x41, 0x021b, "BraillePanRight" }, { 0x41, 0x021c, "BrailleRockerUp" }, { 0x41, 0x021d, "BrailleRockerDown" }, { 0x41, 0x021e, "BrailleRockerPress" }, { 0x59, 0, "LightingAndIllumination" }, { 0x59, 0x0001, "LampArray" }, { 0x59, 0x0002, "LampArrayAttributesReport" }, { 0x59, 0x0003, "LampCount" }, { 0x59, 0x0004, "BoundingBoxWidthInMicrometers" }, { 0x59, 0x0005, "BoundingBoxHeightInMicrometers" }, { 0x59, 0x0006, "BoundingBoxDepthInMicrometers" }, { 0x59, 0x0007, "LampArrayKind" }, { 0x59, 0x0008, "MinUpdateIntervalInMicroseconds" }, { 0x59, 0x0020, "LampAttributesRequestReport" }, { 0x59, 0x0021, "LampId" }, { 0x59, 0x0022, "LampAttributesResponseReport" }, { 0x59, 0x0023, "PositionXInMicrometers" }, { 0x59, 0x0024, "PositionYInMicrometers" }, { 0x59, 0x0025, "PositionZInMicrometers" }, { 0x59, 0x0026, "LampPurposes" }, { 0x59, 0x0027, "UpdateLatencyInMicroseconds" }, { 0x59, 0x0028, "RedLevelCount" }, { 0x59, 0x0029, "GreenLevelCount" }, { 0x59, 0x002a, "BlueLevelCount" }, { 0x59, 0x002b, "IntensityLevelCount" }, { 0x59, 0x002c, "IsProgrammable" }, { 0x59, 0x002d, "InputBinding" }, { 0x59, 0x0050, "LampMultiUpdateReport" }, { 0x59, 0x0051, "RedUpdateChannel" }, { 0x59, 0x0052, "GreenUpdateChannel" }, { 0x59, 0x0053, "BlueUpdateChannel" }, { 0x59, 0x0054, "IntensityUpdateChannel" }, { 0x59, 0x0055, "LampUpdateFlags" }, { 0x59, 0x0060, "LampRangeUpdateReport" }, { 0x59, 0x0061, "LampIdStart" }, { 0x59, 0x0062, "LampIdEnd" }, { 0x59, 0x0070, "LampArrayControlReport" }, { 0x59, 0x0071, "AutonomousMode" }, { 0x80, 0, "Monitor" }, { 0x80, 0x0001, "MonitorControl" }, { 0x80, 0x0002, "EDIDInformation" }, { 0x80, 0x0003, "VDIFInformation" }, { 0x80, 0x0004, "VESAVersion" }, { 0x81, 0, "MonitorEnumerated" }, { 0x82, 0, "VESAVirtualControls" }, { 0x82, 0x0001, "Degauss" }, { 0x82, 0x0010, "Brightness" }, { 0x82, 0x0012, "Contrast" }, { 0x82, 0x0016, "RedVideoGain" }, { 0x82, 0x0018, "GreenVideoGain" }, { 0x82, 0x001a, "BlueVideoGain" }, { 0x82, 0x001c, "Focus" }, { 0x82, 0x0020, "HorizontalPosition" }, { 0x82, 0x0022, 
"HorizontalSize" }, { 0x82, 0x0024, "HorizontalPincushion" }, { 0x82, 0x0026, "HorizontalPincushionBalance" }, { 0x82, 0x0028, "HorizontalMisconvergence" }, { 0x82, 0x002a, "HorizontalLinearity" }, { 0x82, 0x002c, "HorizontalLinearityBalance" }, { 0x82, 0x0030, "VerticalPosition" }, { 0x82, 0x0032, "VerticalSize" }, { 0x82, 0x0034, "VerticalPincushion" }, { 0x82, 0x0036, "VerticalPincushionBalance" }, { 0x82, 0x0038, "VerticalMisconvergence" }, { 0x82, 0x003a, "VerticalLinearity" }, { 0x82, 0x003c, "VerticalLinearityBalance" }, { 0x82, 0x0040, "ParallelogramDistortionKeyBalance" }, { 0x82, 0x0042, "TrapezoidalDistortionKey" }, { 0x82, 0x0044, "TiltRotation" }, { 0x82, 0x0046, "TopCornerDistortionControl" }, { 0x82, 0x0048, "TopCornerDistortionBalance" }, { 0x82, 0x004a, "BottomCornerDistortionControl" }, { 0x82, 0x004c, "BottomCornerDistortionBalance" }, { 0x82, 0x0056, "HorizontalMoire" }, { 0x82, 0x0058, "VerticalMoire" }, { 0x82, 0x005e, "InputLevelSelect" }, { 0x82, 0x0060, "InputSourceSelect" }, { 0x82, 0x006c, "RedVideoBlackLevel" }, { 0x82, 0x006e, "GreenVideoBlackLevel" }, { 0x82, 0x0070, "BlueVideoBlackLevel" }, { 0x82, 0x00a2, "AutoSizeCenter" }, { 0x82, 0x00a4, "PolarityHorizontalSynchronization" }, { 0x82, 0x00a6, "PolarityVerticalSynchronization" }, { 0x82, 0x00a8, "SynchronizationType" }, { 0x82, 0x00aa, "ScreenOrientation" }, { 0x82, 0x00ac, "HorizontalFrequency" }, { 0x82, 0x00ae, "VerticalFrequency" }, { 0x82, 0x00b0, "Settings" }, { 0x82, 0x00ca, "OnScreenDisplay" }, { 0x82, 0x00d4, "StereoMode" }, { 0x84, 0, "Power" }, { 0x84, 0x0001, "iName" }, { 0x84, 0x0002, "PresentStatus" }, { 0x84, 0x0003, "ChangedStatus" }, { 0x84, 0x0004, "UPS" }, { 0x84, 0x0005, "PowerSupply" }, { 0x84, 0x0010, "BatterySystem" }, { 0x84, 0x0011, "BatterySystemId" }, { 0x84, 0x0012, "Battery" }, { 0x84, 0x0013, "BatteryId" }, { 0x84, 0x0014, "Charger" }, { 0x84, 0x0015, "ChargerId" }, { 0x84, 0x0016, "PowerConverter" }, { 0x84, 0x0017, "PowerConverterId" }, { 0x84, 0x0018, "OutletSystem" }, { 0x84, 0x0019, "OutletSystemId" }, { 0x84, 0x001a, "Input" }, { 0x84, 0x001b, "InputId" }, { 0x84, 0x001c, "Output" }, { 0x84, 0x001d, "OutputId" }, { 0x84, 0x001e, "Flow" }, { 0x84, 0x001f, "FlowId" }, { 0x84, 0x0020, "Outlet" }, { 0x84, 0x0021, "OutletId" }, { 0x84, 0x0022, "Gang" }, { 0x84, 0x0023, "GangId" }, { 0x84, 0x0024, "PowerSummary" }, { 0x84, 0x0025, "PowerSummaryId" }, { 0x84, 0x0030, "Voltage" }, { 0x84, 0x0031, "Current" }, { 0x84, 0x0032, "Frequency" }, { 0x84, 0x0033, "ApparentPower" }, { 0x84, 0x0034, "ActivePower" }, { 0x84, 0x0035, "PercentLoad" }, { 0x84, 0x0036, "Temperature" }, { 0x84, 0x0037, "Humidity" }, { 0x84, 0x0038, "BadCount" }, { 0x84, 0x0040, "ConfigVoltage" }, { 0x84, 0x0041, "ConfigCurrent" }, { 0x84, 0x0042, "ConfigFrequency" }, { 0x84, 0x0043, "ConfigApparentPower" }, { 0x84, 0x0044, "ConfigActivePower" }, { 0x84, 0x0045, "ConfigPercentLoad" }, { 0x84, 0x0046, "ConfigTemperature" }, { 0x84, 0x0047, "ConfigHumidity" }, { 0x84, 0x0050, "SwitchOnControl" }, { 0x84, 0x0051, "SwitchOffControl" }, { 0x84, 0x0052, "ToggleControl" }, { 0x84, 0x0053, "LowVoltageTransfer" }, { 0x84, 0x0054, "HighVoltageTransfer" }, { 0x84, 0x0055, "DelayBeforeReboot" }, { 0x84, 0x0056, "DelayBeforeStartup" }, { 0x84, 0x0057, "DelayBeforeShutdown" }, { 0x84, 0x0058, "Test" }, { 0x84, 0x0059, "ModuleReset" }, { 0x84, 0x005a, "AudibleAlarmControl" }, { 0x84, 0x0060, "Present" }, { 0x84, 0x0061, "Good" }, { 0x84, 0x0062, "InternalFailure" }, { 0x84, 0x0063, "VoltagOutOfRange" }, { 0x84, 0x0064, 
"FrequencyOutOfRange" }, { 0x84, 0x0065, "Overload" }, { 0x84, 0x0066, "OverCharged" }, { 0x84, 0x0067, "OverTemperature" }, { 0x84, 0x0068, "ShutdownRequested" }, { 0x84, 0x0069, "ShutdownImminent" }, { 0x84, 0x006b, "SwitchOnOff" }, { 0x84, 0x006c, "Switchable" }, { 0x84, 0x006d, "Used" }, { 0x84, 0x006e, "Boost" }, { 0x84, 0x006f, "Buck" }, { 0x84, 0x0070, "Initialized" }, { 0x84, 0x0071, "Tested" }, { 0x84, 0x0072, "AwaitingPower" }, { 0x84, 0x0073, "CommunicationLost" }, { 0x84, 0x00fd, "iManufacturer" }, { 0x84, 0x00fe, "iProduct" }, { 0x84, 0x00ff, "iSerialNumber" }, { 0x85, 0, "BatterySystem" }, { 0x85, 0x0001, "SmartBatteryBatteryMode" }, { 0x85, 0x0002, "SmartBatteryBatteryStatus" }, { 0x85, 0x0003, "SmartBatteryAlarmWarning" }, { 0x85, 0x0004, "SmartBatteryChargerMode" }, { 0x85, 0x0005, "SmartBatteryChargerStatus" }, { 0x85, 0x0006, "SmartBatteryChargerSpecInfo" }, { 0x85, 0x0007, "SmartBatterySelectorState" }, { 0x85, 0x0008, "SmartBatterySelectorPresets" }, { 0x85, 0x0009, "SmartBatterySelectorInfo" }, { 0x85, 0x0010, "OptionalMfgFunction1" }, { 0x85, 0x0011, "OptionalMfgFunction2" }, { 0x85, 0x0012, "OptionalMfgFunction3" }, { 0x85, 0x0013, "OptionalMfgFunction4" }, { 0x85, 0x0014, "OptionalMfgFunction5" }, { 0x85, 0x0015, "ConnectionToSMBus" }, { 0x85, 0x0016, "OutputConnection" }, { 0x85, 0x0017, "ChargerConnection" }, { 0x85, 0x0018, "BatteryInsertion" }, { 0x85, 0x0019, "UseNext" }, { 0x85, 0x001a, "OKToUse" }, { 0x85, 0x001b, "BatterySupported" }, { 0x85, 0x001c, "SelectorRevision" }, { 0x85, 0x001d, "ChargingIndicator" }, { 0x85, 0x0028, "ManufacturerAccess" }, { 0x85, 0x0029, "RemainingCapacityLimit" }, { 0x85, 0x002a, "RemainingTimeLimit" }, { 0x85, 0x002b, "AtRate" }, { 0x85, 0x002c, "CapacityMode" }, { 0x85, 0x002d, "BroadcastToCharger" }, { 0x85, 0x002e, "PrimaryBattery" }, { 0x85, 0x002f, "ChargeController" }, { 0x85, 0x0040, "TerminateCharge" }, { 0x85, 0x0041, "TerminateDischarge" }, { 0x85, 0x0042, "BelowRemainingCapacityLimit" }, { 0x85, 0x0043, "RemainingTimeLimitExpired" }, { 0x85, 0x0044, "Charging" }, { 0x85, 0x0045, "Discharging" }, { 0x85, 0x0046, "FullyCharged" }, { 0x85, 0x0047, "FullyDischarged" }, { 0x85, 0x0048, "ConditioningFlag" }, { 0x85, 0x0049, "AtRateOK" }, { 0x85, 0x004a, "SmartBatteryErrorCode" }, { 0x85, 0x004b, "NeedReplacement" }, { 0x85, 0x0060, "AtRateTimeToFull" }, { 0x85, 0x0061, "AtRateTimeToEmpty" }, { 0x85, 0x0062, "AverageCurrent" }, { 0x85, 0x0063, "MaxError" }, { 0x85, 0x0064, "RelativeStateOfCharge" }, { 0x85, 0x0065, "AbsoluteStateOfCharge" }, { 0x85, 0x0066, "RemainingCapacity" }, { 0x85, 0x0067, "FullChargeCapacity" }, { 0x85, 0x0068, "RunTimeToEmpty" }, { 0x85, 0x0069, "AverageTimeToEmpty" }, { 0x85, 0x006a, "AverageTimeToFull" }, { 0x85, 0x006b, "CycleCount" }, { 0x85, 0x0080, "BatteryPackModelLevel" }, { 0x85, 0x0081, "InternalChargeController" }, { 0x85, 0x0082, "PrimaryBatterySupport" }, { 0x85, 0x0083, "DesignCapacity" }, { 0x85, 0x0084, "SpecificationInfo" }, { 0x85, 0x0085, "ManufactureDate" }, { 0x85, 0x0086, "SerialNumber" }, { 0x85, 0x0087, "iManufacturerName" }, { 0x85, 0x0088, "iDeviceName" }, { 0x85, 0x0089, "iDeviceChemistry" }, { 0x85, 0x008a, "ManufacturerData" }, { 0x85, 0x008b, "Rechargable" }, { 0x85, 0x008c, "WarningCapacityLimit" }, { 0x85, 0x008d, "CapacityGranularity1" }, { 0x85, 0x008e, "CapacityGranularity2" }, { 0x85, 0x008f, "iOEMInformation" }, { 0x85, 0x00c0, "InhibitCharge" }, { 0x85, 0x00c1, "EnablePolling" }, { 0x85, 0x00c2, "ResetToZero" }, { 0x85, 0x00d0, "ACPresent" }, { 0x85, 0x00d1, 
"BatteryPresent" }, { 0x85, 0x00d2, "PowerFail" }, { 0x85, 0x00d3, "AlarmInhibited" }, { 0x85, 0x00d4, "ThermistorUnderRange" }, { 0x85, 0x00d5, "ThermistorHot" }, { 0x85, 0x00d6, "ThermistorCold" }, { 0x85, 0x00d7, "ThermistorOverRange" }, { 0x85, 0x00d8, "VoltageOutOfRange" }, { 0x85, 0x00d9, "CurrentOutOfRange" }, { 0x85, 0x00da, "CurrentNotRegulated" }, { 0x85, 0x00db, "VoltageNotRegulated" }, { 0x85, 0x00dc, "MasterMode" }, { 0x85, 0x00f0, "ChargerSelectorSupport" }, { 0x85, 0x00f1, "ChargerSpec" }, { 0x85, 0x00f2, "Level2" }, { 0x85, 0x00f3, "Level3" }, { 0x8c, 0, "BarcodeScanner" }, { 0x8c, 0x0001, "BarcodeBadgeReader" }, { 0x8c, 0x0002, "BarcodeScanner" }, { 0x8c, 0x0003, "DumbBarCodeScanner" }, { 0x8c, 0x0004, "CordlessScannerBase" }, { 0x8c, 0x0005, "BarCodeScannerCradle" }, { 0x8c, 0x0010, "AttributeReport" }, { 0x8c, 0x0011, "SettingsReport" }, { 0x8c, 0x0012, "ScannedDataReport" }, { 0x8c, 0x0013, "RawScannedDataReport" }, { 0x8c, 0x0014, "TriggerReport" }, { 0x8c, 0x0015, "StatusReport" }, { 0x8c, 0x0016, "UPCEANControlReport" }, { 0x8c, 0x0017, "EAN23LabelControlReport" }, { 0x8c, 0x0018, "Code39ControlReport" }, { 0x8c, 0x0019, "Interleaved2of5ControlReport" }, { 0x8c, 0x001a, "Standard2of5ControlReport" }, { 0x8c, 0x001b, "MSIPlesseyControlReport" }, { 0x8c, 0x001c, "CodabarControlReport" }, { 0x8c, 0x001d, "Code128ControlReport" }, { 0x8c, 0x001e, "Misc1DControlReport" }, { 0x8c, 0x001f, "2DControlReport" }, { 0x8c, 0x0030, "AimingPointerMode" }, { 0x8c, 0x0031, "BarCodePresentSensor" }, { 0x8c, 0x0032, "Class1ALaser" }, { 0x8c, 0x0033, "Class2Laser" }, { 0x8c, 0x0034, "HeaterPresent" }, { 0x8c, 0x0035, "ContactScanner" }, { 0x8c, 0x0036, "ElectronicArticleSurveillanceNotification" }, { 0x8c, 0x0037, "ConstantElectronicArticleSurveillance" }, { 0x8c, 0x0038, "ErrorIndication" }, { 0x8c, 0x0039, "FixedBeeper" }, { 0x8c, 0x003a, "GoodDecodeIndication" }, { 0x8c, 0x003b, "HandsFreeScanning" }, { 0x8c, 0x003c, "IntrinsicallySafe" }, { 0x8c, 0x003d, "KlasseEinsLaser" }, { 0x8c, 0x003e, "LongRangeScanner" }, { 0x8c, 0x003f, "MirrorSpeedControl" }, { 0x8c, 0x0040, "NotOnFileIndication" }, { 0x8c, 0x0041, "ProgrammableBeeper" }, { 0x8c, 0x0042, "Triggerless" }, { 0x8c, 0x0043, "Wand" }, { 0x8c, 0x0044, "WaterResistant" }, { 0x8c, 0x0045, "MultiRangeScanner" }, { 0x8c, 0x0046, "ProximitySensor" }, { 0x8c, 0x004d, "FragmentDecoding" }, { 0x8c, 0x004e, "ScannerReadConfidence" }, { 0x8c, 0x004f, "DataPrefix" }, { 0x8c, 0x0050, "PrefixAIMI" }, { 0x8c, 0x0051, "PrefixNone" }, { 0x8c, 0x0052, "PrefixProprietary" }, { 0x8c, 0x0055, "ActiveTime" }, { 0x8c, 0x0056, "AimingLaserPattern" }, { 0x8c, 0x0057, "BarCodePresent" }, { 0x8c, 0x0058, "BeeperState" }, { 0x8c, 0x0059, "LaserOnTime" }, { 0x8c, 0x005a, "LaserState" }, { 0x8c, 0x005b, "LockoutTime" }, { 0x8c, 0x005c, "MotorState" }, { 0x8c, 0x005d, "MotorTimeout" }, { 0x8c, 0x005e, "PowerOnResetScanner" }, { 0x8c, 0x005f, "PreventReadofBarcodes" }, { 0x8c, 0x0060, "InitiateBarcodeRead" }, { 0x8c, 0x0061, "TriggerState" }, { 0x8c, 0x0062, "TriggerMode" }, { 0x8c, 0x0063, "TriggerModeBlinkingLaserOn" }, { 0x8c, 0x0064, "TriggerModeContinuousLaserOn" }, { 0x8c, 0x0065, "TriggerModeLaseronwhilePulled" }, { 0x8c, 0x0066, "TriggerModeLaserstaysonafterrelease" }, { 0x8c, 0x006d, "CommitParameterstoNVM" }, { 0x8c, 0x006e, "ParameterScanning" }, { 0x8c, 0x006f, "ParametersChanged" }, { 0x8c, 0x0070, "Setparameterdefaultvalues" }, { 0x8c, 0x0075, "ScannerInCradle" }, { 0x8c, 0x0076, "ScannerInRange" }, { 0x8c, 0x007a, "AimDuration" }, { 0x8c, 
0x007b, "GoodReadLampDuration" }, { 0x8c, 0x007c, "GoodReadLampIntensity" }, { 0x8c, 0x007d, "GoodReadLED" }, { 0x8c, 0x007e, "GoodReadToneFrequency" }, { 0x8c, 0x007f, "GoodReadToneLength" }, { 0x8c, 0x0080, "GoodReadToneVolume" }, { 0x8c, 0x0082, "NoReadMessage" }, { 0x8c, 0x0083, "NotonFileVolume" }, { 0x8c, 0x0084, "PowerupBeep" }, { 0x8c, 0x0085, "SoundErrorBeep" }, { 0x8c, 0x0086, "SoundGoodReadBeep" }, { 0x8c, 0x0087, "SoundNotOnFileBeep" }, { 0x8c, 0x0088, "GoodReadWhentoWrite" }, { 0x8c, 0x0089, "GRWTIAfterDecode" }, { 0x8c, 0x008a, "GRWTIBeepLampaftertransmit" }, { 0x8c, 0x008b, "GRWTINoBeepLampuseatall" }, { 0x8c, 0x0091, "BooklandEAN" }, { 0x8c, 0x0092, "ConvertEAN8to13Type" }, { 0x8c, 0x0093, "ConvertUPCAtoEAN13" }, { 0x8c, 0x0094, "ConvertUPCEtoA" }, { 0x8c, 0x0095, "EAN13" }, { 0x8c, 0x0096, "EAN8" }, { 0x8c, 0x0097, "EAN99128Mandatory" }, { 0x8c, 0x0098, "EAN99P5128Optional" }, { 0x8c, 0x0099, "EnableEANTwoLabel" }, { 0x8c, 0x009a, "UPCEAN" }, { 0x8c, 0x009b, "UPCEANCouponCode" }, { 0x8c, 0x009c, "UPCEANPeriodicals" }, { 0x8c, 0x009d, "UPCA" }, { 0x8c, 0x009e, "UPCAwith128Mandatory" }, { 0x8c, 0x009f, "UPCAwith128Optional" }, { 0x8c, 0x00a0, "UPCAwithP5Optional" }, { 0x8c, 0x00a1, "UPCE" }, { 0x8c, 0x00a2, "UPCE1" }, { 0x8c, 0x00a9, "Periodical" }, { 0x8c, 0x00aa, "PeriodicalAutoDiscriminate2" }, { 0x8c, 0x00ab, "PeriodicalOnlyDecodewith2" }, { 0x8c, 0x00ac, "PeriodicalIgnore2" }, { 0x8c, 0x00ad, "PeriodicalAutoDiscriminate5" }, { 0x8c, 0x00ae, "PeriodicalOnlyDecodewith5" }, { 0x8c, 0x00af, "PeriodicalIgnore5" }, { 0x8c, 0x00b0, "Check" }, { 0x8c, 0x00b1, "CheckDisablePrice" }, { 0x8c, 0x00b2, "CheckEnable4digitPrice" }, { 0x8c, 0x00b3, "CheckEnable5digitPrice" }, { 0x8c, 0x00b4, "CheckEnableEuropean4digitPrice" }, { 0x8c, 0x00b5, "CheckEnableEuropean5digitPrice" }, { 0x8c, 0x00b7, "EANTwoLabel" }, { 0x8c, 0x00b8, "EANThreeLabel" }, { 0x8c, 0x00b9, "EAN8FlagDigit1" }, { 0x8c, 0x00ba, "EAN8FlagDigit2" }, { 0x8c, 0x00bb, "EAN8FlagDigit3" }, { 0x8c, 0x00bc, "EAN13FlagDigit1" }, { 0x8c, 0x00bd, "EAN13FlagDigit2" }, { 0x8c, 0x00be, "EAN13FlagDigit3" }, { 0x8c, 0x00bf, "AddEAN23LabelDefinition" }, { 0x8c, 0x00c0, "ClearallEAN23LabelDefinitions" }, { 0x8c, 0x00c3, "Codabar" }, { 0x8c, 0x00c4, "Code128" }, { 0x8c, 0x00c7, "Code39" }, { 0x8c, 0x00c8, "Code93" }, { 0x8c, 0x00c9, "FullASCIIConversion" }, { 0x8c, 0x00ca, "Interleaved2of5" }, { 0x8c, 0x00cb, "ItalianPharmacyCode" }, { 0x8c, 0x00cc, "MSIPlessey" }, { 0x8c, 0x00cd, "Standard2of5IATA" }, { 0x8c, 0x00ce, "Standard2of5" }, { 0x8c, 0x00d3, "TransmitStartStop" }, { 0x8c, 0x00d4, "TriOptic" }, { 0x8c, 0x00d5, "UCCEAN128" }, { 0x8c, 0x00d6, "CheckDigit" }, { 0x8c, 0x00d7, "CheckDigitDisable" }, { 0x8c, 0x00d8, "CheckDigitEnableInterleaved2of5OPCC" }, { 0x8c, 0x00d9, "CheckDigitEnableInterleaved2of5USS" }, { 0x8c, 0x00da, "CheckDigitEnableStandard2of5OPCC" }, { 0x8c, 0x00db, "CheckDigitEnableStandard2of5USS" }, { 0x8c, 0x00dc, "CheckDigitEnableOneMSIPlessey" }, { 0x8c, 0x00dd, "CheckDigitEnableTwoMSIPlessey" }, { 0x8c, 0x00de, "CheckDigitCodabarEnable" }, { 0x8c, 0x00df, "CheckDigitCode39Enable" }, { 0x8c, 0x00f0, "TransmitCheckDigit" }, { 0x8c, 0x00f1, "DisableCheckDigitTransmit" }, { 0x8c, 0x00f2, "EnableCheckDigitTransmit" }, { 0x8c, 0x00fb, "SymbologyIdentifier1" }, { 0x8c, 0x00fc, "SymbologyIdentifier2" }, { 0x8c, 0x00fd, "SymbologyIdentifier3" }, { 0x8c, 0x00fe, "DecodedData" }, { 0x8c, 0x00ff, "DecodeDataContinued" }, { 0x8c, 0x0100, "BarSpaceData" }, { 0x8c, 0x0101, "ScannerDataAccuracy" }, { 0x8c, 0x0102, 
"RawDataPolarity" }, { 0x8c, 0x0103, "PolarityInvertedBarCode" }, { 0x8c, 0x0104, "PolarityNormalBarCode" }, { 0x8c, 0x0106, "MinimumLengthtoDecode" }, { 0x8c, 0x0107, "MaximumLengthtoDecode" }, { 0x8c, 0x0108, "DiscreteLengthtoDecode1" }, { 0x8c, 0x0109, "DiscreteLengthtoDecode2" }, { 0x8c, 0x010a, "DataLengthMethod" }, { 0x8c, 0x010b, "DLMethodReadany" }, { 0x8c, 0x010c, "DLMethodCheckinRange" }, { 0x8c, 0x010d, "DLMethodCheckforDiscrete" }, { 0x8c, 0x0110, "AztecCode" }, { 0x8c, 0x0111, "BC412" }, { 0x8c, 0x0112, "ChannelCode" }, { 0x8c, 0x0113, "Code16" }, { 0x8c, 0x0114, "Code32" }, { 0x8c, 0x0115, "Code49" }, { 0x8c, 0x0116, "CodeOne" }, { 0x8c, 0x0117, "Colorcode" }, { 0x8c, 0x0118, "DataMatrix" }, { 0x8c, 0x0119, "MaxiCode" }, { 0x8c, 0x011a, "MicroPDF" }, { 0x8c, 0x011b, "PDF417" }, { 0x8c, 0x011c, "PosiCode" }, { 0x8c, 0x011d, "QRCode" }, { 0x8c, 0x011e, "SuperCode" }, { 0x8c, 0x011f, "UltraCode" }, { 0x8c, 0x0120, "USD5SlugCode" }, { 0x8c, 0x0121, "VeriCode" }, { 0x8d, 0, "Scales" }, { 0x8d, 0x0001, "Scales" }, { 0x8d, 0x0020, "ScaleDevice" }, { 0x8d, 0x0021, "ScaleClass" }, { 0x8d, 0x0022, "ScaleClassIMetric" }, { 0x8d, 0x0023, "ScaleClassIIMetric" }, { 0x8d, 0x0024, "ScaleClassIIIMetric" }, { 0x8d, 0x0025, "ScaleClassIIILMetric" }, { 0x8d, 0x0026, "ScaleClassIVMetric" }, { 0x8d, 0x0027, "ScaleClassIIIEnglish" }, { 0x8d, 0x0028, "ScaleClassIIILEnglish" }, { 0x8d, 0x0029, "ScaleClassIVEnglish" }, { 0x8d, 0x002a, "ScaleClassGeneric" }, { 0x8d, 0x0030, "ScaleAttributeReport" }, { 0x8d, 0x0031, "ScaleControlReport" }, { 0x8d, 0x0032, "ScaleDataReport" }, { 0x8d, 0x0033, "ScaleStatusReport" }, { 0x8d, 0x0034, "ScaleWeightLimitReport" }, { 0x8d, 0x0035, "ScaleStatisticsReport" }, { 0x8d, 0x0040, "DataWeight" }, { 0x8d, 0x0041, "DataScaling" }, { 0x8d, 0x0050, "WeightUnit" }, { 0x8d, 0x0051, "WeightUnitMilligram" }, { 0x8d, 0x0052, "WeightUnitGram" }, { 0x8d, 0x0053, "WeightUnitKilogram" }, { 0x8d, 0x0054, "WeightUnitCarats" }, { 0x8d, 0x0055, "WeightUnitTaels" }, { 0x8d, 0x0056, "WeightUnitGrains" }, { 0x8d, 0x0057, "WeightUnitPennyweights" }, { 0x8d, 0x0058, "WeightUnitMetricTon" }, { 0x8d, 0x0059, "WeightUnitAvoirTon" }, { 0x8d, 0x005a, "WeightUnitTroyOunce" }, { 0x8d, 0x005b, "WeightUnitOunce" }, { 0x8d, 0x005c, "WeightUnitPound" }, { 0x8d, 0x0060, "CalibrationCount" }, { 0x8d, 0x0061, "ReZeroCount" }, { 0x8d, 0x0070, "ScaleStatus" }, { 0x8d, 0x0071, "ScaleStatusFault" }, { 0x8d, 0x0072, "ScaleStatusStableatCenterofZero" }, { 0x8d, 0x0073, "ScaleStatusInMotion" }, { 0x8d, 0x0074, "ScaleStatusWeightStable" }, { 0x8d, 0x0075, "ScaleStatusUnderZero" }, { 0x8d, 0x0076, "ScaleStatusOverWeightLimit" }, { 0x8d, 0x0077, "ScaleStatusRequiresCalibration" }, { 0x8d, 0x0078, "ScaleStatusRequiresRezeroing" }, { 0x8d, 0x0080, "ZeroScale" }, { 0x8d, 0x0081, "EnforcedZeroReturn" }, { 0x8e, 0, "MagneticStripeReader" }, { 0x8e, 0x0001, "MSRDeviceReadOnly" }, { 0x8e, 0x0011, "Track1Length" }, { 0x8e, 0x0012, "Track2Length" }, { 0x8e, 0x0013, "Track3Length" }, { 0x8e, 0x0014, "TrackJISLength" }, { 0x8e, 0x0020, "TrackData" }, { 0x8e, 0x0021, "Track1Data" }, { 0x8e, 0x0022, "Track2Data" }, { 0x8e, 0x0023, "Track3Data" }, { 0x8e, 0x0024, "TrackJISData" }, { 0x90, 0, "CameraControl" }, { 0x90, 0x0020, "CameraAutofocus" }, { 0x90, 0x0021, "CameraShutter" }, { 0x91, 0, "Arcade" }, { 0x91, 0x0001, "GeneralPurposeIOCard" }, { 0x91, 0x0002, "CoinDoor" }, { 0x91, 0x0003, "WatchdogTimer" }, { 0x91, 0x0030, "GeneralPurposeAnalogInputState" }, { 0x91, 0x0031, "GeneralPurposeDigitalInputState" }, { 0x91, 0x0032, 
"GeneralPurposeOpticalInputState" }, { 0x91, 0x0033, "GeneralPurposeDigitalOutputState" }, { 0x91, 0x0034, "NumberofCoinDoors" }, { 0x91, 0x0035, "CoinDrawerDropCount" }, { 0x91, 0x0036, "CoinDrawerStart" }, { 0x91, 0x0037, "CoinDrawerService" }, { 0x91, 0x0038, "CoinDrawerTilt" }, { 0x91, 0x0039, "CoinDoorTest" }, { 0x91, 0x0040, "CoinDoorLockout" }, { 0x91, 0x0041, "WatchdogTimeout" }, { 0x91, 0x0042, "WatchdogAction" }, { 0x91, 0x0043, "WatchdogReboot" }, { 0x91, 0x0044, "WatchdogRestart" }, { 0x91, 0x0045, "AlarmInput" }, { 0x91, 0x0046, "CoinDoorCounter" }, { 0x91, 0x0047, "IODirectionMapping" }, { 0x91, 0x0048, "SetIODirectionMapping" }, { 0x91, 0x0049, "ExtendedOpticalInputState" }, { 0x91, 0x004a, "PinPadInputState" }, { 0x91, 0x004b, "PinPadStatus" }, { 0x91, 0x004c, "PinPadOutput" }, { 0x91, 0x004d, "PinPadCommand" }, { 0xf1d0, 0, "FIDOAlliance" }, { 0xf1d0, 0x0001, "U2FAuthenticatorDevice" }, { 0xf1d0, 0x0020, "InputReportData" }, { 0xf1d0, 0x0021, "OutputReportData" }, /* pages 0xff00 to 0xffff are vendor-specific */ { 0xffff, 0, "Vendor-specific-FF" }, { 0, 0, NULL } }; /* Either output directly into simple seq_file, or (if f == NULL) * allocate a separate buffer that will then be passed to the 'events' * ringbuffer. * * This is because these functions can be called both for "one-shot" * "rdesc" while resolving, or for blocking "events". * * This holds both for resolv_usage_page() and hid_resolv_usage(). */ static char *resolv_usage_page(unsigned page, struct seq_file *f) { const struct hid_usage_entry *p; char *buf = NULL; if (!f) { buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_ATOMIC); if (!buf) return ERR_PTR(-ENOMEM); } for (p = hid_usage_table; p->description; p++) if (p->page == page) { if (!f) { snprintf(buf, HID_DEBUG_BUFSIZE, "%s", p->description); return buf; } else { seq_printf(f, "%s", p->description); return NULL; } } if (!f) snprintf(buf, HID_DEBUG_BUFSIZE, "%04x", page); else seq_printf(f, "%04x", page); return buf; } char *hid_resolv_usage(unsigned usage, struct seq_file *f) { const struct hid_usage_entry *p; const struct hid_usage_entry *m; char *buf = NULL; int len = 0; const char *modifier = NULL; unsigned int usage_modifier = usage & 0xF000; unsigned int usage_actual = usage & 0xFFFF; buf = resolv_usage_page(usage >> 16, f); if (IS_ERR(buf)) { pr_err("error allocating HID debug buffer\n"); return NULL; } if (!f) { len = strlen(buf); len += scnprintf(buf + len, HID_DEBUG_BUFSIZE - len, "."); } else { seq_printf(f, "."); } for (p = hid_usage_table; p->description; p++) if (p->page == (usage >> 16)) { if (p->page == 0x20 && usage_modifier) { for (m = p; m->description; m++) { if (p->page == m->page && m->usage == usage_modifier) { modifier = m->description; break; } } if (modifier) usage_actual = usage_actual & 0x0FFF; } if (!modifier) modifier = ""; for(++p; p->description && p->usage != 0; p++) if (p->usage == usage_actual) { if (!f) snprintf(buf + len, HID_DEBUG_BUFSIZE - len, "%s%s", p->description, modifier); else seq_printf(f, "%s%s", p->description, modifier); return buf; } break; } if (!f) snprintf(buf + len, HID_DEBUG_BUFSIZE - len, "%04x", usage & 0xffff); else seq_printf(f, "%04x", usage & 0xffff); return buf; } EXPORT_SYMBOL_GPL(hid_resolv_usage); static void tab(int n, struct seq_file *f) { seq_printf(f, "%*s", n, ""); } void hid_dump_field(struct hid_field *field, int n, struct seq_file *f) { int j; if (field->physical) { tab(n, f); seq_printf(f, "Physical("); hid_resolv_usage(field->physical, f); seq_printf(f, ")\n"); } if (field->logical) { tab(n, f); 
seq_printf(f, "Logical("); hid_resolv_usage(field->logical, f); seq_printf(f, ")\n"); } if (field->application) { tab(n, f); seq_printf(f, "Application("); hid_resolv_usage(field->application, f); seq_printf(f, ")\n"); } tab(n, f); seq_printf(f, "Usage(%d)\n", field->maxusage); for (j = 0; j < field->maxusage; j++) { tab(n+2, f); hid_resolv_usage(field->usage[j].hid, f); seq_printf(f, "\n"); } if (field->logical_minimum != field->logical_maximum) { tab(n, f); seq_printf(f, "Logical Minimum(%d)\n", field->logical_minimum); tab(n, f); seq_printf(f, "Logical Maximum(%d)\n", field->logical_maximum); } if (field->physical_minimum != field->physical_maximum) { tab(n, f); seq_printf(f, "Physical Minimum(%d)\n", field->physical_minimum); tab(n, f); seq_printf(f, "Physical Maximum(%d)\n", field->physical_maximum); } if (field->unit_exponent) { tab(n, f); seq_printf(f, "Unit Exponent(%d)\n", field->unit_exponent); } if (field->unit) { static const char *systems[5] = { "None", "SI Linear", "SI Rotation", "English Linear", "English Rotation" }; static const char *units[5][8] = { { "None", "None", "None", "None", "None", "None", "None", "None" }, { "None", "Centimeter", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" }, { "None", "Radians", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" }, { "None", "Inch", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" }, { "None", "Degrees", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" } }; int i; int sys; __u32 data = field->unit; /* First nibble tells us which system we're in. */ sys = data & 0xf; data >>= 4; if(sys > 4) { tab(n, f); seq_printf(f, "Unit(Invalid)\n"); } else { int earlier_unit = 0; tab(n, f); seq_printf(f, "Unit(%s : ", systems[sys]); for (i=1 ; i<sizeof(__u32)*2 ; i++) { char nibble = data & 0xf; data >>= 4; if (nibble != 0) { if(earlier_unit++ > 0) seq_printf(f, "*"); seq_printf(f, "%s", units[sys][i]); if(nibble != 1) { /* This is a _signed_ nibble(!) */ int val = nibble & 0x7; if(nibble & 0x08) val = -((0x7 & ~val) +1); seq_printf(f, "^%d", val); } } } seq_printf(f, ")\n"); } } tab(n, f); seq_printf(f, "Report Size(%u)\n", field->report_size); tab(n, f); seq_printf(f, "Report Count(%u)\n", field->report_count); tab(n, f); seq_printf(f, "Report Offset(%u)\n", field->report_offset); tab(n, f); seq_printf(f, "Flags( "); j = field->flags; seq_printf(f, "%s", HID_MAIN_ITEM_CONSTANT & j ? "Constant " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_VARIABLE & j ? "Variable " : "Array "); seq_printf(f, "%s", HID_MAIN_ITEM_RELATIVE & j ? "Relative " : "Absolute "); seq_printf(f, "%s", HID_MAIN_ITEM_WRAP & j ? "Wrap " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_NONLINEAR & j ? "NonLinear " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_NO_PREFERRED & j ? "NoPreferredState " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_NULL_STATE & j ? "NullState " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_VOLATILE & j ? "Volatile " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_BUFFERED_BYTE & j ? 
"BufferedByte " : ""); seq_printf(f, ")\n"); } EXPORT_SYMBOL_GPL(hid_dump_field); void hid_dump_device(struct hid_device *device, struct seq_file *f) { struct hid_report_enum *report_enum; struct hid_report *report; struct list_head *list; unsigned i,k; static const char *table[] = {"INPUT", "OUTPUT", "FEATURE"}; for (i = 0; i < HID_REPORT_TYPES; i++) { report_enum = device->report_enum + i; list = report_enum->report_list.next; while (list != &report_enum->report_list) { report = (struct hid_report *) list; tab(2, f); seq_printf(f, "%s", table[i]); if (report->id) seq_printf(f, "(%d)", report->id); seq_printf(f, "[%s]", table[report->type]); seq_printf(f, "\n"); for (k = 0; k < report->maxfield; k++) { tab(4, f); seq_printf(f, "Field(%d)\n", k); hid_dump_field(report->field[k], 6, f); } list = list->next; } } } EXPORT_SYMBOL_GPL(hid_dump_device); /* enqueue string to 'events' ring buffer */ void hid_debug_event(struct hid_device *hdev, char *buf) { struct hid_debug_list *list; unsigned long flags; spin_lock_irqsave(&hdev->debug_list_lock, flags); list_for_each_entry(list, &hdev->debug_list, node) kfifo_in(&list->hid_debug_fifo, buf, strlen(buf)); spin_unlock_irqrestore(&hdev->debug_list_lock, flags); wake_up_interruptible(&hdev->debug_wait); } EXPORT_SYMBOL_GPL(hid_debug_event); void hid_dump_report(struct hid_device *hid, int type, u8 *data, int size) { struct hid_report_enum *report_enum; char *buf; unsigned int i; buf = kmalloc(HID_DEBUG_BUFSIZE, GFP_ATOMIC); if (!buf) return; report_enum = hid->report_enum + type; /* dump the report */ snprintf(buf, HID_DEBUG_BUFSIZE - 1, "\nreport (size %u) (%snumbered) = ", size, report_enum->numbered ? "" : "un"); hid_debug_event(hid, buf); for (i = 0; i < size; i++) { snprintf(buf, HID_DEBUG_BUFSIZE - 1, " %02x", data[i]); hid_debug_event(hid, buf); } hid_debug_event(hid, "\n"); kfree(buf); } EXPORT_SYMBOL_GPL(hid_dump_report); void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 value) { char *buf; int len; buf = hid_resolv_usage(usage->hid, NULL); if (!buf) return; len = strlen(buf); snprintf(buf + len, HID_DEBUG_BUFSIZE - len - 1, " = %d\n", value); hid_debug_event(hdev, buf); kfree(buf); wake_up_interruptible(&hdev->debug_wait); } EXPORT_SYMBOL_GPL(hid_dump_input); static const char *events[EV_MAX + 1] = { [EV_SYN] = "Sync", [EV_KEY] = "Key", [EV_REL] = "Relative", [EV_ABS] = "Absolute", [EV_MSC] = "Misc", [EV_LED] = "LED", [EV_SND] = "Sound", [EV_REP] = "Repeat", [EV_FF] = "ForceFeedback", [EV_PWR] = "Power", [EV_FF_STATUS] = "ForceFeedbackStatus", [EV_SW] = "Software", }; static const char *syncs[SYN_CNT] = { [SYN_REPORT] = "Report", [SYN_CONFIG] = "Config", [SYN_MT_REPORT] = "MT Report", [SYN_DROPPED] = "Dropped", }; static const char *keys[KEY_MAX + 1] = { [KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc", [KEY_1] = "1", [KEY_2] = "2", [KEY_3] = "3", [KEY_4] = "4", [KEY_5] = "5", [KEY_6] = "6", [KEY_7] = "7", [KEY_8] = "8", [KEY_9] = "9", [KEY_0] = "0", [KEY_MINUS] = "Minus", [KEY_EQUAL] = "Equal", [KEY_BACKSPACE] = "Backspace", [KEY_TAB] = "Tab", [KEY_Q] = "Q", [KEY_W] = "W", [KEY_E] = "E", [KEY_R] = "R", [KEY_T] = "T", [KEY_Y] = "Y", [KEY_U] = "U", [KEY_I] = "I", [KEY_O] = "O", [KEY_P] = "P", [KEY_LEFTBRACE] = "LeftBrace", [KEY_RIGHTBRACE] = "RightBrace", [KEY_ENTER] = "Enter", [KEY_LEFTCTRL] = "LeftControl", [KEY_A] = "A", [KEY_S] = "S", [KEY_D] = "D", [KEY_F] = "F", [KEY_G] = "G", [KEY_H] = "H", [KEY_J] = "J", [KEY_K] = "K", [KEY_L] = "L", [KEY_SEMICOLON] = "Semicolon", [KEY_APOSTROPHE] = "Apostrophe", [KEY_GRAVE] = 
"Grave", [KEY_LEFTSHIFT] = "LeftShift", [KEY_BACKSLASH] = "BackSlash", [KEY_Z] = "Z", [KEY_X] = "X", [KEY_C] = "C", [KEY_V] = "V", [KEY_B] = "B", [KEY_N] = "N", [KEY_M] = "M", [KEY_COMMA] = "Comma", [KEY_DOT] = "Dot", [KEY_SLASH] = "Slash", [KEY_RIGHTSHIFT] = "RightShift", [KEY_KPASTERISK] = "KPAsterisk", [KEY_LEFTALT] = "LeftAlt", [KEY_SPACE] = "Space", [KEY_CAPSLOCK] = "CapsLock", [KEY_F1] = "F1", [KEY_F2] = "F2", [KEY_F3] = "F3", [KEY_F4] = "F4", [KEY_F5] = "F5", [KEY_F6] = "F6", [KEY_F7] = "F7", [KEY_F8] = "F8", [KEY_F9] = "F9", [KEY_F10] = "F10", [KEY_NUMLOCK] = "NumLock", [KEY_SCROLLLOCK] = "ScrollLock", [KEY_KP7] = "KP7", [KEY_KP8] = "KP8", [KEY_KP9] = "KP9", [KEY_KPMINUS] = "KPMinus", [KEY_KP4] = "KP4", [KEY_KP5] = "KP5", [KEY_KP6] = "KP6", [KEY_KPPLUS] = "KPPlus", [KEY_KP1] = "KP1", [KEY_KP2] = "KP2", [KEY_KP3] = "KP3", [KEY_KP0] = "KP0", [KEY_KPDOT] = "KPDot", [KEY_ZENKAKUHANKAKU] = "Zenkaku/Hankaku", [KEY_102ND] = "102nd", [KEY_F11] = "F11", [KEY_F12] = "F12", [KEY_RO] = "RO", [KEY_KATAKANA] = "Katakana", [KEY_HIRAGANA] = "HIRAGANA", [KEY_HENKAN] = "Henkan", [KEY_KATAKANAHIRAGANA] = "Katakana/Hiragana", [KEY_MUHENKAN] = "Muhenkan", [KEY_KPJPCOMMA] = "KPJpComma", [KEY_KPENTER] = "KPEnter", [KEY_RIGHTCTRL] = "RightCtrl", [KEY_KPSLASH] = "KPSlash", [KEY_SYSRQ] = "SysRq", [KEY_RIGHTALT] = "RightAlt", [KEY_LINEFEED] = "LineFeed", [KEY_HOME] = "Home", [KEY_UP] = "Up", [KEY_PAGEUP] = "PageUp", [KEY_LEFT] = "Left", [KEY_RIGHT] = "Right", [KEY_END] = "End", [KEY_DOWN] = "Down", [KEY_PAGEDOWN] = "PageDown", [KEY_INSERT] = "Insert", [KEY_DELETE] = "Delete", [KEY_MACRO] = "Macro", [KEY_MUTE] = "Mute", [KEY_VOLUMEDOWN] = "VolumeDown", [KEY_VOLUMEUP] = "VolumeUp", [KEY_POWER] = "Power", [KEY_KPEQUAL] = "KPEqual", [KEY_KPPLUSMINUS] = "KPPlusMinus", [KEY_PAUSE] = "Pause", [KEY_KPCOMMA] = "KPComma", [KEY_HANGUEL] = "Hangeul", [KEY_HANJA] = "Hanja", [KEY_YEN] = "Yen", [KEY_LEFTMETA] = "LeftMeta", [KEY_RIGHTMETA] = "RightMeta", [KEY_COMPOSE] = "Compose", [KEY_STOP] = "Stop", [KEY_AGAIN] = "Again", [KEY_PROPS] = "Props", [KEY_UNDO] = "Undo", [KEY_FRONT] = "Front", [KEY_COPY] = "Copy", [KEY_OPEN] = "Open", [KEY_PASTE] = "Paste", [KEY_FIND] = "Find", [KEY_CUT] = "Cut", [KEY_HELP] = "Help", [KEY_MENU] = "Menu", [KEY_CALC] = "Calc", [KEY_SETUP] = "Setup", [KEY_SLEEP] = "Sleep", [KEY_WAKEUP] = "WakeUp", [KEY_FILE] = "File", [KEY_SENDFILE] = "SendFile", [KEY_DELETEFILE] = "DeleteFile", [KEY_XFER] = "X-fer", [KEY_PROG1] = "Prog1", [KEY_PROG2] = "Prog2", [KEY_WWW] = "WWW", [KEY_MSDOS] = "MSDOS", [KEY_COFFEE] = "Coffee", [KEY_ROTATE_DISPLAY] = "RotateDisplay", [KEY_CYCLEWINDOWS] = "CycleWindows", [KEY_MAIL] = "Mail", [KEY_BOOKMARKS] = "Bookmarks", [KEY_COMPUTER] = "Computer", [KEY_BACK] = "Back", [KEY_FORWARD] = "Forward", [KEY_CLOSECD] = "CloseCD", [KEY_EJECTCD] = "EjectCD", [KEY_EJECTCLOSECD] = "EjectCloseCD", [KEY_NEXTSONG] = "NextSong", [KEY_PLAYPAUSE] = "PlayPause", [KEY_PREVIOUSSONG] = "PreviousSong", [KEY_STOPCD] = "StopCD", [KEY_RECORD] = "Record", [KEY_REWIND] = "Rewind", [KEY_PHONE] = "Phone", [KEY_ISO] = "ISOKey", [KEY_CONFIG] = "Config", [KEY_HOMEPAGE] = "HomePage", [KEY_REFRESH] = "Refresh", [KEY_EXIT] = "Exit", [KEY_MOVE] = "Move", [KEY_EDIT] = "Edit", [KEY_SCROLLUP] = "ScrollUp", [KEY_SCROLLDOWN] = "ScrollDown", [KEY_KPLEFTPAREN] = "KPLeftParenthesis", [KEY_KPRIGHTPAREN] = "KPRightParenthesis", [KEY_NEW] = "New", [KEY_REDO] = "Redo", [KEY_F13] = "F13", [KEY_F14] = "F14", [KEY_F15] = "F15", [KEY_F16] = "F16", [KEY_F17] = "F17", [KEY_F18] = "F18", [KEY_F19] = "F19", [KEY_F20] = "F20", [KEY_F21] 
= "F21", [KEY_F22] = "F22", [KEY_F23] = "F23", [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD", [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3", [KEY_PROG4] = "Prog4", [KEY_ALL_APPLICATIONS] = "AllApplications", [KEY_SUSPEND] = "Suspend", [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play", [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost", [KEY_PRINT] = "Print", [KEY_HP] = "HP", [KEY_CAMERA] = "Camera", [KEY_SOUND] = "Sound", [KEY_QUESTION] = "Question", [KEY_EMAIL] = "Email", [KEY_CHAT] = "Chat", [KEY_SEARCH] = "Search", [KEY_CONNECT] = "Connect", [KEY_FINANCE] = "Finance", [KEY_SPORT] = "Sport", [KEY_SHOP] = "Shop", [KEY_ALTERASE] = "AlternateErase", [KEY_CANCEL] = "Cancel", [KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp", [KEY_MEDIA] = "Media", [KEY_UNKNOWN] = "Unknown", [BTN_DPAD_UP] = "BtnDPadUp", [BTN_DPAD_DOWN] = "BtnDPadDown", [BTN_DPAD_LEFT] = "BtnDPadLeft", [BTN_DPAD_RIGHT] = "BtnDPadRight", [BTN_0] = "Btn0", [BTN_1] = "Btn1", [BTN_2] = "Btn2", [BTN_3] = "Btn3", [BTN_4] = "Btn4", [BTN_5] = "Btn5", [BTN_6] = "Btn6", [BTN_7] = "Btn7", [BTN_8] = "Btn8", [BTN_9] = "Btn9", [BTN_LEFT] = "LeftBtn", [BTN_RIGHT] = "RightBtn", [BTN_MIDDLE] = "MiddleBtn", [BTN_SIDE] = "SideBtn", [BTN_EXTRA] = "ExtraBtn", [BTN_FORWARD] = "ForwardBtn", [BTN_BACK] = "BackBtn", [BTN_TASK] = "TaskBtn", [BTN_TRIGGER] = "Trigger", [BTN_THUMB] = "ThumbBtn", [BTN_THUMB2] = "ThumbBtn2", [BTN_TOP] = "TopBtn", [BTN_TOP2] = "TopBtn2", [BTN_PINKIE] = "PinkieBtn", [BTN_BASE] = "BaseBtn", [BTN_BASE2] = "BaseBtn2", [BTN_BASE3] = "BaseBtn3", [BTN_BASE4] = "BaseBtn4", [BTN_BASE5] = "BaseBtn5", [BTN_BASE6] = "BaseBtn6", [BTN_DEAD] = "BtnDead", [BTN_A] = "BtnA", [BTN_B] = "BtnB", [BTN_C] = "BtnC", [BTN_X] = "BtnX", [BTN_Y] = "BtnY", [BTN_Z] = "BtnZ", [BTN_TL] = "BtnTL", [BTN_TR] = "BtnTR", [BTN_TL2] = "BtnTL2", [BTN_TR2] = "BtnTR2", [BTN_SELECT] = "BtnSelect", [BTN_START] = "BtnStart", [BTN_MODE] = "BtnMode", [BTN_THUMBL] = "BtnThumbL", [BTN_THUMBR] = "BtnThumbR", [BTN_TOOL_PEN] = "ToolPen", [BTN_TOOL_RUBBER] = "ToolRubber", [BTN_TOOL_BRUSH] = "ToolBrush", [BTN_TOOL_PENCIL] = "ToolPencil", [BTN_TOOL_AIRBRUSH] = "ToolAirbrush", [BTN_TOOL_FINGER] = "ToolFinger", [BTN_TOOL_MOUSE] = "ToolMouse", [BTN_TOOL_LENS] = "ToolLens", [BTN_TOUCH] = "Touch", [BTN_STYLUS] = "Stylus", [BTN_STYLUS2] = "Stylus2", [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap", [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_TOOL_QUADTAP] = "ToolQuadrupleTap", [BTN_GEAR_DOWN] = "WheelBtn", [BTN_GEAR_UP] = "Gear up", [KEY_OK] = "Ok", [KEY_SELECT] = "Select", [KEY_GOTO] = "Goto", [KEY_CLEAR] = "Clear", [KEY_POWER2] = "Power2", [KEY_OPTION] = "Option", [KEY_INFO] = "Info", [KEY_TIME] = "Time", [KEY_VENDOR] = "Vendor", [KEY_ARCHIVE] = "Archive", [KEY_PROGRAM] = "Program", [KEY_CHANNEL] = "Channel", [KEY_FAVORITES] = "Favorites", [KEY_EPG] = "EPG", [KEY_PVR] = "PVR", [KEY_MHP] = "MHP", [KEY_LANGUAGE] = "Language", [KEY_TITLE] = "Title", [KEY_SUBTITLE] = "Subtitle", [KEY_ANGLE] = "Angle", [KEY_MODE] = "Mode", [KEY_KEYBOARD] = "Keyboard", [KEY_PC] = "PC", [KEY_TV] = "TV", [KEY_TV2] = "TV2", [KEY_VCR] = "VCR", [KEY_VCR2] = "VCR2", [KEY_SAT] = "Sat", [KEY_SAT2] = "Sat2", [KEY_CD] = "CD", [KEY_TAPE] = "Tape", [KEY_RADIO] = "Radio", [KEY_TUNER] = "Tuner", [KEY_PLAYER] = "Player", [KEY_TEXT] = "Text", [KEY_DVD] = "DVD", [KEY_AUX] = "Aux", [KEY_MP3] = "MP3", [KEY_AUDIO] = "Audio", [KEY_VIDEO] = "Video", [KEY_DIRECTORY] = "Directory", [KEY_LIST] = "List", [KEY_MEMO] = "Memo", [KEY_CALENDAR] = "Calendar", [KEY_RED] = "Red", [KEY_GREEN] = "Green", 
[KEY_YELLOW] = "Yellow", [KEY_BLUE] = "Blue", [KEY_CHANNELUP] = "ChannelUp", [KEY_CHANNELDOWN] = "ChannelDown", [KEY_FIRST] = "First", [KEY_LAST] = "Last", [KEY_AB] = "AB", [KEY_NEXT] = "Next", [KEY_RESTART] = "Restart", [KEY_SLOW] = "Slow", [KEY_SHUFFLE] = "Shuffle", [KEY_BREAK] = "Break", [KEY_PREVIOUS] = "Previous", [KEY_DIGITS] = "Digits", [KEY_TEEN] = "TEEN", [KEY_TWEN] = "TWEN", [KEY_DEL_EOL] = "DeleteEOL", [KEY_DEL_EOS] = "DeleteEOS", [KEY_INS_LINE] = "InsertLine", [KEY_DEL_LINE] = "DeleteLine", [KEY_SEND] = "Send", [KEY_REPLY] = "Reply", [KEY_FORWARDMAIL] = "ForwardMail", [KEY_SAVE] = "Save", [KEY_DOCUMENTS] = "Documents", [KEY_SPELLCHECK] = "SpellCheck", [KEY_LOGOFF] = "Logoff", [KEY_FN] = "Fn", [KEY_FN_ESC] = "Fn+ESC", [KEY_FN_1] = "Fn+1", [KEY_FN_2] = "Fn+2", [KEY_FN_B] = "Fn+B", [KEY_FN_D] = "Fn+D", [KEY_FN_E] = "Fn+E", [KEY_FN_F] = "Fn+F", [KEY_FN_S] = "Fn+S", [KEY_FN_F1] = "Fn+F1", [KEY_FN_F2] = "Fn+F2", [KEY_FN_F3] = "Fn+F3", [KEY_FN_F4] = "Fn+F4", [KEY_FN_F5] = "Fn+F5", [KEY_FN_F6] = "Fn+F6", [KEY_FN_F7] = "Fn+F7", [KEY_FN_F8] = "Fn+F8", [KEY_FN_F9] = "Fn+F9", [KEY_FN_F10] = "Fn+F10", [KEY_FN_F11] = "Fn+F11", [KEY_FN_F12] = "Fn+F12", [KEY_KBDILLUMTOGGLE] = "KbdIlluminationToggle", [KEY_KBDILLUMDOWN] = "KbdIlluminationDown", [KEY_KBDILLUMUP] = "KbdIlluminationUp", [KEY_SWITCHVIDEOMODE] = "SwitchVideoMode", [KEY_BUTTONCONFIG] = "ButtonConfig", [KEY_TASKMANAGER] = "TaskManager", [KEY_JOURNAL] = "Journal", [KEY_CONTROLPANEL] = "ControlPanel", [KEY_APPSELECT] = "AppSelect", [KEY_SCREENSAVER] = "ScreenSaver", [KEY_VOICECOMMAND] = "VoiceCommand", [KEY_ASSISTANT] = "Assistant", [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext", [KEY_EMOJI_PICKER] = "EmojiPicker", [KEY_CAMERA_ACCESS_ENABLE] = "CameraAccessEnable", [KEY_CAMERA_ACCESS_DISABLE] = "CameraAccessDisable", [KEY_CAMERA_ACCESS_TOGGLE] = "CameraAccessToggle", [KEY_ACCESSIBILITY] = "Accessibility", [KEY_DO_NOT_DISTURB] = "DoNotDisturb", [KEY_DICTATE] = "Dictate", [KEY_MICMUTE] = "MicrophoneMute", [KEY_BRIGHTNESS_MIN] = "BrightnessMin", [KEY_BRIGHTNESS_MAX] = "BrightnessMax", [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", [KEY_KBDINPUTASSIST_PREV] = "KbdInputAssistPrev", [KEY_KBDINPUTASSIST_NEXT] = "KbdInputAssistNext", [KEY_KBDINPUTASSIST_PREVGROUP] = "KbdInputAssistPrevGroup", [KEY_KBDINPUTASSIST_NEXTGROUP] = "KbdInputAssistNextGroup", [KEY_KBDINPUTASSIST_ACCEPT] = "KbdInputAssistAccept", [KEY_KBDINPUTASSIST_CANCEL] = "KbdInputAssistCancel", [KEY_MACRO1] = "Macro1", [KEY_MACRO2] = "Macro2", [KEY_MACRO3] = "Macro3", [KEY_MACRO4] = "Macro4", [KEY_MACRO5] = "Macro5", [KEY_MACRO6] = "Macro6", [KEY_MACRO7] = "Macro7", [KEY_MACRO8] = "Macro8", [KEY_MACRO9] = "Macro9", [KEY_MACRO10] = "Macro10", [KEY_MACRO11] = "Macro11", [KEY_MACRO12] = "Macro12", [KEY_MACRO13] = "Macro13", [KEY_MACRO14] = "Macro14", [KEY_MACRO15] = "Macro15", [KEY_MACRO16] = "Macro16", [KEY_MACRO17] = "Macro17", [KEY_MACRO18] = "Macro18", [KEY_MACRO19] = "Macro19", [KEY_MACRO20] = "Macro20", [KEY_MACRO21] = "Macro21", [KEY_MACRO22] = "Macro22", [KEY_MACRO23] = "Macro23", [KEY_MACRO24] = "Macro24", [KEY_MACRO25] = "Macro25", [KEY_MACRO26] = "Macro26", [KEY_MACRO27] = "Macro27", [KEY_MACRO28] = "Macro28", [KEY_MACRO29] = "Macro29", [KEY_MACRO30] = "Macro30", [BTN_TRIGGER_HAPPY1] = "TriggerHappy1", [BTN_TRIGGER_HAPPY2] = "TriggerHappy2", [BTN_TRIGGER_HAPPY3] = "TriggerHappy3", [BTN_TRIGGER_HAPPY4] = "TriggerHappy4", [BTN_TRIGGER_HAPPY5] = "TriggerHappy5", [BTN_TRIGGER_HAPPY6] = "TriggerHappy6", [BTN_TRIGGER_HAPPY7] = "TriggerHappy7", [BTN_TRIGGER_HAPPY8] = "TriggerHappy8", 
[BTN_TRIGGER_HAPPY9] = "TriggerHappy9", [BTN_TRIGGER_HAPPY10] = "TriggerHappy10", [BTN_TRIGGER_HAPPY11] = "TriggerHappy11", [BTN_TRIGGER_HAPPY12] = "TriggerHappy12", [BTN_TRIGGER_HAPPY13] = "TriggerHappy13", [BTN_TRIGGER_HAPPY14] = "TriggerHappy14", [BTN_TRIGGER_HAPPY15] = "TriggerHappy15", [BTN_TRIGGER_HAPPY16] = "TriggerHappy16", [BTN_TRIGGER_HAPPY17] = "TriggerHappy17", [BTN_TRIGGER_HAPPY18] = "TriggerHappy18", [BTN_TRIGGER_HAPPY19] = "TriggerHappy19", [BTN_TRIGGER_HAPPY20] = "TriggerHappy20", [BTN_TRIGGER_HAPPY21] = "TriggerHappy21", [BTN_TRIGGER_HAPPY22] = "TriggerHappy22", [BTN_TRIGGER_HAPPY23] = "TriggerHappy23", [BTN_TRIGGER_HAPPY24] = "TriggerHappy24", [BTN_TRIGGER_HAPPY25] = "TriggerHappy25", [BTN_TRIGGER_HAPPY26] = "TriggerHappy26", [BTN_TRIGGER_HAPPY27] = "TriggerHappy27", [BTN_TRIGGER_HAPPY28] = "TriggerHappy28", [BTN_TRIGGER_HAPPY29] = "TriggerHappy29", [BTN_TRIGGER_HAPPY30] = "TriggerHappy30", [BTN_TRIGGER_HAPPY31] = "TriggerHappy31", [BTN_TRIGGER_HAPPY32] = "TriggerHappy32", [BTN_TRIGGER_HAPPY33] = "TriggerHappy33", [BTN_TRIGGER_HAPPY34] = "TriggerHappy34", [BTN_TRIGGER_HAPPY35] = "TriggerHappy35", [BTN_TRIGGER_HAPPY36] = "TriggerHappy36", [BTN_TRIGGER_HAPPY37] = "TriggerHappy37", [BTN_TRIGGER_HAPPY38] = "TriggerHappy38", [BTN_TRIGGER_HAPPY39] = "TriggerHappy39", [BTN_TRIGGER_HAPPY40] = "TriggerHappy40", [BTN_STYLUS3] = "Stylus3", [BTN_TOOL_QUINTTAP] = "ToolQuintTap", [KEY_10CHANNELSDOWN] = "10ChannelsDown", [KEY_10CHANNELSUP] = "10ChannelsUp", [KEY_3D_MODE] = "3DMode", [KEY_ADDRESSBOOK] = "Addressbook", [KEY_ALS_TOGGLE] = "ALSToggle", [KEY_ASPECT_RATIO] = "AspectRatio", [KEY_ATTENDANT_OFF] = "AttendantOff", [KEY_ATTENDANT_ON] = "AttendantOn", [KEY_ATTENDANT_TOGGLE] = "AttendantToggle", [KEY_AUDIO_DESC] = "AudioDesc", [KEY_AUTOPILOT_ENGAGE_TOGGLE] = "AutoPilotEngage", [KEY_BATTERY] = "Battery", [KEY_BLUETOOTH] = "BlueTooth", [KEY_BRIGHTNESS_CYCLE] = "BrightnessCycle", [KEY_BRIGHTNESS_MENU] = "BrightnessMenu", [KEY_BRL_DOT1] = "BrlDot1", [KEY_BRL_DOT10] = "BrlDot10", [KEY_BRL_DOT2] = "BrlDot2", [KEY_BRL_DOT3] = "BrlDot3", [KEY_BRL_DOT4] = "BrlDot4", [KEY_BRL_DOT5] = "BrlDot5", [KEY_BRL_DOT6] = "BrlDot6", [KEY_BRL_DOT7] = "BrlDot7", [KEY_BRL_DOT8] = "BrlDot8", [KEY_BRL_DOT9] = "BrlDot9", [KEY_CAMERA_DOWN] = "CameraDown", [KEY_CAMERA_FOCUS] = "CameraFocus", [KEY_CAMERA_LEFT] = "CameraLeft", [KEY_CAMERA_RIGHT] = "CameraRight", [KEY_CAMERA_UP] = "CameraUp", [KEY_CAMERA_ZOOMIN] = "CameraZoomIn", [KEY_CAMERA_ZOOMOUT] = "CameraZoomOut", [KEY_CLEARVU_SONAR] = "ClearVUSonar", [KEY_CONTEXT_MENU] = "ContextMenu", [KEY_DATA] = "Data", [KEY_DATABASE] = "DataBase", [KEY_DISPLAY_OFF] = "DisplayOff", [KEY_DISPLAYTOGGLE] = "DisplayToggle", [KEY_DOLLAR] = "Dollar", [KEY_DUAL_RANGE_RADAR] = "DualRangeRadar", [KEY_EDITOR] = "Editor", [KEY_EURO] = "Euro", [KEY_FASTREVERSE] = "FastReverse", [KEY_FISHING_CHART] = "FishingChart", [KEY_FN_RIGHT_SHIFT] = "FnRightShift", [KEY_FRAMEBACK] = "FrameBack", [KEY_FRAMEFORWARD] = "FrameForward", [KEY_FULL_SCREEN] = "FullScreen", [KEY_GAMES] = "Games", [KEY_GRAPHICSEDITOR] = "GraphicsEditor", [KEY_HANGUP_PHONE] = "HangUpPhone", [KEY_IMAGES] = "Images", [KEY_KBD_LCD_MENU1] = "KbdLcdMenu1", [KEY_KBD_LCD_MENU2] = "KbdLcdMenu2", [KEY_KBD_LCD_MENU3] = "KbdLcdMenu3", [KEY_KBD_LCD_MENU4] = "KbdLcdMenu4", [KEY_KBD_LCD_MENU5] = "KbdLcdMenu5", [KEY_LEFT_DOWN] = "LeftDown", [KEY_LEFT_UP] = "LeftUp", [KEY_LIGHTS_TOGGLE] = "LightToggle", [KEY_MACRO_PRESET1] = "MacroPreset1", [KEY_MACRO_PRESET2] = "MacroPreset2", [KEY_MACRO_PRESET3] = "MacroPreset3", [KEY_MACRO_PRESET_CYCLE] = "MacroPresetCycle", [KEY_MACRO_RECORD_START] = "MacroRecordStart", [KEY_MACRO_RECORD_STOP] = "MacroRecordStop", [KEY_MARK_WAYPOINT] = "MarkWayPoint", [KEY_MEDIA_REPEAT] = "MediaRepeat", [KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messenger", [KEY_NAV_CHART] = "NavChart", [KEY_NAV_INFO] = "NavInfo", [KEY_NEWS] = "News", [KEY_NEXT_ELEMENT] = "NextElement", [KEY_NEXT_FAVORITE] = "NextFavorite", [KEY_NOTIFICATION_CENTER] = "NotificationCenter", [KEY_NUMERIC_0] = "Numeric0", [KEY_NUMERIC_1] = "Numeric1", [KEY_NUMERIC_11] = "Numeric11", [KEY_NUMERIC_12] = "Numeric12", [KEY_NUMERIC_2] = "Numeric2", [KEY_NUMERIC_3] = "Numeric3", [KEY_NUMERIC_4] = "Numeric4", [KEY_NUMERIC_5] = "Numeric5", [KEY_NUMERIC_6] = "Numeric6", [KEY_NUMERIC_7] = "Numeric7", [KEY_NUMERIC_8] = "Numeric8", [KEY_NUMERIC_9] = "Numeric9", [KEY_NUMERIC_A] = "NumericA", [KEY_NUMERIC_B] = "NumericB", [KEY_NUMERIC_C] = "NumericC", [KEY_NUMERIC_D] = "NumericD", [KEY_NUMERIC_POUND] = "NumericPound", [KEY_NUMERIC_STAR] = "NumericStar", [KEY_ONSCREEN_KEYBOARD] = "OnScreenKeyBoard", [KEY_PAUSE_RECORD] = "PauseRecord", [KEY_PICKUP_PHONE] = "PickUpPhone", [KEY_PRESENTATION] = "Presentation", [KEY_PREVIOUS_ELEMENT] = "PreviousElement", [KEY_PRIVACY_SCREEN_TOGGLE] = "PrivacyScreenToggle", [KEY_RADAR_OVERLAY] = "RadarOverLay", [KEY_RFKILL] = "RFKill", [KEY_RIGHT_DOWN] = "RightDown", [KEY_RIGHT_UP] = "RightUp", [KEY_ROOT_MENU] = "RootMenu", [KEY_ROTATE_LOCK_TOGGLE] = "RotateLockToggle", [KEY_SCALE] = "Scale", [KEY_SELECTIVE_SCREENSHOT] = "SelectiveScreenshot", [KEY_SIDEVU_SONAR] = "SideVUSonar", [KEY_SINGLE_RANGE_RADAR] = "SingleRangeRadar", [KEY_SLOWREVERSE] = "SlowReverse", [KEY_SOS] = "SOS", [KEY_SPREADSHEET] = "SpreadSheet", [KEY_STOP_RECORD] = "StopRecord", [KEY_TOUCHPAD_OFF] = "TouchPadOff", [KEY_TOUCHPAD_ON] = "TouchPadOn", [KEY_TOUCHPAD_TOGGLE] = "TouchPadToggle", [KEY_TRADITIONAL_SONAR] = "TraditionalSonar", [KEY_UNMUTE] = "Unmute", [KEY_UWB] = "UWB", [KEY_VIDEO_NEXT] = "VideoNext", [KEY_VIDEOPHONE] = "VideoPhone", [KEY_VIDEO_PREV] = "VideoPrev", [KEY_VOD] = "VOD", [KEY_VOICEMAIL] = "VoiceMail", [KEY_WLAN] = "WLAN", [KEY_WORDPROCESSOR] = "WordProcessor", [KEY_WPS_BUTTON] = "WPSButton", [KEY_WWAN] = "WWAN", [KEY_ZOOMIN] = "ZoomIn", [KEY_ZOOMOUT] = "ZoomOut", [KEY_ZOOMRESET] = "ZoomReset", }; static const char *relatives[REL_MAX + 1] = { [REL_X] = "X", [REL_Y] = "Y", [REL_Z] = "Z", [REL_RX] = "Rx", [REL_RY] = "Ry", [REL_RZ] = "Rz", [REL_HWHEEL] = "HWheel", [REL_DIAL] = "Dial", [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc", [REL_WHEEL_HI_RES] = "WheelHiRes", [REL_HWHEEL_HI_RES] = "HWheelHiRes" }; static const char *absolutes[ABS_CNT] = { [ABS_X] = "X", [ABS_Y] = "Y", [ABS_Z] = "Z", [ABS_RX] = "Rx", [ABS_RY] = "Ry", [ABS_RZ] = "Rz", [ABS_THROTTLE] = "Throttle", [ABS_RUDDER] = "Rudder", [ABS_WHEEL] = "Wheel", [ABS_GAS] = "Gas", [ABS_BRAKE] = "Brake", [ABS_HAT0X] = "Hat0X", [ABS_HAT0Y] = "Hat0Y", [ABS_HAT1X] = "Hat1X", [ABS_HAT1Y] = "Hat1Y", [ABS_HAT2X] = "Hat2X", [ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X", [ABS_HAT3Y] = "Hat3Y", [ABS_PRESSURE] = "Pressure", [ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt", [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "ToolWidth", [ABS_VOLUME] = "Volume", [ABS_PROFILE] = "Profile", [ABS_MISC] = "Misc", [ABS_MT_SLOT] = "MTSlot", [ABS_MT_TOUCH_MAJOR] = "MTMajor", [ABS_MT_TOUCH_MINOR] = "MTMinor", [ABS_MT_WIDTH_MAJOR] = "MTMajorW", [ABS_MT_WIDTH_MINOR] = "MTMinorW", [ABS_MT_ORIENTATION] = "MTOrientation", [ABS_MT_POSITION_X] = "MTPositionX", [ABS_MT_POSITION_Y] = 
"MTPositionY", [ABS_MT_TOOL_TYPE] = "MTToolType", [ABS_MT_BLOB_ID] = "MTBlobID", [ABS_MT_TRACKING_ID] = "MTTrackingID", [ABS_MT_PRESSURE] = "MTPressure", [ABS_MT_DISTANCE] = "MTDistance", [ABS_MT_TOOL_X] = "MTToolX", [ABS_MT_TOOL_Y] = "MTToolY", }; static const char *misc[MSC_MAX + 1] = { [MSC_SERIAL] = "Serial", [MSC_PULSELED] = "Pulseled", [MSC_GESTURE] = "Gesture", [MSC_RAW] = "RawData", [MSC_SCAN] = "Scan", [MSC_TIMESTAMP] = "TimeStamp", }; static const char *leds[LED_MAX + 1] = { [LED_NUML] = "NumLock", [LED_CAPSL] = "CapsLock", [LED_SCROLLL] = "ScrollLock", [LED_COMPOSE] = "Compose", [LED_KANA] = "Kana", [LED_SLEEP] = "Sleep", [LED_SUSPEND] = "Suspend", [LED_MUTE] = "Mute", [LED_MISC] = "Misc", [LED_MAIL] = "Mail", [LED_CHARGING] = "Charging", }; static const char *repeats[REP_MAX + 1] = { [REP_DELAY] = "Delay", [REP_PERIOD] = "Period" }; static const char *sounds[SND_MAX + 1] = { [SND_CLICK] = "Click", [SND_BELL] = "Bell", [SND_TONE] = "Tone" }; static const char *software[SW_CNT] = { [SW_LID] = "Lid", [SW_TABLET_MODE] = "TabletMode", [SW_HEADPHONE_INSERT] = "HeadPhoneInsert", [SW_RFKILL_ALL] = "RFKillAll", [SW_MICROPHONE_INSERT] = "MicrophoneInsert", [SW_DOCK] = "Dock", [SW_LINEOUT_INSERT] = "LineOutInsert", [SW_JACK_PHYSICAL_INSERT] = "JackPhysicalInsert", [SW_VIDEOOUT_INSERT] = "VideoOutInsert", [SW_CAMERA_LENS_COVER] = "CameraLensCover", [SW_KEYPAD_SLIDE] = "KeyPadSlide", [SW_FRONT_PROXIMITY] = "FrontProximity", [SW_ROTATE_LOCK] = "RotateLock", [SW_LINEIN_INSERT] = "LineInInsert", [SW_MUTE_DEVICE] = "MuteDevice", [SW_PEN_INSERTED] = "PenInserted", [SW_MACHINE_COVER] = "MachineCover", }; static const char *force[FF_CNT] = { [FF_RUMBLE] = "FF_RUMBLE", [FF_PERIODIC] = "FF_PERIODIC", [FF_CONSTANT] = "FF_CONSTANT", [FF_SPRING] = "FF_SPRING", [FF_FRICTION] = "FF_FRICTION", [FF_DAMPER] = "FF_DAMPER", [FF_INERTIA] = "FF_INERTIA", [FF_RAMP] = "FF_RAMP", [FF_SQUARE] = "FF_SQUARE", [FF_TRIANGLE] = "FF_TRIANGLE", [FF_SINE] = "FF_SINE", [FF_SAW_UP] = "FF_SAW_UP", [FF_SAW_DOWN] = "FF_SAW_DOWN", [FF_CUSTOM] = "FF_CUSTOM", [FF_GAIN] = "FF_GAIN", [FF_AUTOCENTER] = "FF_AUTOCENTER", [FF_MAX] = "FF_MAX", }; static const char *force_status[FF_STATUS_MAX + 1] = { [FF_STATUS_STOPPED] = "FF_STATUS_STOPPED", [FF_STATUS_PLAYING] = "FF_STATUS_PLAYING", }; static const char **names[EV_MAX + 1] = { [EV_SYN] = syncs, [EV_KEY] = keys, [EV_REL] = relatives, [EV_ABS] = absolutes, [EV_MSC] = misc, [EV_LED] = leds, [EV_SND] = sounds, [EV_REP] = repeats, [EV_SW] = software, [EV_FF] = force, [EV_FF_STATUS] = force_status, }; static void hid_resolv_event(__u8 type, __u16 code, struct seq_file *f) { if (events[type]) seq_printf(f, "%s.", events[type]); else seq_printf(f, "%02x.", type); if (names[type] && names[type][code]) seq_printf(f, "%s", names[type][code]); else seq_printf(f, "%04x", code); } static void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f) { int i, j, k; struct hid_report *report; struct hid_usage *usage; for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) { list_for_each_entry(report, &hid->report_enum[k].report_list, list) { for (i = 0; i < report->maxfield; i++) { for ( j = 0; j < report->field[i]->maxusage; j++) { usage = report->field[i]->usage + j; hid_resolv_usage(usage->hid, f); seq_printf(f, " ---> "); hid_resolv_event(usage->type, usage->code, f); seq_printf(f, "\n"); } } } } } static int hid_debug_rdesc_show(struct seq_file *f, void *p) { struct hid_device *hdev = f->private; const __u8 *rdesc = hdev->rdesc; unsigned rsize = hdev->rsize; int i; if (!rdesc) { rdesc = 
hdev->dev_rdesc; rsize = hdev->dev_rsize; } /* dump HID report descriptor */ for (i = 0; i < rsize; i++) seq_printf(f, "%02x ", rdesc[i]); seq_printf(f, "\n\n"); /* dump parsed data and input mappings */ if (down_interruptible(&hdev->driver_input_lock)) return 0; hid_dump_device(hdev, f); seq_printf(f, "\n"); hid_dump_input_mapping(hdev, f); up(&hdev->driver_input_lock); return 0; } static int hid_debug_events_open(struct inode *inode, struct file *file) { int err = 0; struct hid_debug_list *list; unsigned long flags; if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) { err = -ENOMEM; goto out; } err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL); if (err) { kfree(list); goto out; } list->hdev = (struct hid_device *) inode->i_private; kref_get(&list->hdev->ref); file->private_data = list; mutex_init(&list->read_mutex); spin_lock_irqsave(&list->hdev->debug_list_lock, flags); list_add_tail(&list->node, &list->hdev->debug_list); spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); out: return err; } static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct hid_debug_list *list = file->private_data; int ret = 0, copied; DECLARE_WAITQUEUE(wait, current); mutex_lock(&list->read_mutex); if (kfifo_is_empty(&list->hid_debug_fifo)) { add_wait_queue(&list->hdev->debug_wait, &wait); set_current_state(TASK_INTERRUPTIBLE); while (kfifo_is_empty(&list->hid_debug_fifo)) { if (signal_pending(current)) { ret = -ERESTARTSYS; break; } /* if list->hdev is NULL we cannot remove_wait_queue(). * if list->hdev->debug is 0 then hid_debug_unregister() * was already called and list->hdev is being destroyed. * if we add remove_wait_queue() here we can hit a race. */ if (!list->hdev || !list->hdev->debug) { ret = -EIO; set_current_state(TASK_RUNNING); goto out; } if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } /* allow O_NONBLOCK from other threads */ mutex_unlock(&list->read_mutex); schedule(); mutex_lock(&list->read_mutex); set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); remove_wait_queue(&list->hdev->debug_wait, &wait); if (ret) goto out; } /* pass the fifo content to userspace, locking is not needed with only * one concurrent reader and one concurrent writer */ ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied); if (ret) goto out; ret = copied; out: mutex_unlock(&list->read_mutex); return ret; } static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait) { struct hid_debug_list *list = file->private_data; poll_wait(file, &list->hdev->debug_wait, wait); if (!kfifo_is_empty(&list->hid_debug_fifo)) return EPOLLIN | EPOLLRDNORM; if (!list->hdev->debug) return EPOLLERR | EPOLLHUP; return 0; } static int hid_debug_events_release(struct inode *inode, struct file *file) { struct hid_debug_list *list = file->private_data; unsigned long flags; spin_lock_irqsave(&list->hdev->debug_list_lock, flags); list_del(&list->node); spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); kfifo_free(&list->hid_debug_fifo); kref_put(&list->hdev->ref, hiddev_free); kfree(list); return 0; } DEFINE_SHOW_ATTRIBUTE(hid_debug_rdesc); static const struct file_operations hid_debug_events_fops = { .owner = THIS_MODULE, .open = hid_debug_events_open, .read = hid_debug_events_read, .poll = hid_debug_events_poll, .release = hid_debug_events_release, .llseek = noop_llseek, }; void hid_debug_register(struct hid_device *hdev, const char *name) { hdev->debug_dir = 
debugfs_create_dir(name, hid_debug_root); hdev->debug_rdesc = debugfs_create_file("rdesc", 0400, hdev->debug_dir, hdev, &hid_debug_rdesc_fops); hdev->debug_events = debugfs_create_file("events", 0400, hdev->debug_dir, hdev, &hid_debug_events_fops); hdev->debug = 1; } void hid_debug_unregister(struct hid_device *hdev) { hdev->debug = 0; wake_up_interruptible(&hdev->debug_wait); debugfs_remove(hdev->debug_rdesc); debugfs_remove(hdev->debug_events); debugfs_remove(hdev->debug_dir); } void hid_debug_init(void) { hid_debug_root = debugfs_create_dir("hid", NULL); } void hid_debug_exit(void) { debugfs_remove_recursive(hid_debug_root); }
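The debugfs nodes registered above can be exercised from userspace. What follows is a minimal sketch (not part of the driver) of a reader for the per-device "events" node; the device directory name is a hypothetical example, and the loop mirrors the semantics of hid_debug_events_poll() and hid_debug_events_read(): POLLIN when the kfifo holds data, POLLERR | POLLHUP once hid_debug_unregister() has run.

/*
 * Minimal userspace reader for /sys/kernel/debug/hid/<device>/events.
 * Requires root and a mounted debugfs; the device directory name below
 * ("0003:046D:C52B.0001") is made up for illustration.
 */
#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;
	char buf[512];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/hid/0003:046D:C52B.0001/events",
		  O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	pfd.fd = fd;
	pfd.events = POLLIN;
	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		if (pfd.revents & (POLLERR | POLLHUP))
			break;	/* hid_debug_unregister() ran; node is gone */
		n = read(fd, buf, sizeof(buf));
		if (n > 0)
			fwrite(buf, 1, n, stdout);
		/* n < 0 with EAGAIN is possible under O_NONBLOCK; re-poll */
	}
	close(fd);
	return 0;
}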
// SPDX-License-Identifier: GPL-2.0 /* * Lockless hierarchical page accounting & limiting * * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner */ #include <linux/page_counter.h> #include <linux/atomic.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/bug.h> #include <asm/page.h> static bool track_protection(struct page_counter *c) { return c->protection_support; } static void propagate_protected_usage(struct page_counter *c, unsigned long usage) { unsigned long protected, old_protected; long delta; if (!c->parent) return; protected = min(usage, READ_ONCE(c->min)); old_protected = atomic_long_read(&c->min_usage); if (protected != old_protected) { old_protected = atomic_long_xchg(&c->min_usage, protected); delta = protected - old_protected; if (delta) atomic_long_add(delta, &c->parent->children_min_usage); } protected = min(usage, READ_ONCE(c->low)); old_protected = atomic_long_read(&c->low_usage); if (protected != old_protected) { old_protected = atomic_long_xchg(&c->low_usage, protected); delta = protected - old_protected; if (delta) atomic_long_add(delta, &c->parent->children_low_usage); } } /** * page_counter_cancel - take pages out of the local counter * @counter: counter * @nr_pages: number of pages to cancel */ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages) { long new; new = atomic_long_sub_return(nr_pages, &counter->usage); /* More uncharges than charges? 
*/ if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n", new, nr_pages)) { new = 0; atomic_long_set(&counter->usage, new); } if (track_protection(counter)) propagate_protected_usage(counter, new); } /** * page_counter_charge - hierarchically charge pages * @counter: counter * @nr_pages: number of pages to charge * * NOTE: This does not consider any configured counter limits. */ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages) { struct page_counter *c; bool protection = track_protection(counter); for (c = counter; c; c = c->parent) { long new; new = atomic_long_add_return(nr_pages, &c->usage); if (protection) propagate_protected_usage(c, new); /* * This is indeed racy, but we can live with some * inaccuracy in the watermark. * * Notably, we have two watermarks to allow for both a globally * visible peak and one that can be reset at a smaller scope. * * Since we reset both watermarks when the global reset occurs, * we can guarantee that watermark >= local_watermark, so we * don't need to do both comparisons every time. * * On systems with branch predictors, the inner condition should * be almost free. */ if (new > READ_ONCE(c->local_watermark)) { WRITE_ONCE(c->local_watermark, new); if (new > READ_ONCE(c->watermark)) WRITE_ONCE(c->watermark, new); } } } /** * page_counter_try_charge - try to hierarchically charge pages * @counter: counter * @nr_pages: number of pages to charge * @fail: points first counter to hit its limit, if any * * Returns %true on success, or %false and @fail if the counter or one * of its ancestors has hit its configured limit. */ bool page_counter_try_charge(struct page_counter *counter, unsigned long nr_pages, struct page_counter **fail) { struct page_counter *c; bool protection = track_protection(counter); bool track_failcnt = counter->track_failcnt; for (c = counter; c; c = c->parent) { long new; /* * Charge speculatively to avoid an expensive CAS. If * a bigger charge fails, it might falsely lock out a * racing smaller charge and send it into reclaim * early, but the error is limited to the difference * between the two sizes, which is less than 2M/4M in * case of a THP locking out a regular page charge. * * The atomic_long_add_return() implies a full memory * barrier between incrementing the count and reading * the limit. When racing with page_counter_set_max(), * we either see the new limit or the setter sees the * counter has changed and retries. */ new = atomic_long_add_return(nr_pages, &c->usage); if (new > c->max) { atomic_long_sub(nr_pages, &c->usage); /* * This is racy, but we can live with some * inaccuracy in the failcnt which is only used * to report stats. 
*/ if (track_failcnt) data_race(c->failcnt++); *fail = c; goto failed; } if (protection) propagate_protected_usage(c, new); /* see comment on page_counter_charge */ if (new > READ_ONCE(c->local_watermark)) { WRITE_ONCE(c->local_watermark, new); if (new > READ_ONCE(c->watermark)) WRITE_ONCE(c->watermark, new); } } return true; failed: for (c = counter; c != *fail; c = c->parent) page_counter_cancel(c, nr_pages); return false; } /** * page_counter_uncharge - hierarchically uncharge pages * @counter: counter * @nr_pages: number of pages to uncharge */ void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages) { struct page_counter *c; for (c = counter; c; c = c->parent) page_counter_cancel(c, nr_pages); } /** * page_counter_set_max - set the maximum number of pages allowed * @counter: counter * @nr_pages: limit to set * * Returns 0 on success, -EBUSY if the current number of pages on the * counter already exceeds the specified limit. * * The caller must serialize invocations on the same counter. */ int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages) { for (;;) { unsigned long old; long usage; /* * Update the limit while making sure that it's not * below the concurrently-changing counter value. * * The xchg implies two full memory barriers before * and after, so the read-swap-read is ordered and * ensures coherency with page_counter_try_charge(): * that function modifies the count before checking * the limit, so if it sees the old limit, we see the * modified counter and retry. */ usage = page_counter_read(counter); if (usage > nr_pages) return -EBUSY; old = xchg(&counter->max, nr_pages); if (page_counter_read(counter) <= usage || nr_pages >= old) return 0; counter->max = old; cond_resched(); } } /** * page_counter_set_min - set the amount of protected memory * @counter: counter * @nr_pages: value to set * * The caller must serialize invocations on the same counter. */ void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages) { struct page_counter *c; WRITE_ONCE(counter->min, nr_pages); for (c = counter; c; c = c->parent) propagate_protected_usage(c, atomic_long_read(&c->usage)); } /** * page_counter_set_low - set the amount of protected memory * @counter: counter * @nr_pages: value to set * * The caller must serialize invocations on the same counter. */ void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages) { struct page_counter *c; WRITE_ONCE(counter->low, nr_pages); for (c = counter; c; c = c->parent) propagate_protected_usage(c, atomic_long_read(&c->usage)); } /** * page_counter_memparse - memparse() for page counter limits * @buf: string to parse * @max: string meaning maximum possible value * @nr_pages: returns the result in number of pages * * Returns -EINVAL, or 0 and @nr_pages on success. @nr_pages will be * limited to %PAGE_COUNTER_MAX. */ int page_counter_memparse(const char *buf, const char *max, unsigned long *nr_pages) { char *end; u64 bytes; if (!strcmp(buf, max)) { *nr_pages = PAGE_COUNTER_MAX; return 0; } bytes = memparse(buf, &end); if (*end != '\0') return -EINVAL; *nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX); return 0; } #if IS_ENABLED(CONFIG_MEMCG) || IS_ENABLED(CONFIG_CGROUP_DMEM) /* * This function calculates an individual page counter's effective * protection which is derived from its own memory.min/low, its * parent's and siblings' settings, as well as the actual memory * distribution in the tree. 
* * The following rules apply to the effective protection values: * * 1. At the first level of reclaim, effective protection is equal to * the declared protection in memory.min and memory.low. * * 2. To enable safe delegation of the protection configuration, at * subsequent levels the effective protection is capped to the * parent's effective protection. * * 3. To make complex and dynamic subtrees easier to configure, the * user is allowed to overcommit the declared protection at a given * level. If that is the case, the parent's effective protection is * distributed to the children in proportion to how much protection * they have declared and how much of it they are utilizing. * * This makes distribution proportional, but also work-conserving: * if one counter claims much more protection than it uses memory, * the unused remainder is available to its siblings. * * 4. Conversely, when the declared protection is undercommitted at a * given level, the distribution of the larger parental protection * budget is NOT proportional. A counter's protection from a sibling * is capped to its own memory.min/low setting. * * 5. However, to allow protecting recursive subtrees from each other * without having to declare each individual counter's fixed share * of the ancestor's claim to protection, any unutilized - * "floating" - protection from up the tree is distributed in * proportion to each counter's *usage*. This makes the protection * neutral wrt sibling cgroups and lets them compete freely over * the shared parental protection budget, but it protects the * subtree as a whole from neighboring subtrees. * * Note that 4. and 5. are not in conflict: 4. is about protecting * against immediate siblings whereas 5. is about protecting against * neighboring subtrees. */ static unsigned long effective_protection(unsigned long usage, unsigned long parent_usage, unsigned long setting, unsigned long parent_effective, unsigned long siblings_protected, bool recursive_protection) { unsigned long protected; unsigned long ep; protected = min(usage, setting); /* * If all cgroups at this level combined claim and use more * protection than what the parent affords them, distribute * shares in proportion to utilization. * * We are using actual utilization rather than the statically * claimed protection in order to be work-conserving: claimed * but unused protection is available to siblings that would * otherwise get a smaller chunk than what they claimed. */ if (siblings_protected > parent_effective) return protected * parent_effective / siblings_protected; /* * Ok, utilized protection of all children is within what the * parent affords them, so we know whatever this child claims * and utilizes is effectively protected. * * If there is unprotected usage beyond this value, reclaim * will apply pressure in proportion to that amount. * * If there is unutilized protection, the cgroup will be fully * shielded from reclaim, but we do return a smaller value for * protection than what the group could enjoy in theory. This * is okay. With the overcommit distribution above, effective * protection is always dependent on how memory is actually * consumed among the siblings anyway. */ ep = protected; /* * If the children aren't claiming (all of) the protection * afforded to them by the parent, distribute the remainder in * proportion to the (unprotected) memory of each cgroup. 
That
 * way, cgroups that aren't explicitly prioritized wrt each
 * other compete freely over the allowance, but they are
 * collectively protected from neighboring trees.
 *
 * We're using unprotected memory for the weight so that if
 * some cgroups DO claim explicit protection, we don't protect
 * the same bytes twice.
 *
 * Check both usage and parent_usage against the respective
 * protected values. One should imply the other, but they
 * aren't read atomically - make sure the division is sane.
 */
	if (!recursive_protection)
		return ep;

	if (parent_effective > siblings_protected &&
	    parent_usage > siblings_protected &&
	    usage > protected) {
		unsigned long unclaimed;

		unclaimed = parent_effective - siblings_protected;
		unclaimed *= usage - protected;
		unclaimed /= parent_usage - siblings_protected;

		ep += unclaimed;
	}

	return ep;
}

/**
 * page_counter_calculate_protection - check if memory consumption is in the normal range
 * @root: the top ancestor of the sub-tree being checked
 * @counter: the page_counter to update
 * @recursive_protection: whether to use memory_recursiveprot behavior
 *
 * Calculates elow/emin thresholds for the given page_counter.
 *
 * WARNING: This function is not stateless! It can only be used as part
 * of a top-down tree iteration, not for isolated queries.
 */
void page_counter_calculate_protection(struct page_counter *root,
				       struct page_counter *counter,
				       bool recursive_protection)
{
	unsigned long usage, parent_usage;
	struct page_counter *parent = counter->parent;

	/*
	 * Effective values of the reclaim targets are ignored so they
	 * can be stale. Have a look at mem_cgroup_protection for more
	 * details.
	 * TODO: calculation should be more robust so that we do not need
	 * that special casing.
	 */
	if (root == counter)
		return;

	usage = page_counter_read(counter);
	if (!usage)
		return;

	if (parent == root) {
		counter->emin = READ_ONCE(counter->min);
		counter->elow = READ_ONCE(counter->low);
		return;
	}

	parent_usage = page_counter_read(parent);

	WRITE_ONCE(counter->emin, effective_protection(usage, parent_usage,
			READ_ONCE(counter->min),
			READ_ONCE(parent->emin),
			atomic_long_read(&parent->children_min_usage),
			recursive_protection));

	WRITE_ONCE(counter->elow, effective_protection(usage, parent_usage,
			READ_ONCE(counter->low),
			READ_ONCE(parent->elow),
			atomic_long_read(&parent->children_low_usage),
			recursive_protection));
}

#endif /* CONFIG_MEMCG || CONFIG_CGROUP_DMEM */
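/*
 * Illustrative sketch, not part of this file: how a hypothetical
 * controller might pair the charge/uncharge primitives above. The
 * example_* names are invented; only the page_counter calls are real.
 */
#if 0
static int example_account(struct page_counter *counter,
			   unsigned long nr_pages)
{
	struct page_counter *fail;

	/*
	 * On failure, @fail points at the counter (this one or an
	 * ancestor) whose limit was hit; page_counter_try_charge()
	 * has already rolled back the partial charges itself.
	 */
	if (!page_counter_try_charge(counter, nr_pages, &fail))
		return -ENOMEM;
	return 0;
}

static void example_unaccount(struct page_counter *counter,
			      unsigned long nr_pages)
{
	/* Must mirror the charge exactly, or the counter underflows. */
	page_counter_uncharge(counter, nr_pages);
}
#endif

/*
 * Worked example for the overcommit rule in effective_protection()
 * (numbers invented for illustration): if two siblings have
 * min=200/usage=150 and min=300/usage=250, then siblings_protected =
 * min(150, 200) + min(250, 300) = 400. With parent_effective = 200,
 * the first child's effective protection becomes 150 * 200 / 400 = 75
 * pages - the parent's budget is split in proportion to utilized claims.
 */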
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2009 Peter Holik
 *
 * Intellon usb PLC (Powerline Communications) usb net driver
 *
 * http://www.tandel.be/downloads/INT51X1_Datasheet.pdf
 *
 * Based on the work of Jan 'RedBully' Seiffert
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>

#define INT51X1_VENDOR_ID	0x09e1
#define INT51X1_PRODUCT_ID	0x5121

#define INT51X1_HEADER_SIZE	2	/* 2 byte header */

#define PACKET_TYPE_PROMISCUOUS		(1 << 0)
#define PACKET_TYPE_ALL_MULTICAST	(1 << 1) /* no filter */
#define PACKET_TYPE_DIRECTED		(1 << 2)
#define PACKET_TYPE_BROADCAST		(1 << 3)
#define PACKET_TYPE_MULTICAST		(1 << 4) /* filtered */

#define SET_ETHERNET_PACKET_FILTER	0x43

static int int51x1_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	int len;

	if (!(pskb_may_pull(skb, INT51X1_HEADER_SIZE))) {
		netdev_err(dev->net, "unexpected tiny rx frame\n");
		return 0;
	}

	len = le16_to_cpu(*(__le16 *)&skb->data[skb->len - 2]);

	skb_trim(skb, len);

	return 1;
}

static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
					struct sk_buff *skb, gfp_t flags)
{
	int pack_len = skb->len;
	int pack_with_header_len = pack_len + INT51X1_HEADER_SIZE;
	int headroom = skb_headroom(skb);
	int tailroom = skb_tailroom(skb);
	int need_tail = 0;
	__le16 *len;

	/* if the packet plus our header is smaller than 64, pad to 64 (+ ZLP) */
	if ((pack_with_header_len) < dev->maxpacket)
		need_tail = dev->maxpacket - pack_with_header_len + 1;
	/*
	 * usbnet would send a ZLP if packetlength mod urbsize == 0 for us,
	 * but we need to know ourselves, because this would add to the length
	 * we send down to the device...
*/ else if (!(pack_with_header_len % dev->maxpacket)) need_tail = 1; if (!skb_cloned(skb) && (headroom + tailroom >= need_tail + INT51X1_HEADER_SIZE)) { if (headroom < INT51X1_HEADER_SIZE || tailroom < need_tail) { skb->data = memmove(skb->head + INT51X1_HEADER_SIZE, skb->data, skb->len); skb_set_tail_pointer(skb, skb->len); } } else { struct sk_buff *skb2; skb2 = skb_copy_expand(skb, INT51X1_HEADER_SIZE, need_tail, flags); dev_kfree_skb_any(skb); if (!skb2) return NULL; skb = skb2; } pack_len += need_tail; pack_len &= 0x07ff; len = __skb_push(skb, INT51X1_HEADER_SIZE); *len = cpu_to_le16(pack_len); if(need_tail) __skb_put_zero(skb, need_tail); return skb; } static void int51x1_set_multicast(struct net_device *netdev) { struct usbnet *dev = netdev_priv(netdev); u16 filter = PACKET_TYPE_DIRECTED | PACKET_TYPE_BROADCAST; if (netdev->flags & IFF_PROMISC) { /* do not expect to see traffic of other PLCs */ filter |= PACKET_TYPE_PROMISCUOUS; netdev_info(dev->net, "promiscuous mode enabled\n"); } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) { filter |= PACKET_TYPE_ALL_MULTICAST; netdev_dbg(dev->net, "receive all multicast enabled\n"); } else { /* ~PROMISCUOUS, ~MULTICAST */ netdev_dbg(dev->net, "receive own packets only\n"); } usbnet_write_cmd_async(dev, SET_ETHERNET_PACKET_FILTER, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, filter, 0, NULL, 0); } static const struct net_device_ops int51x1_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = int51x1_set_multicast, }; static int int51x1_bind(struct usbnet *dev, struct usb_interface *intf) { int status = usbnet_get_ethernet_addr(dev, 3); if (status) return status; dev->net->hard_header_len += INT51X1_HEADER_SIZE; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; dev->net->netdev_ops = &int51x1_netdev_ops; return usbnet_get_endpoints(dev, intf); } static const struct driver_info int51x1_info = { .description = "Intellon usb powerline adapter", .bind = int51x1_bind, .rx_fixup = int51x1_rx_fixup, .tx_fixup = int51x1_tx_fixup, .in = 1, .out = 2, .flags = FLAG_ETHER, }; static const struct usb_device_id products[] = { { USB_DEVICE(INT51X1_VENDOR_ID, INT51X1_PRODUCT_ID), .driver_info = (unsigned long) &int51x1_info, }, {}, }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver int51x1_driver = { .name = "int51x1", .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(int51x1_driver); MODULE_AUTHOR("Peter Holik"); MODULE_DESCRIPTION("Intellon usb powerline adapter"); MODULE_LICENSE("GPL");
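/*
 * Worked examples of the tx_fixup() padding above (illustrative,
 * assuming dev->maxpacket == 64):
 *
 *   skb->len = 10:  10 + 2 byte header = 12 < 64, so need_tail =
 *                   64 - 12 + 1 = 53 and the URB is 65 bytes - one
 *                   byte past the USB packet size, so the transfer
 *                   ends without needing a ZLP.
 *   skb->len = 126: 126 + 2 = 128, a multiple of 64, so need_tail = 1
 *                   and the URB is 129 bytes, again avoiding the ZLP.
 *   skb->len = 100: 100 + 2 = 102, neither small nor aligned, so
 *                   need_tail = 0 and the URB stays at 102 bytes.
 *
 * The 16-bit length header stores skb->len + need_tail, masked to 0x07ff.
 */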
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Credentials management - see Documentation/security/credentials.rst
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#ifndef _LINUX_CRED_H
#define _LINUX_CRED_H

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/key.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/uidgid.h>
#include <linux/sched.h>
#include <linux/sched/user.h>

struct cred;
struct inode;

/*
 * COW Supplementary groups list
 */
struct group_info {
	refcount_t	usage;
	int		ngroups;
	kgid_t		gid[];
} __randomize_layout;

/**
 * get_group_info - Get a reference to a group info structure
 * @group_info: The group info to reference
 *
 * This gets a reference to a set of supplementary groups.
 *
 * If the caller is accessing a task's credentials, they must hold the RCU read
 * lock when reading.
*/ static inline struct group_info *get_group_info(struct group_info *gi) { refcount_inc(&gi->usage); return gi; } /** * put_group_info - Release a reference to a group info structure * @group_info: The group info to release */ #define put_group_info(group_info) \ do { \ if (refcount_dec_and_test(&(group_info)->usage)) \ groups_free(group_info); \ } while (0) #ifdef CONFIG_MULTIUSER extern struct group_info *groups_alloc(int); extern void groups_free(struct group_info *); extern int in_group_p(kgid_t); extern int in_egroup_p(kgid_t); extern int groups_search(const struct group_info *, kgid_t); extern int set_current_groups(struct group_info *); extern void set_groups(struct cred *, struct group_info *); extern bool may_setgroups(void); extern void groups_sort(struct group_info *); #else static inline void groups_free(struct group_info *group_info) { } static inline int in_group_p(kgid_t grp) { return 1; } static inline int in_egroup_p(kgid_t grp) { return 1; } static inline int groups_search(const struct group_info *group_info, kgid_t grp) { return 1; } #endif /* * The security context of a task * * The parts of the context break down into two categories: * * (1) The objective context of a task. These parts are used when some other * task is attempting to affect this one. * * (2) The subjective context. These details are used when the task is acting * upon another object, be that a file, a task, a key or whatever. * * Note that some members of this structure belong to both categories - the * LSM security pointer for instance. * * A task has two security pointers. task->real_cred points to the objective * context that defines that task's actual details. The objective part of this * context is used whenever that task is acted upon. * * task->cred points to the subjective context that defines the details of how * that task is going to act upon another object. This may be overridden * temporarily to point to another security context, but normally points to the * same context as task->real_cred. */ struct cred { atomic_long_t usage; kuid_t uid; /* real UID of the task */ kgid_t gid; /* real GID of the task */ kuid_t suid; /* saved UID of the task */ kgid_t sgid; /* saved GID of the task */ kuid_t euid; /* effective UID of the task */ kgid_t egid; /* effective GID of the task */ kuid_t fsuid; /* UID for VFS ops */ kgid_t fsgid; /* GID for VFS ops */ unsigned securebits; /* SUID-less security management */ kernel_cap_t cap_inheritable; /* caps our children can inherit */ kernel_cap_t cap_permitted; /* caps we're permitted */ kernel_cap_t cap_effective; /* caps we can actually use */ kernel_cap_t cap_bset; /* capability bounding set */ kernel_cap_t cap_ambient; /* Ambient capability set */ #ifdef CONFIG_KEYS unsigned char jit_keyring; /* default keyring to attach requested * keys to */ struct key *session_keyring; /* keyring inherited over fork */ struct key *process_keyring; /* keyring private to this process */ struct key *thread_keyring; /* keyring private to this thread */ struct key *request_key_auth; /* assumed request_key authority */ #endif #ifdef CONFIG_SECURITY void *security; /* LSM security */ #endif struct user_struct *user; /* real user ID subscription */ struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */ struct ucounts *ucounts; struct group_info *group_info; /* supplementary groups for euid/fsgid */ /* RCU deletion */ union { int non_rcu; /* Can we skip RCU deletion? 
*/ struct rcu_head rcu; /* RCU deletion hook */ }; } __randomize_layout; extern void __put_cred(struct cred *); extern void exit_creds(struct task_struct *); extern int copy_creds(struct task_struct *, unsigned long); extern const struct cred *get_task_cred(struct task_struct *); extern struct cred *cred_alloc_blank(void); extern struct cred *prepare_creds(void); extern struct cred *prepare_exec_creds(void); extern int commit_creds(struct cred *); extern void abort_creds(struct cred *); extern struct cred *prepare_kernel_cred(struct task_struct *); extern int set_security_override(struct cred *, u32); extern int set_security_override_from_ctx(struct cred *, const char *); extern int set_create_files_as(struct cred *, struct inode *); extern int cred_fscmp(const struct cred *, const struct cred *); extern void __init cred_init(void); extern int set_cred_ucounts(struct cred *); static inline bool cap_ambient_invariant_ok(const struct cred *cred) { return cap_issubset(cred->cap_ambient, cap_intersect(cred->cap_permitted, cred->cap_inheritable)); } static inline const struct cred *override_creds(const struct cred *override_cred) { return rcu_replace_pointer(current->cred, override_cred, 1); } static inline const struct cred *revert_creds(const struct cred *revert_cred) { return rcu_replace_pointer(current->cred, revert_cred, 1); } /** * get_cred_many - Get references on a set of credentials * @cred: The credentials to reference * @nr: Number of references to acquire * * Get references on the specified set of credentials. The caller must release * all acquired reference. If %NULL is passed, it is returned with no action. * * This is used to deal with a committed set of credentials. Although the * pointer is const, this will temporarily discard the const and increment the * usage count. The purpose of this is to attempt to catch at compile time the * accidental alteration of a set of credentials that should be considered * immutable. */ static inline const struct cred *get_cred_many(const struct cred *cred, int nr) { struct cred *nonconst_cred = (struct cred *) cred; if (!cred) return cred; nonconst_cred->non_rcu = 0; atomic_long_add(nr, &nonconst_cred->usage); return cred; } /* * get_cred - Get a reference on a set of credentials * @cred: The credentials to reference * * Get a reference on the specified set of credentials. The caller must * release the reference. If %NULL is passed, it is returned with no action. * * This is used to deal with a committed set of credentials. */ static inline const struct cred *get_cred(const struct cred *cred) { return get_cred_many(cred, 1); } static inline const struct cred *get_cred_rcu(const struct cred *cred) { struct cred *nonconst_cred = (struct cred *) cred; if (!cred) return NULL; if (!atomic_long_inc_not_zero(&nonconst_cred->usage)) return NULL; nonconst_cred->non_rcu = 0; return cred; } /** * put_cred - Release a reference to a set of credentials * @cred: The credentials to release * @nr: Number of references to release * * Release a reference to a set of credentials, deleting them when the last ref * is released. If %NULL is passed, nothing is done. * * This takes a const pointer to a set of credentials because the credentials * on task_struct are attached by const pointers to prevent accidental * alteration of otherwise immutable credential sets. 
*/ static inline void put_cred_many(const struct cred *_cred, int nr) { struct cred *cred = (struct cred *) _cred; if (cred) { if (atomic_long_sub_and_test(nr, &cred->usage)) __put_cred(cred); } } /* * put_cred - Release a reference to a set of credentials * @cred: The credentials to release * * Release a reference to a set of credentials, deleting them when the last ref * is released. If %NULL is passed, nothing is done. */ static inline void put_cred(const struct cred *cred) { put_cred_many(cred, 1); } /** * current_cred - Access the current task's subjective credentials * * Access the subjective credentials of the current task. RCU-safe, * since nobody else can modify it. */ #define current_cred() \ rcu_dereference_protected(current->cred, 1) /** * current_real_cred - Access the current task's objective credentials * * Access the objective credentials of the current task. RCU-safe, * since nobody else can modify it. */ #define current_real_cred() \ rcu_dereference_protected(current->real_cred, 1) /** * __task_cred - Access a task's objective credentials * @task: The task to query * * Access the objective credentials of a task. The caller must hold the RCU * readlock. * * The result of this function should not be passed directly to get_cred(); * rather get_task_cred() should be used instead. */ #define __task_cred(task) \ rcu_dereference((task)->real_cred) /** * get_current_cred - Get the current task's subjective credentials * * Get the subjective credentials of the current task, pinning them so that * they can't go away. Accessing the current task's credentials directly is * not permitted. */ #define get_current_cred() \ (get_cred(current_cred())) /** * get_current_user - Get the current task's user_struct * * Get the user record of the current task, pinning it so that it can't go * away. */ #define get_current_user() \ ({ \ struct user_struct *__u; \ const struct cred *__cred; \ __cred = current_cred(); \ __u = get_uid(__cred->user); \ __u; \ }) /** * get_current_groups - Get the current task's supplementary group list * * Get the supplementary group list of the current task, pinning it so that it * can't go away. 
*/ #define get_current_groups() \ ({ \ struct group_info *__groups; \ const struct cred *__cred; \ __cred = current_cred(); \ __groups = get_group_info(__cred->group_info); \ __groups; \ }) #define task_cred_xxx(task, xxx) \ ({ \ __typeof__(((struct cred *)NULL)->xxx) ___val; \ rcu_read_lock(); \ ___val = __task_cred((task))->xxx; \ rcu_read_unlock(); \ ___val; \ }) #define task_uid(task) (task_cred_xxx((task), uid)) #define task_euid(task) (task_cred_xxx((task), euid)) #define task_ucounts(task) (task_cred_xxx((task), ucounts)) #define current_cred_xxx(xxx) \ ({ \ current_cred()->xxx; \ }) #define current_uid() (current_cred_xxx(uid)) #define current_gid() (current_cred_xxx(gid)) #define current_euid() (current_cred_xxx(euid)) #define current_egid() (current_cred_xxx(egid)) #define current_suid() (current_cred_xxx(suid)) #define current_sgid() (current_cred_xxx(sgid)) #define current_fsuid() (current_cred_xxx(fsuid)) #define current_fsgid() (current_cred_xxx(fsgid)) #define current_cap() (current_cred_xxx(cap_effective)) #define current_user() (current_cred_xxx(user)) #define current_ucounts() (current_cred_xxx(ucounts)) extern struct user_namespace init_user_ns; #ifdef CONFIG_USER_NS #define current_user_ns() (current_cred_xxx(user_ns)) #else static inline struct user_namespace *current_user_ns(void) { return &init_user_ns; } #endif #define current_uid_gid(_uid, _gid) \ do { \ const struct cred *__cred; \ __cred = current_cred(); \ *(_uid) = __cred->uid; \ *(_gid) = __cred->gid; \ } while(0) #define current_euid_egid(_euid, _egid) \ do { \ const struct cred *__cred; \ __cred = current_cred(); \ *(_euid) = __cred->euid; \ *(_egid) = __cred->egid; \ } while(0) #define current_fsuid_fsgid(_fsuid, _fsgid) \ do { \ const struct cred *__cred; \ __cred = current_cred(); \ *(_fsuid) = __cred->fsuid; \ *(_fsgid) = __cred->fsgid; \ } while(0) #endif /* _LINUX_CRED_H */
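/*
 * Illustrative sketch, not part of this header: the two standard usage
 * patterns for the credential APIs declared above. The example_* names
 * and surrounding logic are invented for the example.
 */
#if 0
/* Changing the current task's own credentials: prepare, modify, commit. */
static int example_set_fsuid(kuid_t fsuid)
{
	struct cred *new = prepare_creds();

	if (!new)
		return -ENOMEM;
	new->fsuid = fsuid;
	return commit_creds(new);	/* consumes the new reference */
}

/* Temporarily acting with someone else's credentials: override, revert. */
static void example_act_as(const struct cred *cred)
{
	const struct cred *old = override_creds(cred);

	/* ... do the work under @cred's subjective context ... */
	revert_creds(old);
}
#endif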
// SPDX-License-Identifier: GPL-2.0
/*
 * HID driver for Maltron L90
 *
 * Copyright (c) 1999 Andreas Gal
 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 * Copyright (c) 2008 Jiri Slaby
 * Copyright (c) 2012 David Dillow <dave@thedillows.org>
 * Copyright (c) 2006-2013 Jiri Kosina
 * Copyright (c) 2013 Colin Leitner <colin.leitner@gmail.com>
 * Copyright (c) 2014-2016 Frank Praznik <frank.praznik@gmail.com>
 * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
 * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
 * Copyright (c) 2018 William Whistler <wtbw@wtbw.co.uk>
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

/* The original buggy USB descriptor */
static const u8 maltron_rdesc_o[] = {
	0x05, 0x01,	/* Usage Page (Generic Desktop Ctrls) */
	0x09, 0x80,	/* Usage (Sys Control) */
	0xA1, 0x01,	/* Collection (Application) */
	0x85, 0x02,	/* Report ID (2) */
	0x75, 0x01,	/* Report Size (1) */
	0x95, 0x01,	/* Report Count (1) */
	0x15, 0x00,	/* Logical Minimum (0) */
	0x25, 0x01,	/* Logical Maximum (1) */
	0x09, 0x82,	/* Usage (Sys Sleep) */
	0x81, 0x06,	/* Input (Data,Var,Rel) */
	0x09, 0x82,	/* Usage (Sys Sleep) */
	0x81, 0x06,	/* Input (Data,Var,Rel) */
	0x09, 0x83,	/* Usage (Sys Wake Up) */
	0x81, 0x06,	/* Input (Data,Var,Rel) */
	0x75, 0x05,	/* Report Size (5) */
	0x81, 0x01,	/* Input (Const,Array,Abs) */
	0xC0,		/* End Collection */
	0x05, 0x0C,	/* Usage Page (Consumer) */
	0x09, 0x01,	/* Usage (Consumer Control) */
	0xA1, 0x01,	/* Collection (Application) */
	0x85, 0x03,	/* Report ID (3) */
	0x95, 0x01,	/* Report Count (1) */
	0x75, 0x10,	/* Report Size (16) */
	0x19, 0x00,	/* Usage Minimum (Unassigned) */
	0x2A, 0xFF, 0x7F,	/* Usage Maximum (0x7FFF) */
	0x81, 0x00,	/* Input (Data,Array,Abs) */
	0xC0,		/* End Collection */
	0x06, 0x7F, 0xFF,	/* Usage Page (Vendor Defined 0xFF7F) */
	0x09, 0x01,	/* Usage (0x01) */
	0xA1, 0x01,	/* Collection (Application) */
	0x85, 0x04,	/* Report ID (4) */
	0x95, 0x01,	/* Report Count (1) */
	0x75, 0x10,	/* Report Size (16) */
	0x19, 0x00,	/* Usage Minimum (0x00) */
	0x2A, 0xFF, 0x7F,	/* Usage Maximum (0x7FFF) */
	0x81, 0x00,	/* Input (Data,Array,Abs) */
	0x75, 0x02,	/* Report Size (2) */
	0x25, 0x02,	/* Logical Maximum (2) */
	0x09, 0x90,	/* Usage (0x90) */
	0xB1, 0x02,	/* Feature (Data,Var,Abs) */
	0x75, 0x06,	/* Report Size (6) */
	0xB1, 0x01,	/* Feature (Const,Array,Abs) */
	0x75, 0x01,	/* Report Size (1) */
	0x25, 0x01,	/* Logical Maximum (1) */
	0x05, 0x08,	/* Usage Page (LEDs) */
	0x09, 0x2A,	/* Usage (On-Line) */
	0x91, 0x02,	/* Output (Data,Var,Abs) */
	0x09, 0x4B,	/* Usage (Generic Indicator) */
	0x91, 0x02,	/* Output (Data,Var,Abs) */
	0x75, 0x06,	/* Report Size (6) */
	0x95, 0x01,	/* Report Count (1) */
	0x91, 0x01,	/* Output (Const,Array,Abs) */
	0xC0		/* End Collection */
};

/* The patched descriptor, allowing media key events to be accepted as valid */
static const u8 maltron_rdesc[] = {
	0x05, 0x01,	/*
Usage Page (Generic Desktop Ctrls) */ 0x09, 0x80, /* Usage (Sys Control) */ 0xA1, 0x01, /* Collection (Application) */ 0x85, 0x02, /* Report ID (2) */ 0x75, 0x01, /* Report Size (1) */ 0x95, 0x01, /* Report Count (1) */ 0x15, 0x00, /* Logical Minimum (0) */ 0x25, 0x01, /* Logical Maximum (1) */ 0x09, 0x82, /* Usage (Sys Sleep) */ 0x81, 0x06, /* Input (Data,Var,Rel) */ 0x09, 0x82, /* Usage (Sys Sleep) */ 0x81, 0x06, /* Input (Data,Var,Rel) */ 0x09, 0x83, /* Usage (Sys Wake Up) */ 0x81, 0x06, /* Input (Data,Var,Rel) */ 0x75, 0x05, /* Report Size (5) */ 0x81, 0x01, /* Input (Const,Array,Abs) */ 0xC0, /* End Collection */ 0x05, 0x0C, /* Usage Page (Consumer) */ 0x09, 0x01, /* Usage (Consumer Control) */ 0xA1, 0x01, /* Collection (Application) */ 0x85, 0x03, /* Report ID (3) */ 0x15, 0x00, /* Logical Minimum (0) - changed */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767) - changed */ 0x95, 0x01, /* Report Count (1) */ 0x75, 0x10, /* Report Size (16) */ 0x19, 0x00, /* Usage Minimum (Unassigned) */ 0x2A, 0xFF, 0x7F, /* Usage Maximum (0x7FFF) */ 0x81, 0x00, /* Input (Data,Array,Abs) */ 0xC0, /* End Collection */ 0x06, 0x7F, 0xFF, /* Usage Page (Vendor Defined 0xFF7F) */ 0x09, 0x01, /* Usage (0x01) */ 0xA1, 0x01, /* Collection (Application) */ 0x85, 0x04, /* Report ID (4) */ 0x95, 0x01, /* Report Count (1) */ 0x75, 0x10, /* Report Size (16) */ 0x19, 0x00, /* Usage Minimum (0x00) */ 0x2A, 0xFF, 0x7F, /* Usage Maximum (0x7FFF) */ 0x81, 0x00, /* Input (Data,Array,Abs) */ 0x75, 0x02, /* Report Size (2) */ 0x25, 0x02, /* Logical Maximum (2) */ 0x09, 0x90, /* Usage (0x90) */ 0xB1, 0x02, /* Feature (Data,Var,Abs) */ 0x75, 0x06, /* Report Size (6) */ 0xB1, 0x01, /* Feature (Const,Array,Abs) */ 0x75, 0x01, /* Report Size (1) */ 0x25, 0x01, /* Logical Maximum (1) */ 0x05, 0x08, /* Usage Page (LEDs) */ 0x09, 0x2A, /* Usage (On-Line) */ 0x91, 0x02, /* Output (Data,Var,Abs) */ 0x09, 0x4B, /* Usage (Generic Indicator) */ 0x91, 0x02, /* Output (Data,Var,Abs) */ 0x75, 0x06, /* Report Size (6) */ 0x95, 0x01, /* Report Count (1) */ 0x91, 0x01, /* Output (Const,Array,Abs) */ 0xC0 /* End Collection */ }; static const __u8 *maltron_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize == sizeof(maltron_rdesc_o) && !memcmp(maltron_rdesc_o, rdesc, sizeof(maltron_rdesc_o))) { hid_info(hdev, "Replacing Maltron L90 keyboard report descriptor\n"); *rsize = sizeof(maltron_rdesc); return maltron_rdesc; } return rdesc; } static const struct hid_device_id maltron_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_MALTRON_KB)}, { } }; MODULE_DEVICE_TABLE(hid, maltron_devices); static struct hid_driver maltron_driver = { .name = "maltron", .id_table = maltron_devices, .report_fixup = maltron_report_fixup }; module_hid_driver(maltron_driver); MODULE_DESCRIPTION("HID driver for Maltron L90"); MODULE_LICENSE("GPL");
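/*
 * Why the fixup above works (illustrative note): HID logical min/max are
 * Global items, which persist across collections until restated. In the
 * original descriptor the Consumer Control report therefore inherited
 * Logical Minimum (0) / Logical Maximum (1) from the Sys Control
 * collection, so any consumer usage index above 1 was rejected as out of
 * range. The patched descriptor restates the bounds just before the
 * 16-bit usage array:
 *
 *   0x15, 0x00        Logical Minimum (0)      - 1-byte data item
 *   0x26, 0xFF, 0x7F  Logical Maximum (32767)  - 2-byte data item
 *
 * (In the item prefix byte, bits 0-1 encode the data size: 0x25 carries
 * one data byte, 0x26 carries two.)
 */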
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at the real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

/*
 * This will get enabled whenever a cpuset configuration is considered
 * unsupportable in general, e.g. a movable-only node which cannot satisfy
 * any non-movable allocations (see update_nodemask). The page allocator
 * needs to make additional checks for those configurations and this
 * check is meant to guard those checks without any overhead for sane
 * configurations.
*/ static inline bool cpusets_insane_config(void) { return static_branch_unlikely(&cpusets_insane_config_key); } extern int cpuset_init(void); extern void cpuset_init_smp(void); extern void cpuset_force_rebuild(void); extern void cpuset_update_active_cpus(void); extern void inc_dl_tasks_cs(struct task_struct *task); extern void dec_dl_tasks_cs(struct task_struct *task); extern void cpuset_lock(void); extern void cpuset_unlock(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); extern bool cpuset_cpus_allowed_fallback(struct task_struct *p); extern bool cpuset_cpu_is_isolated(int cpu); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); #define cpuset_current_mems_allowed (current->mems_allowed) void cpuset_init_current_mems_allowed(void); int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); extern bool cpuset_node_allowed(int node, gfp_t gfp_mask); static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { return cpuset_node_allowed(zone_to_nid(z), gfp_mask); } static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { if (cpusets_enabled()) return __cpuset_zone_allowed(z, gfp_mask); return true; } extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, const struct task_struct *tsk2); #ifdef CONFIG_CPUSETS_V1 #define cpuset_memory_pressure_bump() \ do { \ if (cpuset_memory_pressure_enabled) \ __cpuset_memory_pressure_bump(); \ } while (0) extern int cpuset_memory_pressure_enabled; extern void __cpuset_memory_pressure_bump(void); #else static inline void cpuset_memory_pressure_bump(void) { } #endif extern void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task); extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk); extern int cpuset_mem_spread_node(void); static inline int cpuset_do_page_mem_spread(void) { return task_spread_page(current); } extern bool current_cpuset_is_being_rebound(void); extern void dl_rebuild_rd_accounting(void); extern void rebuild_sched_domains(void); extern void cpuset_print_current_mems_allowed(void); extern void cpuset_reset_sched_domains(void); /* * read_mems_allowed_begin is required when making decisions involving * mems_allowed such as during page allocation. mems_allowed can be updated in * parallel and depending on the new value an operation can fail potentially * causing process failure. A retry loop with read_mems_allowed_begin and * read_mems_allowed_retry prevents these artificial failures. */ static inline unsigned int read_mems_allowed_begin(void) { if (!static_branch_unlikely(&cpusets_pre_enable_key)) return 0; return read_seqcount_begin(&current->mems_allowed_seq); } /* * If this returns true, the operation that took place after * read_mems_allowed_begin may have failed artificially due to a concurrent * update of mems_allowed. It is up to the caller to retry the operation if * appropriate. 
*/ static inline bool read_mems_allowed_retry(unsigned int seq) { if (!static_branch_unlikely(&cpusets_enabled_key)) return false; return read_seqcount_retry(&current->mems_allowed_seq, seq); } static inline void set_mems_allowed(nodemask_t nodemask) { unsigned long flags; task_lock(current); local_irq_save(flags); write_seqcount_begin(&current->mems_allowed_seq); current->mems_allowed = nodemask; write_seqcount_end(&current->mems_allowed_seq); local_irq_restore(flags); task_unlock(current); } #else /* !CONFIG_CPUSETS */ static inline bool cpusets_enabled(void) { return false; } static inline bool cpusets_insane_config(void) { return false; } static inline int cpuset_init(void) { return 0; } static inline void cpuset_init_smp(void) {} static inline void cpuset_force_rebuild(void) { } static inline void cpuset_update_active_cpus(void) { partition_sched_domains(1, NULL, NULL); } static inline void inc_dl_tasks_cs(struct task_struct *task) { } static inline void dec_dl_tasks_cs(struct task_struct *task) { } static inline void cpuset_lock(void) { } static inline void cpuset_unlock(void) { } static inline void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask) { cpumask_copy(mask, task_cpu_possible_mask(p)); } static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p) { return false; } static inline bool cpuset_cpu_is_isolated(int cpu) { return false; } static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) { return node_possible_map; } #define cpuset_current_mems_allowed (node_states[N_MEMORY]) static inline void cpuset_init_current_mems_allowed(void) {} static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) { return 1; } static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { return true; } static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { return true; } static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, const struct task_struct *tsk2) { return 1; } static inline void cpuset_memory_pressure_bump(void) {} static inline void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) { } static inline int cpuset_mem_spread_node(void) { return 0; } static inline int cpuset_do_page_mem_spread(void) { return 0; } static inline bool current_cpuset_is_being_rebound(void) { return false; } static inline void dl_rebuild_rd_accounting(void) { } static inline void rebuild_sched_domains(void) { partition_sched_domains(1, NULL, NULL); } static inline void cpuset_reset_sched_domains(void) { partition_sched_domains(1, NULL, NULL); } static inline void cpuset_print_current_mems_allowed(void) { } static inline void set_mems_allowed(nodemask_t nodemask) { } static inline unsigned int read_mems_allowed_begin(void) { return 0; } static inline bool read_mems_allowed_retry(unsigned int seq) { return false; } #endif /* !CONFIG_CPUSETS */ #endif /* _LINUX_CPUSET_H */
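/*
 * Illustrative sketch, not part of this header: the retry loop that the
 * comments above describe for read_mems_allowed_begin()/_retry(). The
 * helper name is invented; the pattern matches how allocation paths use
 * the mems_allowed seqcount.
 */
#if 0
static struct page *example_alloc_respecting_mems_allowed(gfp_t gfp)
{
	struct page *page;
	unsigned int cpuset_mems_cookie;

	do {
		cpuset_mems_cookie = read_mems_allowed_begin();
		page = alloc_page(gfp);
		/*
		 * A failure here may be an artifact of a concurrent
		 * cpuset update; if so, retry with the new mems_allowed.
		 */
	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

	return page;
}
#endif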
// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/wakeup.c - System wakeup events framework
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/power.h>

#include "power.h"

#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
		srcu_read_lock_held(&wakeup_srcu))

/*
 * If set, the suspend/hibernate code will abort transitions to a sleep state
 * if wakeup events are registered during or immediately before the transition.
 */
bool events_check_enabled __read_mostly;

/* First wakeup IRQ seen by the kernel in the last cycle. */
static unsigned int wakeup_irq[2] __read_mostly;
static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);

/* If greater than 0 and the system is suspending, terminate the suspend. */
static atomic_t pm_abort_suspend __read_mostly;

/*
 * Combined counters of registered wakeup events and wakeup events in progress.
 * They need to be modified together atomically, so it's better to use one
 * atomic variable to hold them both.
 */
static atomic_t combined_event_count = ATOMIC_INIT(0);

#define IN_PROGRESS_BITS	(sizeof(int) * 4)
#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)

static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
	unsigned int comb = atomic_read(&combined_event_count);

	*cnt = (comb >> IN_PROGRESS_BITS);
	*inpr = comb & MAX_IN_PROGRESS;
}

/* A preserved old value of the events counter.
*/ static unsigned int saved_count; static DEFINE_RAW_SPINLOCK(events_lock); static void pm_wakeup_timer_fn(struct timer_list *t); static LIST_HEAD(wakeup_sources); static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); DEFINE_STATIC_SRCU(wakeup_srcu); static struct wakeup_source deleted_ws = { .name = "deleted", .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock), }; static DEFINE_IDA(wakeup_ida); /** * wakeup_source_create - Create a struct wakeup_source object. * @name: Name of the new wakeup source. */ struct wakeup_source *wakeup_source_create(const char *name) { struct wakeup_source *ws; const char *ws_name; int id; ws = kzalloc(sizeof(*ws), GFP_KERNEL); if (!ws) goto err_ws; ws_name = kstrdup_const(name, GFP_KERNEL); if (!ws_name) goto err_name; ws->name = ws_name; id = ida_alloc(&wakeup_ida, GFP_KERNEL); if (id < 0) goto err_id; ws->id = id; return ws; err_id: kfree_const(ws->name); err_name: kfree(ws); err_ws: return NULL; } EXPORT_SYMBOL_GPL(wakeup_source_create); /* * Record wakeup_source statistics being deleted into a dummy wakeup_source. */ static void wakeup_source_record(struct wakeup_source *ws) { unsigned long flags; spin_lock_irqsave(&deleted_ws.lock, flags); if (ws->event_count) { deleted_ws.total_time = ktime_add(deleted_ws.total_time, ws->total_time); deleted_ws.prevent_sleep_time = ktime_add(deleted_ws.prevent_sleep_time, ws->prevent_sleep_time); deleted_ws.max_time = ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ? deleted_ws.max_time : ws->max_time; deleted_ws.event_count += ws->event_count; deleted_ws.active_count += ws->active_count; deleted_ws.relax_count += ws->relax_count; deleted_ws.expire_count += ws->expire_count; deleted_ws.wakeup_count += ws->wakeup_count; } spin_unlock_irqrestore(&deleted_ws.lock, flags); } static void wakeup_source_free(struct wakeup_source *ws) { ida_free(&wakeup_ida, ws->id); kfree_const(ws->name); kfree(ws); } /** * wakeup_source_destroy - Destroy a struct wakeup_source object. * @ws: Wakeup source to destroy. * * Use only for wakeup source objects created with wakeup_source_create(). */ void wakeup_source_destroy(struct wakeup_source *ws) { if (!ws) return; __pm_relax(ws); wakeup_source_record(ws); wakeup_source_free(ws); } EXPORT_SYMBOL_GPL(wakeup_source_destroy); /** * wakeup_source_add - Add given object to the list of wakeup sources. * @ws: Wakeup source object to add to the list. */ void wakeup_source_add(struct wakeup_source *ws) { unsigned long flags; if (WARN_ON(!ws)) return; spin_lock_init(&ws->lock); timer_setup(&ws->timer, pm_wakeup_timer_fn, 0); ws->active = false; raw_spin_lock_irqsave(&events_lock, flags); list_add_rcu(&ws->entry, &wakeup_sources); raw_spin_unlock_irqrestore(&events_lock, flags); } EXPORT_SYMBOL_GPL(wakeup_source_add); /** * wakeup_source_remove - Remove given object from the wakeup sources list. * @ws: Wakeup source object to remove from the list. */ void wakeup_source_remove(struct wakeup_source *ws) { unsigned long flags; if (WARN_ON(!ws)) return; raw_spin_lock_irqsave(&events_lock, flags); list_del_rcu(&ws->entry); raw_spin_unlock_irqrestore(&events_lock, flags); synchronize_srcu(&wakeup_srcu); timer_delete_sync(&ws->timer); /* * Clear timer.function to make wakeup_source_not_registered() treat * this wakeup source as not registered. */ ws->timer.function = NULL; } EXPORT_SYMBOL_GPL(wakeup_source_remove); /** * wakeup_source_register - Create wakeup source and add it to the list. * @dev: Device this wakeup source is associated with (or NULL if virtual). 
* @name: Name of the wakeup source to register. */ struct wakeup_source *wakeup_source_register(struct device *dev, const char *name) { struct wakeup_source *ws; int ret; ws = wakeup_source_create(name); if (ws) { if (!dev || device_is_registered(dev)) { ret = wakeup_source_sysfs_add(dev, ws); if (ret) { wakeup_source_free(ws); return NULL; } } wakeup_source_add(ws); } return ws; } EXPORT_SYMBOL_GPL(wakeup_source_register); /** * wakeup_source_unregister - Remove wakeup source from the list and remove it. * @ws: Wakeup source object to unregister. */ void wakeup_source_unregister(struct wakeup_source *ws) { if (ws) { wakeup_source_remove(ws); if (ws->dev) wakeup_source_sysfs_remove(ws); wakeup_source_destroy(ws); } } EXPORT_SYMBOL_GPL(wakeup_source_unregister); /** * wakeup_sources_read_lock - Lock wakeup source list for read. * * Returns an index of srcu lock for struct wakeup_srcu. * This index must be passed to the matching wakeup_sources_read_unlock(). */ int wakeup_sources_read_lock(void) { return srcu_read_lock(&wakeup_srcu); } EXPORT_SYMBOL_GPL(wakeup_sources_read_lock); /** * wakeup_sources_read_unlock - Unlock wakeup source list. * @idx: return value from corresponding wakeup_sources_read_lock() */ void wakeup_sources_read_unlock(int idx) { srcu_read_unlock(&wakeup_srcu, idx); } EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock); /** * wakeup_sources_walk_start - Begin a walk on wakeup source list * * Returns first object of the list of wakeup sources. * * Note that to be safe, wakeup sources list needs to be locked by calling * wakeup_source_read_lock() for this. */ struct wakeup_source *wakeup_sources_walk_start(void) { struct list_head *ws_head = &wakeup_sources; return list_entry_rcu(ws_head->next, struct wakeup_source, entry); } EXPORT_SYMBOL_GPL(wakeup_sources_walk_start); /** * wakeup_sources_walk_next - Get next wakeup source from the list * @ws: Previous wakeup source object * * Note that to be safe, wakeup sources list needs to be locked by calling * wakeup_source_read_lock() for this. */ struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws) { struct list_head *ws_head = &wakeup_sources; return list_next_or_null_rcu(ws_head, &ws->entry, struct wakeup_source, entry); } EXPORT_SYMBOL_GPL(wakeup_sources_walk_next); /** * device_wakeup_attach - Attach a wakeup source object to a device object. * @dev: Device to handle. * @ws: Wakeup source object to attach to @dev. * * This causes @dev to be treated as a wakeup device. */ static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws) { spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { spin_unlock_irq(&dev->power.lock); return -EEXIST; } dev->power.wakeup = ws; if (dev->power.wakeirq) device_wakeup_attach_irq(dev, dev->power.wakeirq); spin_unlock_irq(&dev->power.lock); return 0; } /** * device_wakeup_enable - Enable given device to be a wakeup source. * @dev: Device to handle. * * Create a wakeup source object, register it and attach it to @dev. 
*/ int device_wakeup_enable(struct device *dev) { struct wakeup_source *ws; int ret; if (!dev || !dev->power.can_wakeup) return -EINVAL; if (pm_suspend_target_state != PM_SUSPEND_ON) dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__); ws = wakeup_source_register(dev, dev_name(dev)); if (!ws) return -ENOMEM; ret = device_wakeup_attach(dev, ws); if (ret) wakeup_source_unregister(ws); return ret; } EXPORT_SYMBOL_GPL(device_wakeup_enable); /** * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source * @dev: Device to handle * @wakeirq: Device specific wakeirq entry * * Attach a device wakeirq to the wakeup source so the device * wake IRQ can be configured automatically for suspend and * resume. * * Call under the device's power.lock lock. */ void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq) { struct wakeup_source *ws; ws = dev->power.wakeup; if (!ws) return; if (ws->wakeirq) dev_err(dev, "Leftover wakeup IRQ found, overriding\n"); ws->wakeirq = wakeirq; } /** * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source * @dev: Device to handle * * Removes a device wakeirq from the wakeup source. * * Call under the device's power.lock lock. */ void device_wakeup_detach_irq(struct device *dev) { struct wakeup_source *ws; ws = dev->power.wakeup; if (ws) ws->wakeirq = NULL; } /** * device_wakeup_arm_wake_irqs - * * Iterates over the list of device wakeirqs to arm them. */ void device_wakeup_arm_wake_irqs(void) { struct wakeup_source *ws; int srcuidx; srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) dev_pm_arm_wake_irq(ws->wakeirq); srcu_read_unlock(&wakeup_srcu, srcuidx); } /** * device_wakeup_disarm_wake_irqs - * * Iterates over the list of device wakeirqs to disarm them. */ void device_wakeup_disarm_wake_irqs(void) { struct wakeup_source *ws; int srcuidx; srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) dev_pm_disarm_wake_irq(ws->wakeirq); srcu_read_unlock(&wakeup_srcu, srcuidx); } /** * device_wakeup_detach - Detach a device's wakeup source object from it. * @dev: Device to detach the wakeup source object from. * * After it returns, @dev will not be treated as a wakeup device any more. */ static struct wakeup_source *device_wakeup_detach(struct device *dev) { struct wakeup_source *ws; spin_lock_irq(&dev->power.lock); ws = dev->power.wakeup; dev->power.wakeup = NULL; spin_unlock_irq(&dev->power.lock); return ws; } /** * device_wakeup_disable - Do not regard a device as a wakeup source any more. * @dev: Device to handle. * * Detach the @dev's wakeup source object from it, unregister this wakeup source * object and destroy it. */ void device_wakeup_disable(struct device *dev) { struct wakeup_source *ws; if (!dev || !dev->power.can_wakeup) return; ws = device_wakeup_detach(dev); wakeup_source_unregister(ws); } EXPORT_SYMBOL_GPL(device_wakeup_disable); /** * device_set_wakeup_capable - Set/reset device wakeup capability flag. * @dev: Device to handle. * @capable: Whether or not @dev is capable of waking up the system from sleep. * * If @capable is set, set the @dev's power.can_wakeup flag and add its * wakeup-related attributes to sysfs. Otherwise, unset the @dev's * power.can_wakeup flag and remove its wakeup-related attributes from sysfs. * * This function may sleep and it can't be called from any context where * sleeping is not allowed. 
*/ void device_set_wakeup_capable(struct device *dev, bool capable) { if (!!dev->power.can_wakeup == !!capable) return; dev->power.can_wakeup = capable; if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { if (capable) { int ret = wakeup_sysfs_add(dev); if (ret) dev_info(dev, "Wakeup sysfs attributes not added\n"); } else { wakeup_sysfs_remove(dev); } } } EXPORT_SYMBOL_GPL(device_set_wakeup_capable); /** * device_set_wakeup_enable - Enable or disable a device to wake up the system. * @dev: Device to handle. * @enable: enable/disable flag */ int device_set_wakeup_enable(struct device *dev, bool enable) { if (enable) return device_wakeup_enable(dev); device_wakeup_disable(dev); return 0; } EXPORT_SYMBOL_GPL(device_set_wakeup_enable); /** * wakeup_source_not_registered - validate the given wakeup source. * @ws: Wakeup source to be validated. */ static bool wakeup_source_not_registered(struct wakeup_source *ws) { /* * Use timer struct to check if the given source is initialized * by wakeup_source_add. */ return ws->timer.function != pm_wakeup_timer_fn; } /* * The functions below use the observation that each wakeup event starts a * period in which the system should not be suspended. The moment this period * will end depends on how the wakeup event is going to be processed after being * detected and all of the possible cases can be divided into two distinct * groups. * * First, a wakeup event may be detected by the same functional unit that will * carry out the entire processing of it and possibly will pass it to user space * for further processing. In that case the functional unit that has detected * the event may later "close" the "no suspend" period associated with it * directly as soon as it has been dealt with. The pair of pm_stay_awake() and * pm_relax(), balanced with each other, is supposed to be used in such * situations. * * Second, a wakeup event may be detected by one functional unit and processed * by another one. In that case the unit that has detected it cannot really * "close" the "no suspend" period associated with it, unless it knows in * advance what's going to happen to the event during processing. This * knowledge, however, may not be available to it, so it can simply specify time * to wait before the system can be suspended and pass it as the second * argument of pm_wakeup_event(). * * It is valid to call pm_relax() after pm_wakeup_event(), in which case the * "no suspend" period will be ended either by the pm_relax(), or by the timer * function executed when the timer expires, whichever comes first. */ /** * wakeup_source_activate - Mark given wakeup source as active. * @ws: Wakeup source to handle. * * Update the @ws' statistics and, if @ws has just been activated, notify the PM * core of the event by incrementing the counter of the wakeup events being * processed. */ static void wakeup_source_activate(struct wakeup_source *ws) { unsigned int cec; if (WARN_ONCE(wakeup_source_not_registered(ws), "unregistered wakeup source\n")) return; ws->active = true; ws->active_count++; ws->last_time = ktime_get(); if (ws->autosleep_enabled) ws->start_prevent_time = ws->last_time; /* Increment the counter of events in progress. */ cec = atomic_inc_return(&combined_event_count); trace_wakeup_source_activate(ws->name, cec); } /** * wakeup_source_report_event - Report wakeup event using the given source. * @ws: Wakeup source to report the event for. * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. 
*/ static void wakeup_source_report_event(struct wakeup_source *ws, bool hard) { ws->event_count++; /* This is racy, but the counter is approximate anyway. */ if (events_check_enabled) ws->wakeup_count++; if (!ws->active) wakeup_source_activate(ws); if (hard) pm_system_wakeup(); } /** * __pm_stay_awake - Notify the PM core of a wakeup event. * @ws: Wakeup source object associated with the source of the event. * * It is safe to call this function from interrupt context. */ void __pm_stay_awake(struct wakeup_source *ws) { unsigned long flags; if (!ws) return; spin_lock_irqsave(&ws->lock, flags); wakeup_source_report_event(ws, false); timer_delete(&ws->timer); ws->timer_expires = 0; spin_unlock_irqrestore(&ws->lock, flags); } EXPORT_SYMBOL_GPL(__pm_stay_awake); /** * pm_stay_awake - Notify the PM core that a wakeup event is being processed. * @dev: Device the wakeup event is related to. * * Notify the PM core of a wakeup event (signaled by @dev) by calling * __pm_stay_awake for the @dev's wakeup source object. * * Call this function after detecting of a wakeup event if pm_relax() is going * to be called directly after processing the event (and possibly passing it to * user space for further processing). */ void pm_stay_awake(struct device *dev) { unsigned long flags; if (!dev) return; spin_lock_irqsave(&dev->power.lock, flags); __pm_stay_awake(dev->power.wakeup); spin_unlock_irqrestore(&dev->power.lock, flags); } EXPORT_SYMBOL_GPL(pm_stay_awake); #ifdef CONFIG_PM_AUTOSLEEP static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now) { ktime_t delta = ktime_sub(now, ws->start_prevent_time); ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta); } #else static inline void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now) {} #endif /** * wakeup_source_deactivate - Mark given wakeup source as inactive. * @ws: Wakeup source to handle. * * Update the @ws' statistics and notify the PM core that the wakeup source has * become inactive by decrementing the counter of wakeup events being processed * and incrementing the counter of registered wakeup events. */ static void wakeup_source_deactivate(struct wakeup_source *ws) { unsigned int cnt, inpr, cec; ktime_t duration; ktime_t now; ws->relax_count++; /* * __pm_relax() may be called directly or from a timer function. * If it is called directly right after the timer function has been * started, but before the timer function calls __pm_relax(), it is * possible that __pm_stay_awake() will be called in the meantime and * will set ws->active. Then, ws->active may be cleared immediately * by the __pm_relax() called from the timer function, but in such a * case ws->relax_count will be different from ws->active_count. */ if (ws->relax_count != ws->active_count) { ws->relax_count--; return; } ws->active = false; now = ktime_get(); duration = ktime_sub(now, ws->last_time); ws->total_time = ktime_add(ws->total_time, duration); if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time)) ws->max_time = duration; ws->last_time = now; timer_delete(&ws->timer); ws->timer_expires = 0; if (ws->autosleep_enabled) update_prevent_sleep_time(ws, now); /* * Increment the counter of registered wakeup events and decrement the * counter of wakeup events in progress simultaneously. 
*/ cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count); trace_wakeup_source_deactivate(ws->name, cec); split_counters(&cnt, &inpr); if (!inpr && waitqueue_active(&wakeup_count_wait_queue)) wake_up(&wakeup_count_wait_queue); } /** * __pm_relax - Notify the PM core that processing of a wakeup event has ended. * @ws: Wakeup source object associated with the source of the event. * * Call this function for wakeup events whose processing started with calling * __pm_stay_awake(). * * It is safe to call it from interrupt context. */ void __pm_relax(struct wakeup_source *ws) { unsigned long flags; if (!ws) return; spin_lock_irqsave(&ws->lock, flags); if (ws->active) wakeup_source_deactivate(ws); spin_unlock_irqrestore(&ws->lock, flags); } EXPORT_SYMBOL_GPL(__pm_relax); /** * pm_relax - Notify the PM core that processing of a wakeup event has ended. * @dev: Device that signaled the event. * * Execute __pm_relax() for the @dev's wakeup source object. */ void pm_relax(struct device *dev) { unsigned long flags; if (!dev) return; spin_lock_irqsave(&dev->power.lock, flags); __pm_relax(dev->power.wakeup); spin_unlock_irqrestore(&dev->power.lock, flags); } EXPORT_SYMBOL_GPL(pm_relax); /** * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. * @t: timer list * * Call wakeup_source_deactivate() for the wakeup source whose address is stored * in @data if it is currently active and its timer has not been canceled and * the expiration time of the timer is not in future. */ static void pm_wakeup_timer_fn(struct timer_list *t) { struct wakeup_source *ws = from_timer(ws, t, timer); unsigned long flags; spin_lock_irqsave(&ws->lock, flags); if (ws->active && ws->timer_expires && time_after_eq(jiffies, ws->timer_expires)) { wakeup_source_deactivate(ws); ws->expire_count++; } spin_unlock_irqrestore(&ws->lock, flags); } /** * pm_wakeup_ws_event - Notify the PM core of a wakeup event. * @ws: Wakeup source object associated with the event source. * @msec: Anticipated event processing time (in milliseconds). * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. * * Notify the PM core of a wakeup event whose source is @ws that will take * approximately @msec milliseconds to be processed by the kernel. If @ws is * not active, activate it. If @msec is nonzero, set up the @ws' timer to * execute pm_wakeup_timer_fn() in future. * * It is safe to call this function from interrupt context. */ void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard) { unsigned long flags; unsigned long expires; if (!ws) return; spin_lock_irqsave(&ws->lock, flags); wakeup_source_report_event(ws, hard); if (!msec) { wakeup_source_deactivate(ws); goto unlock; } expires = jiffies + msecs_to_jiffies(msec); if (!expires) expires = 1; if (!ws->timer_expires || time_after(expires, ws->timer_expires)) { mod_timer(&ws->timer, expires); ws->timer_expires = expires; } unlock: spin_unlock_irqrestore(&ws->lock, flags); } EXPORT_SYMBOL_GPL(pm_wakeup_ws_event); /** * pm_wakeup_dev_event - Notify the PM core of a wakeup event. * @dev: Device the wakeup event is related to. * @msec: Anticipated event processing time (in milliseconds). * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. * * Call pm_wakeup_ws_event() for the @dev's wakeup source object. 
*/ void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard) { unsigned long flags; if (!dev) return; spin_lock_irqsave(&dev->power.lock, flags); pm_wakeup_ws_event(dev->power.wakeup, msec, hard); spin_unlock_irqrestore(&dev->power.lock, flags); } EXPORT_SYMBOL_GPL(pm_wakeup_dev_event); void pm_print_active_wakeup_sources(void) { struct wakeup_source *ws; int srcuidx, active = 0; struct wakeup_source *last_activity_ws = NULL; srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) { if (ws->active) { pm_pr_dbg("active wakeup source: %s\n", ws->name); active = 1; } else if (!active && (!last_activity_ws || ktime_to_ns(ws->last_time) > ktime_to_ns(last_activity_ws->last_time))) { last_activity_ws = ws; } } if (!active && last_activity_ws) pm_pr_dbg("last active wakeup source: %s\n", last_activity_ws->name); srcu_read_unlock(&wakeup_srcu, srcuidx); } EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources); /** * pm_wakeup_pending - Check if power transition in progress should be aborted. * * Compare the current number of registered wakeup events with its preserved * value from the past and return true if new wakeup events have been registered * since the old value was stored. Also return true if the current number of * wakeup events being processed is different from zero. */ bool pm_wakeup_pending(void) { unsigned long flags; bool ret = false; raw_spin_lock_irqsave(&events_lock, flags); if (events_check_enabled) { unsigned int cnt, inpr; split_counters(&cnt, &inpr); ret = (cnt != saved_count || inpr > 0); events_check_enabled = !ret; } raw_spin_unlock_irqrestore(&events_lock, flags); if (ret) { pm_pr_dbg("Wakeup pending, aborting suspend\n"); pm_print_active_wakeup_sources(); } return ret || atomic_read(&pm_abort_suspend) > 0; } EXPORT_SYMBOL_GPL(pm_wakeup_pending); void pm_system_wakeup(void) { atomic_inc(&pm_abort_suspend); s2idle_wake(); } EXPORT_SYMBOL_GPL(pm_system_wakeup); void pm_system_cancel_wakeup(void) { atomic_dec_if_positive(&pm_abort_suspend); } void pm_wakeup_clear(unsigned int irq_number) { raw_spin_lock_irq(&wakeup_irq_lock); if (irq_number && wakeup_irq[0] == irq_number) wakeup_irq[0] = wakeup_irq[1]; else wakeup_irq[0] = 0; wakeup_irq[1] = 0; raw_spin_unlock_irq(&wakeup_irq_lock); if (!irq_number) atomic_set(&pm_abort_suspend, 0); } void pm_system_irq_wakeup(unsigned int irq_number) { unsigned long flags; raw_spin_lock_irqsave(&wakeup_irq_lock, flags); if (wakeup_irq[0] == 0) wakeup_irq[0] = irq_number; else if (wakeup_irq[1] == 0) wakeup_irq[1] = irq_number; else irq_number = 0; pm_pr_dbg("Triggering wakeup from IRQ %d\n", irq_number); raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags); if (irq_number) pm_system_wakeup(); } unsigned int pm_wakeup_irq(void) { return wakeup_irq[0]; } /** * pm_get_wakeup_count - Read the number of registered wakeup events. * @count: Address to store the value at. * @block: Whether or not to block. * * Store the number of registered wakeup events at the address in @count. If * @block is set, block until the current number of wakeup events being * processed is zero. * * Return 'false' if the current number of wakeup events being processed is * nonzero. Otherwise return 'true'. 
*/ bool pm_get_wakeup_count(unsigned int *count, bool block) { unsigned int cnt, inpr; if (block) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(&wakeup_count_wait_queue, &wait, TASK_INTERRUPTIBLE); split_counters(&cnt, &inpr); if (inpr == 0 || signal_pending(current)) break; pm_print_active_wakeup_sources(); schedule(); } finish_wait(&wakeup_count_wait_queue, &wait); } split_counters(&cnt, &inpr); *count = cnt; return !inpr; } /** * pm_save_wakeup_count - Save the current number of registered wakeup events. * @count: Value to compare with the current number of registered wakeup events. * * If @count is equal to the current number of registered wakeup events and the * current number of wakeup events being processed is zero, store @count as the * old number of registered wakeup events for pm_check_wakeup_events(), enable * wakeup events detection and return 'true'. Otherwise disable wakeup events * detection and return 'false'. */ bool pm_save_wakeup_count(unsigned int count) { unsigned int cnt, inpr; unsigned long flags; events_check_enabled = false; raw_spin_lock_irqsave(&events_lock, flags); split_counters(&cnt, &inpr); if (cnt == count && inpr == 0) { saved_count = count; events_check_enabled = true; } raw_spin_unlock_irqrestore(&events_lock, flags); return events_check_enabled; } #ifdef CONFIG_PM_AUTOSLEEP /** * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources. * @set: Whether to set or to clear the autosleep_enabled flags. */ void pm_wakep_autosleep_enabled(bool set) { struct wakeup_source *ws; ktime_t now = ktime_get(); int srcuidx; srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) { spin_lock_irq(&ws->lock); if (ws->autosleep_enabled != set) { ws->autosleep_enabled = set; if (ws->active) { if (set) ws->start_prevent_time = now; else update_prevent_sleep_time(ws, now); } } spin_unlock_irq(&ws->lock); } srcu_read_unlock(&wakeup_srcu, srcuidx); } #endif /* CONFIG_PM_AUTOSLEEP */ /** * print_wakeup_source_stats - Print wakeup source statistics information. * @m: seq_file to print the statistics into. * @ws: Wakeup source object to print the statistics for. 
*/ static int print_wakeup_source_stats(struct seq_file *m, struct wakeup_source *ws) { unsigned long flags; ktime_t total_time; ktime_t max_time; unsigned long active_count; ktime_t active_time; ktime_t prevent_sleep_time; spin_lock_irqsave(&ws->lock, flags); total_time = ws->total_time; max_time = ws->max_time; prevent_sleep_time = ws->prevent_sleep_time; active_count = ws->active_count; if (ws->active) { ktime_t now = ktime_get(); active_time = ktime_sub(now, ws->last_time); total_time = ktime_add(total_time, active_time); if (active_time > max_time) max_time = active_time; if (ws->autosleep_enabled) prevent_sleep_time = ktime_add(prevent_sleep_time, ktime_sub(now, ws->start_prevent_time)); } else { active_time = 0; } seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n", ws->name, active_count, ws->event_count, ws->wakeup_count, ws->expire_count, ktime_to_ms(active_time), ktime_to_ms(total_time), ktime_to_ms(max_time), ktime_to_ms(ws->last_time), ktime_to_ms(prevent_sleep_time)); spin_unlock_irqrestore(&ws->lock, flags); return 0; } static void *wakeup_sources_stats_seq_start(struct seq_file *m, loff_t *pos) { struct wakeup_source *ws; loff_t n = *pos; int *srcuidx = m->private; if (n == 0) { seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t" "expire_count\tactive_since\ttotal_time\tmax_time\t" "last_change\tprevent_suspend_time\n"); } *srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) { if (n-- <= 0) return ws; } return NULL; } static void *wakeup_sources_stats_seq_next(struct seq_file *m, void *v, loff_t *pos) { struct wakeup_source *ws = v; struct wakeup_source *next_ws = NULL; ++(*pos); list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) { next_ws = ws; break; } if (!next_ws) print_wakeup_source_stats(m, &deleted_ws); return next_ws; } static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v) { int *srcuidx = m->private; srcu_read_unlock(&wakeup_srcu, *srcuidx); } /** * wakeup_sources_stats_seq_show - Print wakeup sources statistics information. * @m: seq_file to print the statistics into. * @v: wakeup_source of each iteration */ static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v) { struct wakeup_source *ws = v; print_wakeup_source_stats(m, ws); return 0; } static const struct seq_operations wakeup_sources_stats_seq_ops = { .start = wakeup_sources_stats_seq_start, .next = wakeup_sources_stats_seq_next, .stop = wakeup_sources_stats_seq_stop, .show = wakeup_sources_stats_seq_show, }; static int wakeup_sources_stats_open(struct inode *inode, struct file *file) { return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int)); } static const struct file_operations wakeup_sources_stats_fops = { .owner = THIS_MODULE, .open = wakeup_sources_stats_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static int __init wakeup_sources_debugfs_init(void) { debugfs_create_file("wakeup_sources", 0444, NULL, NULL, &wakeup_sources_stats_fops); return 0; } postcore_initcall(wakeup_sources_debugfs_init);
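The long comment block in wakeup.c above describes two driver-side usage patterns. The sketch below is illustrative only and not part of wakeup.c: "my_chip" and both handlers are hypothetical, and it assumes the device was made wakeup-capable in probe via device_set_wakeup_capable(dev, true) followed by device_wakeup_enable(dev), so that dev->power.wakeup is populated.

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct my_chip {
	struct device *dev;
};

/* First pattern: the unit that detects the event also finishes processing
 * it, so it brackets the "no suspend" period itself with the balanced
 * pm_stay_awake()/pm_relax() pair. */
static irqreturn_t my_chip_irq(int irq, void *data)
{
	struct my_chip *chip = data;

	pm_stay_awake(chip->dev);
	/* ... process the event completely ... */
	pm_relax(chip->dev);
	return IRQ_HANDLED;
}

/* Second pattern: processing continues in another unit (e.g. user space),
 * so this handler cannot close the period itself; it reports an anticipated
 * processing time instead. The wakeup source is deactivated either by the
 * timer or by an earlier pm_relax(), whichever comes first. */
static irqreturn_t my_chip_wake_irq(int irq, void *data)
{
	struct my_chip *chip = data;

	pm_wakeup_dev_event(chip->dev, 500, false);	/* ~500 ms estimate (assumed) */
	return IRQ_HANDLED;
}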
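pm_get_wakeup_count() and pm_save_wakeup_count() back the /sys/power/wakeup_count interface. A user-space sketch of the race-free suspend protocol they implement follows; error handling is trimmed and the helper name is made up, so treat it as an illustration rather than a reference client.

#include <fcntl.h>
#include <unistd.h>

/* Read wakeup_count (the read blocks until no wakeup events are in
 * progress), write the same value back to arm the check, then request
 * suspend. The write-back fails if new wakeup events were registered in
 * between, and the suspend itself is aborted if pm_wakeup_pending() sees
 * events after the write. */
static int try_suspend(void)
{
	char buf[32];
	ssize_t len;
	int fd = open("/sys/power/wakeup_count", O_RDWR);

	if (fd < 0)
		return -1;
	len = read(fd, buf, sizeof(buf));
	if (len <= 0 || write(fd, buf, len) < 0) {
		close(fd);		/* lost the race; retry later */
		return -1;
	}
	close(fd);

	fd = open("/sys/power/state", O_WRONLY);
	if (fd < 0)
		return -1;
	len = write(fd, "mem", 3);	/* may still fail if wakeup is pending */
	close(fd);
	return len == 3 ? 0 : -1;
}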
// SPDX-License-Identifier: GPL-2.0+ /****************************************************************************** * speedtch.c - Alcatel SpeedTouch USB xDSL modem driver * * Copyright (C) 2001, Alcatel * Copyright (C) 2003, Duncan Sands * Copyright (C) 2004, David Woodhouse * * Based on "modem_run.c", copyright (C) 2001, Benoit Papillault ******************************************************************************/ #include <asm/page.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/firmware.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/usb/ch9.h> #include <linux/workqueue.h> #include "usbatm.h" #define DRIVER_AUTHOR "Johan Verrept, Duncan Sands <duncan.sands@free.fr>" #define DRIVER_DESC "Alcatel SpeedTouch USB driver" static const char speedtch_driver_name[] = "speedtch"; #define CTRL_TIMEOUT 2000 /* milliseconds */ #define DATA_TIMEOUT 2000 /* milliseconds */ #define OFFSET_7 0 /* size 1 */ #define OFFSET_b 1 /* size 8 */ #define OFFSET_d 9 /* size 4 */ #define OFFSET_e 13 /* size 1 */ #define OFFSET_f 14 /* size 1 */ #define SIZE_7 1 #define SIZE_b 8 #define SIZE_d 4 #define SIZE_e 1 #define SIZE_f 1 #define MIN_POLL_DELAY 5000 /* milliseconds */ #define MAX_POLL_DELAY 60000 /* milliseconds */ #define RESUBMIT_DELAY 1000 /* milliseconds */ #define DEFAULT_BULK_ALTSETTING 1 #define DEFAULT_ISOC_ALTSETTING 3 #define DEFAULT_DL_512_FIRST 0 #define DEFAULT_ENABLE_ISOC 0 #define DEFAULT_SW_BUFFERING 0 static unsigned int altsetting = 0; /* zero means: use the default */ static bool dl_512_first = DEFAULT_DL_512_FIRST; static bool enable_isoc = DEFAULT_ENABLE_ISOC; static bool sw_buffering = DEFAULT_SW_BUFFERING; #define DEFAULT_B_MAX_DSL 8128 #define DEFAULT_MODEM_MODE 11 #define MODEM_OPTION_LENGTH 16 static const unsigned char DEFAULT_MODEM_OPTION[MODEM_OPTION_LENGTH] = { 0x10, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static unsigned int BMaxDSL = DEFAULT_B_MAX_DSL; static unsigned char ModemMode = DEFAULT_MODEM_MODE; static unsigned char ModemOption[MODEM_OPTION_LENGTH]; static unsigned int num_ModemOption; module_param(altsetting, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(altsetting, "Alternative setting for data interface (bulk_default: " __MODULE_STRING(DEFAULT_BULK_ALTSETTING) "; isoc_default: " __MODULE_STRING(DEFAULT_ISOC_ALTSETTING) ")"); module_param(dl_512_first, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dl_512_first, "Read 512 bytes before sending firmware (default: " __MODULE_STRING(DEFAULT_DL_512_FIRST) ")"); module_param(enable_isoc, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(enable_isoc, "Use isochronous transfers if available (default: " __MODULE_STRING(DEFAULT_ENABLE_ISOC) ")"); module_param(sw_buffering, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(sw_buffering, "Enable software buffering (default: " __MODULE_STRING(DEFAULT_SW_BUFFERING) ")"); module_param(BMaxDSL, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(BMaxDSL, "default: " __MODULE_STRING(DEFAULT_B_MAX_DSL)); module_param(ModemMode, byte, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ModemMode, "default: " __MODULE_STRING(DEFAULT_MODEM_MODE)); module_param_array(ModemOption, byte, &num_ModemOption, S_IRUGO); MODULE_PARM_DESC(ModemOption, 
"default: 0x10,0x00,0x00,0x00,0x20"); #define INTERFACE_DATA 1 #define ENDPOINT_INT 0x81 #define ENDPOINT_BULK_DATA 0x07 #define ENDPOINT_ISOC_DATA 0x07 #define ENDPOINT_FIRMWARE 0x05 struct speedtch_params { unsigned int altsetting; unsigned int BMaxDSL; unsigned char ModemMode; unsigned char ModemOption[MODEM_OPTION_LENGTH]; }; struct speedtch_instance_data { struct usbatm_data *usbatm; struct speedtch_params params; /* set in probe, constant afterwards */ struct timer_list status_check_timer; struct work_struct status_check_work; unsigned char last_status; int poll_delay; /* milliseconds */ struct timer_list resubmit_timer; struct urb *int_urb; unsigned char int_data[16]; unsigned char scratch_buffer[16]; }; /*************** ** firmware ** ***************/ static void speedtch_set_swbuff(struct speedtch_instance_data *instance, int state) { struct usbatm_data *usbatm = instance->usbatm; struct usb_device *usb_dev = usbatm->usb_dev; int ret; ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), 0x32, 0x40, state ? 0x01 : 0x00, 0x00, NULL, 0, CTRL_TIMEOUT); if (ret < 0) usb_warn(usbatm, "%sabling SW buffering: usb_control_msg returned %d\n", state ? "En" : "Dis", ret); else usb_dbg(usbatm, "speedtch_set_swbuff: %sbled SW buffering\n", state ? "En" : "Dis"); } static void speedtch_test_sequence(struct speedtch_instance_data *instance) { struct usbatm_data *usbatm = instance->usbatm; struct usb_device *usb_dev = usbatm->usb_dev; unsigned char *buf = instance->scratch_buffer; int ret; /* URB 147 */ buf[0] = 0x1c; buf[1] = 0x50; ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), 0x01, 0x40, 0x0b, 0x00, buf, 2, CTRL_TIMEOUT); if (ret < 0) usb_warn(usbatm, "%s failed on URB147: %d\n", __func__, ret); /* URB 148 */ buf[0] = 0x32; buf[1] = 0x00; ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), 0x01, 0x40, 0x02, 0x00, buf, 2, CTRL_TIMEOUT); if (ret < 0) usb_warn(usbatm, "%s failed on URB148: %d\n", __func__, ret); /* URB 149 */ buf[0] = 0x01; buf[1] = 0x00; buf[2] = 0x01; ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), 0x01, 0x40, 0x03, 0x00, buf, 3, CTRL_TIMEOUT); if (ret < 0) usb_warn(usbatm, "%s failed on URB149: %d\n", __func__, ret); /* URB 150 */ buf[0] = 0x01; buf[1] = 0x00; buf[2] = 0x01; ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), 0x01, 0x40, 0x04, 0x00, buf, 3, CTRL_TIMEOUT); if (ret < 0) usb_warn(usbatm, "%s failed on URB150: %d\n", __func__, ret); /* Extra initialisation in recent drivers - gives higher speeds */ /* URBext1 */ buf[0] = instance->params.ModemMode; ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), 0x01, 0x40, 0x11, 0x00, buf, 1, CTRL_TIMEOUT); if (ret < 0) usb_warn(usbatm, "%s failed on URBext1: %d\n", __func__, ret); /* URBext2 */ /* This seems to be the one which actually triggers the higher sync rate -- it does require the new firmware too, although it works OK with older firmware */ ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), 0x01, 0x40, 0x14, 0x00, instance->params.ModemOption, MODEM_OPTION_LENGTH, CTRL_TIMEOUT); if (ret < 0) usb_warn(usbatm, "%s failed on URBext2: %d\n", __func__, ret); /* URBext3 */ buf[0] = instance->params.BMaxDSL & 0xff; buf[1] = instance->params.BMaxDSL >> 8; ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), 0x01, 0x40, 0x12, 0x00, buf, 2, CTRL_TIMEOUT); if (ret < 0) usb_warn(usbatm, "%s failed on URBext3: %d\n", __func__, ret); } static int speedtch_upload_firmware(struct speedtch_instance_data *instance, const struct firmware *fw1, const struct firmware 
*fw2) { unsigned char *buffer; struct usbatm_data *usbatm = instance->usbatm; struct usb_device *usb_dev = usbatm->usb_dev; int actual_length; int ret = 0; int offset; usb_dbg(usbatm, "%s entered\n", __func__); buffer = (unsigned char *)__get_free_page(GFP_KERNEL); if (!buffer) { ret = -ENOMEM; usb_dbg(usbatm, "%s: no memory for buffer!\n", __func__); goto out; } if (!usb_ifnum_to_if(usb_dev, 2)) { ret = -ENODEV; usb_dbg(usbatm, "%s: interface not found!\n", __func__); goto out_free; } /* URB 7 */ if (dl_512_first) { /* some modems need a read before writing the firmware */ ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE), buffer, 0x200, &actual_length, 2000); if (ret < 0 && ret != -ETIMEDOUT) usb_warn(usbatm, "%s: read BLOCK0 from modem failed (%d)!\n", __func__, ret); else usb_dbg(usbatm, "%s: BLOCK0 downloaded (%d bytes)\n", __func__, ret); } /* URB 8 : both leds are static green */ for (offset = 0; offset < fw1->size; offset += PAGE_SIZE) { int thislen = min_t(int, PAGE_SIZE, fw1->size - offset); memcpy(buffer, fw1->data + offset, thislen); ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, ENDPOINT_FIRMWARE), buffer, thislen, &actual_length, DATA_TIMEOUT); if (ret < 0) { usb_err(usbatm, "%s: write BLOCK1 to modem failed (%d)!\n", __func__, ret); goto out_free; } usb_dbg(usbatm, "%s: BLOCK1 uploaded (%zu bytes)\n", __func__, fw1->size); } /* USB led blinking green, ADSL led off */ /* URB 11 */ ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE), buffer, 0x200, &actual_length, DATA_TIMEOUT); if (ret < 0) { usb_err(usbatm, "%s: read BLOCK2 from modem failed (%d)!\n", __func__, ret); goto out_free; } usb_dbg(usbatm, "%s: BLOCK2 downloaded (%d bytes)\n", __func__, actual_length); /* URBs 12 to 139 - USB led blinking green, ADSL led off */ for (offset = 0; offset < fw2->size; offset += PAGE_SIZE) { int thislen = min_t(int, PAGE_SIZE, fw2->size - offset); memcpy(buffer, fw2->data + offset, thislen); ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, ENDPOINT_FIRMWARE), buffer, thislen, &actual_length, DATA_TIMEOUT); if (ret < 0) { usb_err(usbatm, "%s: write BLOCK3 to modem failed (%d)!\n", __func__, ret); goto out_free; } } usb_dbg(usbatm, "%s: BLOCK3 uploaded (%zu bytes)\n", __func__, fw2->size); /* USB led static green, ADSL led static red */ /* URB 142 */ ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE), buffer, 0x200, &actual_length, DATA_TIMEOUT); if (ret < 0) { usb_err(usbatm, "%s: read BLOCK4 from modem failed (%d)!\n", __func__, ret); goto out_free; } /* success */ usb_dbg(usbatm, "%s: BLOCK4 downloaded (%d bytes)\n", __func__, actual_length); /* Delay to allow firmware to start up. We can do this here because we're in our own kernel thread anyway. 
*/ msleep_interruptible(1000); if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, instance->params.altsetting)) < 0) { usb_err(usbatm, "%s: setting interface to %d failed (%d)!\n", __func__, instance->params.altsetting, ret); goto out_free; } /* Enable software buffering, if requested */ if (sw_buffering) speedtch_set_swbuff(instance, 1); /* Magic spell; don't ask us what this does */ speedtch_test_sequence(instance); ret = 0; out_free: free_page((unsigned long)buffer); out: return ret; } static int speedtch_find_firmware(struct usbatm_data *usbatm, struct usb_interface *intf, int phase, const struct firmware **fw_p) { struct device *dev = &intf->dev; const u16 bcdDevice = le16_to_cpu(interface_to_usbdev(intf)->descriptor.bcdDevice); const u8 major_revision = bcdDevice >> 8; const u8 minor_revision = bcdDevice & 0xff; char buf[24]; sprintf(buf, "speedtch-%d.bin.%x.%02x", phase, major_revision, minor_revision); usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf); if (request_firmware(fw_p, buf, dev)) { sprintf(buf, "speedtch-%d.bin.%x", phase, major_revision); usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf); if (request_firmware(fw_p, buf, dev)) { sprintf(buf, "speedtch-%d.bin", phase); usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf); if (request_firmware(fw_p, buf, dev)) { usb_err(usbatm, "%s: no stage %d firmware found!\n", __func__, phase); return -ENOENT; } } } usb_info(usbatm, "found stage %d firmware %s\n", phase, buf); return 0; } static int speedtch_heavy_init(struct usbatm_data *usbatm, struct usb_interface *intf) { const struct firmware *fw1, *fw2; struct speedtch_instance_data *instance = usbatm->driver_data; int ret; if ((ret = speedtch_find_firmware(usbatm, intf, 1, &fw1)) < 0) return ret; if ((ret = speedtch_find_firmware(usbatm, intf, 2, &fw2)) < 0) { release_firmware(fw1); return ret; } if ((ret = speedtch_upload_firmware(instance, fw1, fw2)) < 0) usb_err(usbatm, "%s: firmware upload failed (%d)!\n", __func__, ret); release_firmware(fw2); release_firmware(fw1); return ret; } /********** ** ATM ** **********/ static int speedtch_read_status(struct speedtch_instance_data *instance) { struct usbatm_data *usbatm = instance->usbatm; struct usb_device *usb_dev = usbatm->usb_dev; unsigned char *buf = instance->scratch_buffer; int ret; memset(buf, 0, 16); ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x12, 0xc0, 0x07, 0x00, buf + OFFSET_7, SIZE_7, CTRL_TIMEOUT); if (ret < 0) { atm_dbg(usbatm, "%s: MSG 7 failed\n", __func__); return ret; } ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x12, 0xc0, 0x0b, 0x00, buf + OFFSET_b, SIZE_b, CTRL_TIMEOUT); if (ret < 0) { atm_dbg(usbatm, "%s: MSG B failed\n", __func__); return ret; } ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x12, 0xc0, 0x0d, 0x00, buf + OFFSET_d, SIZE_d, CTRL_TIMEOUT); if (ret < 0) { atm_dbg(usbatm, "%s: MSG D failed\n", __func__); return ret; } ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x01, 0xc0, 0x0e, 0x00, buf + OFFSET_e, SIZE_e, CTRL_TIMEOUT); if (ret < 0) { atm_dbg(usbatm, "%s: MSG E failed\n", __func__); return ret; } ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x01, 0xc0, 0x0f, 0x00, buf + OFFSET_f, SIZE_f, CTRL_TIMEOUT); if (ret < 0) { atm_dbg(usbatm, "%s: MSG F failed\n", __func__); return ret; } return 0; } static int speedtch_start_synchro(struct speedtch_instance_data *instance) { struct usbatm_data *usbatm = instance->usbatm; struct usb_device *usb_dev = usbatm->usb_dev; unsigned char *buf = 
instance->scratch_buffer; int ret; atm_dbg(usbatm, "%s entered\n", __func__); memset(buf, 0, 2); ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x12, 0xc0, 0x04, 0x00, buf, 2, CTRL_TIMEOUT); if (ret < 0) atm_warn(usbatm, "failed to start ADSL synchronisation: %d\n", ret); else atm_dbg(usbatm, "%s: modem prodded. %d bytes returned: %02x %02x\n", __func__, ret, buf[0], buf[1]); return ret; } static void speedtch_check_status(struct work_struct *work) { struct speedtch_instance_data *instance = container_of(work, struct speedtch_instance_data, status_check_work); struct usbatm_data *usbatm = instance->usbatm; struct atm_dev *atm_dev = usbatm->atm_dev; unsigned char *buf = instance->scratch_buffer; int down_speed, up_speed, ret; unsigned char status; #ifdef VERBOSE_DEBUG atm_dbg(usbatm, "%s entered\n", __func__); #endif ret = speedtch_read_status(instance); if (ret < 0) { atm_warn(usbatm, "error %d fetching device status\n", ret); instance->poll_delay = min(2 * instance->poll_delay, MAX_POLL_DELAY); return; } instance->poll_delay = max(instance->poll_delay / 2, MIN_POLL_DELAY); status = buf[OFFSET_7]; if ((status != instance->last_status) || !status) { atm_dbg(usbatm, "%s: line state 0x%02x\n", __func__, status); switch (status) { case 0: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); if (instance->last_status) atm_info(usbatm, "ADSL line is down\n"); /* It may never resync again unless we ask it to... */ ret = speedtch_start_synchro(instance); break; case 0x08: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN); atm_info(usbatm, "ADSL line is blocked?\n"); break; case 0x10: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line is synchronising\n"); break; case 0x20: down_speed = buf[OFFSET_b] | (buf[OFFSET_b + 1] << 8) | (buf[OFFSET_b + 2] << 16) | (buf[OFFSET_b + 3] << 24); up_speed = buf[OFFSET_b + 4] | (buf[OFFSET_b + 5] << 8) | (buf[OFFSET_b + 6] << 16) | (buf[OFFSET_b + 7] << 24); if (!(down_speed & 0x0000ffff) && !(up_speed & 0x0000ffff)) { down_speed >>= 16; up_speed >>= 16; } atm_dev->link_rate = down_speed * 1000 / 424; atm_dev_signal_change(atm_dev, ATM_PHY_SIG_FOUND); atm_info(usbatm, "ADSL line is up (%d kb/s down | %d kb/s up)\n", down_speed, up_speed); break; default: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN); atm_info(usbatm, "unknown line state %02x\n", status); break; } instance->last_status = status; } } static void speedtch_status_poll(struct timer_list *t) { struct speedtch_instance_data *instance = from_timer(instance, t, status_check_timer); schedule_work(&instance->status_check_work); /* The following check is racy, but the race is harmless */ if (instance->poll_delay < MAX_POLL_DELAY) mod_timer(&instance->status_check_timer, jiffies + msecs_to_jiffies(instance->poll_delay)); else atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n"); } static void speedtch_resubmit_int(struct timer_list *t) { struct speedtch_instance_data *instance = from_timer(instance, t, resubmit_timer); struct urb *int_urb = instance->int_urb; int ret; atm_dbg(instance->usbatm, "%s entered\n", __func__); if (int_urb) { ret = usb_submit_urb(int_urb, GFP_ATOMIC); if (!ret) schedule_work(&instance->status_check_work); else { atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY)); } } } static void speedtch_handle_int(struct urb *int_urb) { struct speedtch_instance_data *instance = int_urb->context; struct 
usbatm_data *usbatm = instance->usbatm; unsigned int count = int_urb->actual_length; int status = int_urb->status; int ret; /* The magic interrupt for "up state" */ static const unsigned char up_int[6] = { 0xa1, 0x00, 0x01, 0x00, 0x00, 0x00 }; /* The magic interrupt for "down state" */ static const unsigned char down_int[6] = { 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00 }; atm_dbg(usbatm, "%s entered\n", __func__); if (status < 0) { atm_dbg(usbatm, "%s: nonzero urb status %d!\n", __func__, status); goto fail; } if ((count == 6) && !memcmp(up_int, instance->int_data, 6)) { timer_delete(&instance->status_check_timer); atm_info(usbatm, "DSL line goes up\n"); } else if ((count == 6) && !memcmp(down_int, instance->int_data, 6)) { atm_info(usbatm, "DSL line goes down\n"); } else { int i; atm_dbg(usbatm, "%s: unknown interrupt packet of length %d:", __func__, count); for (i = 0; i < count; i++) printk(" %02x", instance->int_data[i]); printk("\n"); goto fail; } int_urb = instance->int_urb; if (int_urb) { ret = usb_submit_urb(int_urb, GFP_ATOMIC); schedule_work(&instance->status_check_work); if (ret < 0) { atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); goto fail; } } return; fail: int_urb = instance->int_urb; if (int_urb) mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY)); } static int speedtch_atm_start(struct usbatm_data *usbatm, struct atm_dev *atm_dev) { struct usb_device *usb_dev = usbatm->usb_dev; struct speedtch_instance_data *instance = usbatm->driver_data; int i, ret; unsigned char mac_str[13]; atm_dbg(usbatm, "%s entered\n", __func__); /* Set MAC address, it is stored in the serial number */ memset(atm_dev->esi, 0, sizeof(atm_dev->esi)); if (usb_string(usb_dev, usb_dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) == 12) { for (i = 0; i < 6; i++) atm_dev->esi[i] = (hex_to_bin(mac_str[i * 2]) << 4) + hex_to_bin(mac_str[i * 2 + 1]); } /* Start modem synchronisation */ ret = speedtch_start_synchro(instance); /* Set up interrupt endpoint */ if (instance->int_urb) { ret = usb_submit_urb(instance->int_urb, GFP_KERNEL); if (ret < 0) { /* Doesn't matter; we'll poll anyway */ atm_dbg(usbatm, "%s: submission of interrupt URB failed (%d)!\n", __func__, ret); usb_free_urb(instance->int_urb); instance->int_urb = NULL; } } /* Start status polling */ mod_timer(&instance->status_check_timer, jiffies + msecs_to_jiffies(1000)); return 0; } static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_dev) { struct speedtch_instance_data *instance = usbatm->driver_data; struct urb *int_urb = instance->int_urb; atm_dbg(usbatm, "%s entered\n", __func__); timer_delete_sync(&instance->status_check_timer); /* * Since resubmit_timer and int_urb can schedule themselves and * each other, shutting them down correctly takes some care */ instance->int_urb = NULL; /* signal shutdown */ mb(); usb_kill_urb(int_urb); timer_delete_sync(&instance->resubmit_timer); /* * At this point, speedtch_handle_int and speedtch_resubmit_int * can run or be running, but instance->int_urb == NULL means that * they will not reschedule */ usb_kill_urb(int_urb); timer_delete_sync(&instance->resubmit_timer); usb_free_urb(int_urb); flush_work(&instance->status_check_work); } static int speedtch_pre_reset(struct usb_interface *intf) { return 0; } static int speedtch_post_reset(struct usb_interface *intf) { return 0; } /********** ** USB ** **********/ static const struct usb_device_id speedtch_usb_ids[] = { {USB_DEVICE(0x06b9, 0x4061)}, {} }; MODULE_DEVICE_TABLE(usb, 
speedtch_usb_ids); static int speedtch_usb_probe(struct usb_interface *, const struct usb_device_id *); static struct usb_driver speedtch_usb_driver = { .name = speedtch_driver_name, .probe = speedtch_usb_probe, .disconnect = usbatm_usb_disconnect, .pre_reset = speedtch_pre_reset, .post_reset = speedtch_post_reset, .id_table = speedtch_usb_ids }; static void speedtch_release_interfaces(struct usb_device *usb_dev, int num_interfaces) { struct usb_interface *cur_intf; int i; for (i = 0; i < num_interfaces; i++) { cur_intf = usb_ifnum_to_if(usb_dev, i); if (cur_intf) { usb_set_intfdata(cur_intf, NULL); usb_driver_release_interface(&speedtch_usb_driver, cur_intf); } } } static int speedtch_bind(struct usbatm_data *usbatm, struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(intf); struct usb_interface *cur_intf, *data_intf; struct speedtch_instance_data *instance; int ifnum = intf->altsetting->desc.bInterfaceNumber; int num_interfaces = usb_dev->actconfig->desc.bNumInterfaces; int i, ret; int use_isoc; usb_dbg(usbatm, "%s entered\n", __func__); /* sanity checks */ if (usb_dev->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) { usb_err(usbatm, "%s: wrong device class %d\n", __func__, usb_dev->descriptor.bDeviceClass); return -ENODEV; } data_intf = usb_ifnum_to_if(usb_dev, INTERFACE_DATA); if (!data_intf) { usb_err(usbatm, "%s: data interface not found!\n", __func__); return -ENODEV; } /* claim all interfaces */ for (i = 0; i < num_interfaces; i++) { cur_intf = usb_ifnum_to_if(usb_dev, i); if ((i != ifnum) && cur_intf) { ret = usb_driver_claim_interface(&speedtch_usb_driver, cur_intf, usbatm); if (ret < 0) { usb_err(usbatm, "%s: failed to claim interface %2d (%d)!\n", __func__, i, ret); speedtch_release_interfaces(usb_dev, i); return ret; } } } instance = kzalloc(sizeof(*instance), GFP_KERNEL); if (!instance) { ret = -ENOMEM; goto fail_release; } instance->usbatm = usbatm; /* module parameters may change at any moment, so take a snapshot */ instance->params.altsetting = altsetting; instance->params.BMaxDSL = BMaxDSL; instance->params.ModemMode = ModemMode; memcpy(instance->params.ModemOption, DEFAULT_MODEM_OPTION, MODEM_OPTION_LENGTH); memcpy(instance->params.ModemOption, ModemOption, num_ModemOption); use_isoc = enable_isoc; if (instance->params.altsetting) if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, instance->params.altsetting)) < 0) { usb_err(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, instance->params.altsetting, ret); instance->params.altsetting = 0; /* fall back to default */ } if (!instance->params.altsetting && use_isoc) if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, DEFAULT_ISOC_ALTSETTING)) < 0) { usb_dbg(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, DEFAULT_ISOC_ALTSETTING, ret); use_isoc = 0; /* fall back to bulk */ } if (use_isoc) { const struct usb_host_interface *desc = data_intf->cur_altsetting; const __u8 target_address = USB_DIR_IN | usbatm->driver->isoc_in; use_isoc = 0; /* fall back to bulk if endpoint not found */ for (i = 0; i < desc->desc.bNumEndpoints; i++) { const struct usb_endpoint_descriptor *endpoint_desc = &desc->endpoint[i].desc; if ((endpoint_desc->bEndpointAddress == target_address)) { use_isoc = usb_endpoint_xfer_isoc(endpoint_desc); break; } } if (!use_isoc) usb_info(usbatm, "isochronous transfer not supported - using bulk\n"); } if (!use_isoc && !instance->params.altsetting) if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, 
DEFAULT_BULK_ALTSETTING)) < 0) { usb_err(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, DEFAULT_BULK_ALTSETTING, ret); goto fail_free; } if (!instance->params.altsetting) instance->params.altsetting = use_isoc ? DEFAULT_ISOC_ALTSETTING : DEFAULT_BULK_ALTSETTING; usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0); INIT_WORK(&instance->status_check_work, speedtch_check_status); timer_setup(&instance->status_check_timer, speedtch_status_poll, 0); instance->last_status = 0xff; instance->poll_delay = MIN_POLL_DELAY; timer_setup(&instance->resubmit_timer, speedtch_resubmit_int, 0); instance->int_urb = usb_alloc_urb(0, GFP_KERNEL); if (instance->int_urb) usb_fill_int_urb(instance->int_urb, usb_dev, usb_rcvintpipe(usb_dev, ENDPOINT_INT), instance->int_data, sizeof(instance->int_data), speedtch_handle_int, instance, 16); else usb_dbg(usbatm, "%s: no memory for interrupt urb!\n", __func__); /* check whether the modem already seems to be alive */ ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), 0x12, 0xc0, 0x07, 0x00, instance->scratch_buffer + OFFSET_7, SIZE_7, 500); usbatm->flags |= (ret == SIZE_7 ? UDSL_SKIP_HEAVY_INIT : 0); usb_dbg(usbatm, "%s: firmware %s loaded\n", __func__, usbatm->flags & UDSL_SKIP_HEAVY_INIT ? "already" : "not"); if (!(usbatm->flags & UDSL_SKIP_HEAVY_INIT)) if ((ret = usb_reset_device(usb_dev)) < 0) { usb_err(usbatm, "%s: device reset failed (%d)!\n", __func__, ret); goto fail_free; } usbatm->driver_data = instance; return 0; fail_free: usb_free_urb(instance->int_urb); kfree(instance); fail_release: speedtch_release_interfaces(usb_dev, num_interfaces); return ret; } static void speedtch_unbind(struct usbatm_data *usbatm, struct usb_interface *intf) { struct usb_device *usb_dev = interface_to_usbdev(intf); struct speedtch_instance_data *instance = usbatm->driver_data; usb_dbg(usbatm, "%s entered\n", __func__); speedtch_release_interfaces(usb_dev, usb_dev->actconfig->desc.bNumInterfaces); usb_free_urb(instance->int_urb); kfree(instance); } /*********** ** init ** ***********/ static struct usbatm_driver speedtch_usbatm_driver = { .driver_name = speedtch_driver_name, .bind = speedtch_bind, .heavy_init = speedtch_heavy_init, .unbind = speedtch_unbind, .atm_start = speedtch_atm_start, .atm_stop = speedtch_atm_stop, .bulk_in = ENDPOINT_BULK_DATA, .bulk_out = ENDPOINT_BULK_DATA, .isoc_in = ENDPOINT_ISOC_DATA }; static int speedtch_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { return usbatm_usb_probe(intf, id, &speedtch_usbatm_driver); } module_usb_driver(speedtch_usb_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
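One detail worth spelling out from speedtch_check_status() above: atm_dev->link_rate is expressed in ATM cells per second, while the modem reports speeds in kbit/s. An ATM cell is 53 octets = 424 bits, which is what the driver's `down_speed * 1000 / 424` computes. A hypothetical helper (no such function exists in the driver) making the arithmetic explicit:

#include <linux/types.h>

/* Illustrative only: convert a modem-reported rate in kbit/s to the
 * cells-per-second unit used by atm_dev->link_rate.
 * cells/s = (kbit/s * 1000 bits) / (424 bits per 53-octet cell). */
static inline u32 speedtch_kbps_to_cps(u32 kbps)
{
	return kbps * 1000 / 424;
}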
// SPDX-License-Identifier: GPL-2.0-only /* * Generic GPIO card-detect helper * * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de> */ #include <linux/err.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/mmc/host.h> #include <linux/mmc/slot-gpio.h> #include <linux/module.h> #include <linux/slab.h> #include "slot-gpio.h" struct mmc_gpio { struct gpio_desc *ro_gpio; struct gpio_desc *cd_gpio; irq_handler_t cd_gpio_isr; char *ro_label; char *cd_label; u32 cd_debounce_delay_ms; int cd_irq; }; static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) { /* Schedule a card detection after a debounce timeout */ struct mmc_host *host = dev_id; struct mmc_gpio *ctx = host->slot.handler_priv; host->trigger_card_event = true; mmc_detect_change(host, msecs_to_jiffies(ctx->cd_debounce_delay_ms)); return IRQ_HANDLED; } int mmc_gpio_alloc(struct mmc_host *host) { const char *devname = dev_name(host->parent); struct mmc_gpio *ctx; ctx = devm_kzalloc(host->parent, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->cd_debounce_delay_ms = 200; ctx->cd_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s cd", devname); if (!ctx->cd_label) return -ENOMEM; ctx->ro_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s ro", devname); if (!ctx->ro_label) return -ENOMEM; ctx->cd_irq = -EINVAL; host->slot.handler_priv = ctx; host->slot.cd_irq = -EINVAL; return 0; } void mmc_gpio_set_cd_irq(struct mmc_host *host, int irq) { struct mmc_gpio *ctx = host->slot.handler_priv; if (!ctx || irq < 0) return; ctx->cd_irq = irq; } EXPORT_SYMBOL(mmc_gpio_set_cd_irq); int mmc_gpio_get_ro(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; int cansleep; if (!ctx || !ctx->ro_gpio) return -ENOSYS; cansleep = gpiod_cansleep(ctx->ro_gpio); return cansleep ? gpiod_get_value_cansleep(ctx->ro_gpio) : gpiod_get_value(ctx->ro_gpio); } EXPORT_SYMBOL(mmc_gpio_get_ro); int mmc_gpio_get_cd(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; int cansleep; if (!ctx || !ctx->cd_gpio) return -ENOSYS; cansleep = gpiod_cansleep(ctx->cd_gpio); return cansleep ? 
gpiod_get_value_cansleep(ctx->cd_gpio) : gpiod_get_value(ctx->cd_gpio); } EXPORT_SYMBOL(mmc_gpio_get_cd); void mmc_gpiod_request_cd_irq(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; int irq = -EINVAL; int ret; if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio) return; /* * Do not use IRQ if the platform prefers to poll, e.g., because that * IRQ number is already used by another unit and cannot be shared. */ if (ctx->cd_irq >= 0) irq = ctx->cd_irq; else if (!(host->caps & MMC_CAP_NEEDS_POLL)) irq = gpiod_to_irq(ctx->cd_gpio); if (irq >= 0) { if (!ctx->cd_gpio_isr) ctx->cd_gpio_isr = mmc_gpio_cd_irqt; ret = devm_request_threaded_irq(host->parent, irq, NULL, ctx->cd_gpio_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, ctx->cd_label, host); if (ret < 0) irq = ret; } host->slot.cd_irq = irq; if (irq < 0) host->caps |= MMC_CAP_NEEDS_POLL; } EXPORT_SYMBOL(mmc_gpiod_request_cd_irq); int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on) { int ret = 0; if (!(host->caps & MMC_CAP_CD_WAKE) || host->slot.cd_irq < 0 || on == host->slot.cd_wake_enabled) return 0; if (on) { ret = enable_irq_wake(host->slot.cd_irq); host->slot.cd_wake_enabled = !ret; } else { disable_irq_wake(host->slot.cd_irq); host->slot.cd_wake_enabled = false; } return ret; } EXPORT_SYMBOL(mmc_gpio_set_cd_wake); /** * mmc_gpiod_request_cd - request a gpio descriptor for card-detection * @host: mmc host * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer * @override_active_level: ignore %GPIO_ACTIVE_LOW flag * @debounce: debounce time in microseconds * * Note that this must be called prior to mmc_add_host() * otherwise the caller must also call mmc_gpiod_request_cd_irq(). * * Returns zero on success, else an error. */ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; int ret; desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN); if (IS_ERR(desc)) return PTR_ERR(desc); /* Update default label if no con_id provided */ if (!con_id) gpiod_set_consumer_name(desc, ctx->cd_label); if (debounce) { ret = gpiod_set_debounce(desc, debounce); if (ret < 0) ctx->cd_debounce_delay_ms = debounce / 1000; } /* override forces default (active-low) polarity ... */ if (override_active_level && !gpiod_is_active_low(desc)) gpiod_toggle_active_low(desc); /* ... or active-high */ if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) gpiod_toggle_active_low(desc); ctx->cd_gpio = desc; return 0; } EXPORT_SYMBOL(mmc_gpiod_request_cd); /** * mmc_gpiod_set_cd_config - set config for card-detection GPIO * @host: mmc host * @config: Generic pinconf config (from pinconf_to_config_packed()) * * This can be used by mmc host drivers to fixup a card-detection GPIO's config * (e.g. set PIN_CONFIG_BIAS_PULL_UP) after acquiring the GPIO descriptor * through mmc_gpiod_request_cd(). * * Returns: * 0 on success, or a negative errno value on error. */ int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config) { struct mmc_gpio *ctx = host->slot.handler_priv; return gpiod_set_config(ctx->cd_gpio, config); } EXPORT_SYMBOL(mmc_gpiod_set_cd_config); bool mmc_can_gpio_cd(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; return ctx->cd_gpio ? 
true : false; } EXPORT_SYMBOL(mmc_can_gpio_cd); /** * mmc_gpiod_request_ro - request a gpio descriptor for write protection * @host: mmc host * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer * @debounce: debounce time in microseconds * * Returns zero on success, else an error. */ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, unsigned int idx, unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; int ret; desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN); if (IS_ERR(desc)) return PTR_ERR(desc); /* Update default label if no con_id provided */ if (!con_id) gpiod_set_consumer_name(desc, ctx->ro_label); if (debounce) { ret = gpiod_set_debounce(desc, debounce); if (ret < 0) return ret; } if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH) gpiod_toggle_active_low(desc); ctx->ro_gpio = desc; return 0; } EXPORT_SYMBOL(mmc_gpiod_request_ro); bool mmc_can_gpio_ro(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; return ctx->ro_gpio ? true : false; } EXPORT_SYMBOL(mmc_can_gpio_ro);
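A minimal usage sketch for the helpers above, from the point of view of a hypothetical MMC host driver: request the card-detect and write-protect GPIOs during probe, before mmc_add_host(), so the core sets up the CD interrupt itself via mmc_gpiod_request_cd_irq(). The function name, the "cd"/"wp" con_ids, and the debounce values are assumptions for illustration, not requirements of this API.

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int my_host_request_slot_gpios(struct mmc_host *host)
{
	int ret;

	/* "cd" GPIO at index 0, keep the described polarity, ask the GPIO
	 * driver for 5 ms of debounce; a missing optional GPIO surfaces as
	 * -ENOENT from devm_gpiod_get_index(). */
	ret = mmc_gpiod_request_cd(host, "cd", 0, false, 5000);
	if (ret && ret != -ENOENT)
		return ret;

	/* "wp" GPIO backing mmc_gpio_get_ro(); no extra debounce */
	ret = mmc_gpiod_request_ro(host, "wp", 0, 0);
	if (ret && ret != -ENOENT)
		return ret;

	return 0;	/* mmc_add_host() wires up the CD IRQ afterwards */
}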
2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 
2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/lib/vsprintf.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
/*
 * Wirzenius wrote this portably, Torvalds fucked it up :-)
 */

/*
 * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
 * - changed to provide snprintf and vsnprintf functions
 * So Feb  1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de>
 * - scnprintf and vscnprintf
 */

#include <linux/stdarg.h>
#include <linux/build_bug.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/errname.h>
#include <linux/module.h>	/* for KSYM_SYMBOL_LEN */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/ioport.h>
#include <linux/dcache.h>
#include <linux/cred.h>
#include <linux/rtc.h>
#include <linux/sprintf.h>
#include <linux/time.h>
#include <linux/uuid.h>
#include <linux/of.h>
#include <net/addrconf.h>
#include <linux/siphash.h>
#include <linux/compiler.h>
#include <linux/property.h>
#include <linux/notifier.h>
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
#endif

#include "../mm/internal.h"	/* For the trace_print_flags arrays */

#include <asm/page.h>		/* for PAGE_SIZE */
#include <asm/byteorder.h>	/* cpu_to_le16 */
#include <linux/unaligned.h>

#include <linux/string_helpers.h>
#include "kstrtox.h"

/* Disable pointer hashing if requested */
bool no_hash_pointers __ro_after_init;
EXPORT_SYMBOL_GPL(no_hash_pointers);

noinline
static unsigned long long simple_strntoull(const char *startp, char **endp,
					   unsigned int base, size_t max_chars)
{
	const char *cp;
	unsigned long long result = 0ULL;
	size_t prefix_chars;
	unsigned int rv;

	cp = _parse_integer_fixup_radix(startp, &base);
	prefix_chars = cp - startp;
	if (prefix_chars < max_chars) {
		rv = _parse_integer_limit(cp, base, &result, max_chars - prefix_chars);
		/* FIXME */
		cp += (rv & ~KSTRTOX_OVERFLOW);
	} else {
		/* Field too short for prefix + digit, skip over without converting */
		cp = startp + max_chars;
	}

	if (endp)
		*endp = (char *)cp;

	return result;
}

/**
 * simple_strtoull - convert a string to an unsigned long long
 * @cp: The start of the string
 * @endp: A pointer to the end of the parsed string will be placed here
 * @base: The number base to use
 *
 * This function has caveats. Please use kstrtoull instead.
 */
noinline
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
	return simple_strntoull(cp, endp, base, INT_MAX);
}
EXPORT_SYMBOL(simple_strtoull);

/**
 * simple_strtoul - convert a string to an unsigned long
 * @cp: The start of the string
 * @endp: A pointer to the end of the parsed string will be placed here
 * @base: The number base to use
 *
 * This function has caveats. Please use kstrtoul instead.
 */
unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
{
	return simple_strtoull(cp, endp, base);
}
EXPORT_SYMBOL(simple_strtoul);
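/*
 * Illustration (sketch, not in the original file): with base == 0 the
 * simple_str* helpers auto-detect the radix from the prefix, e.g.
 *
 *	char *end;
 *	unsigned long v = simple_strtoul("0x1fp", &end, 0);
 *	// v == 0x1f, end now points at "p"
 *
 * Note the overflow flag from _parse_integer_limit() is masked off above
 * (the FIXME), which is one reason kstrtoul() and friends are preferred.
 */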
unsigned long simple_strntoul(const char *cp, char **endp, unsigned int base,
			      size_t max_chars)
{
	return simple_strntoull(cp, endp, base, max_chars);
}
EXPORT_SYMBOL(simple_strntoul);

/**
 * simple_strtol - convert a string to a signed long
 * @cp: The start of the string
 * @endp: A pointer to the end of the parsed string will be placed here
 * @base: The number base to use
 *
 * This function has caveats. Please use kstrtol instead.
 */
long simple_strtol(const char *cp, char **endp, unsigned int base)
{
	if (*cp == '-')
		return -simple_strtoul(cp + 1, endp, base);

	return simple_strtoul(cp, endp, base);
}
EXPORT_SYMBOL(simple_strtol);

noinline
static long long simple_strntoll(const char *cp, char **endp, unsigned int base,
				 size_t max_chars)
{
	/*
	 * simple_strntoull() safely handles receiving max_chars==0 in the
	 * case cp[0] == '-' && max_chars == 1.
	 * If max_chars == 0 we can drop through and pass it to simple_strntoull()
	 * and the content of *cp is irrelevant.
	 */
	if (*cp == '-' && max_chars > 0)
		return -simple_strntoull(cp + 1, endp, base, max_chars - 1);

	return simple_strntoull(cp, endp, base, max_chars);
}

/**
 * simple_strtoll - convert a string to a signed long long
 * @cp: The start of the string
 * @endp: A pointer to the end of the parsed string will be placed here
 * @base: The number base to use
 *
 * This function has caveats. Please use kstrtoll instead.
 */
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
	return simple_strntoll(cp, endp, base, INT_MAX);
}
EXPORT_SYMBOL(simple_strtoll);

static inline int skip_atoi(const char **s)
{
	int i = 0;

	do {
		i = i*10 + *((*s)++) - '0';
	} while (isdigit(**s));

	return i;
}

/*
 * Decimal conversion is by far the most typical, and is used for
 * /proc and /sys data. This directly impacts e.g. top performance
 * with many processes running. We optimize it for speed by emitting
 * two characters at a time, using a 200 byte lookup table. This
 * roughly halves the number of multiplications compared to computing
 * the digits one at a time. Implementation strongly inspired by the
 * previous version, which in turn used ideas described at
 * <http://www.cs.uiowa.edu/~jones/bcd/divide.html> (with permission
 * from the author, Douglas W. Jones).
 *
 * It turns out there is precisely one 26 bit fixed-point
 * approximation a of 64/100 for which x/100 == (x * (u64)a) >> 32
 * holds for all x in [0, 10^8-1], namely a = 0x28f5c29. The actual
 * range happens to be somewhat larger (x <= 1073741898), but that's
 * irrelevant for our purpose.
 *
 * For dividing a number in the range [10^4, 10^6-1] by 100, we still
 * need a 32x32->64 bit multiply, so we simply use the same constant.
 *
 * For dividing a number in the range [100, 10^4-1] by 100, there are
 * several options. The simplest is (x * 0x147b) >> 19, which is valid
 * for all x <= 43698.
 */
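/*
 * Worked example of the fixed-point trick above (illustration, not in the
 * original file): a = 0x28f5c29 = 42949673 ~ 2^32/100, so for x = 123456:
 *
 *	x * a = 5302394829888, and 5302394829888 >> 32 == 1234 == x/100.
 *
 * decpair[] below then supplies the two dropped digits "56" in one store.
 */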
static const u16 decpair[100] = {
#define _(x) (__force u16) cpu_to_le16(((x % 10) | ((x / 10) << 8)) + 0x3030)
	_( 0), _( 1), _( 2), _( 3), _( 4), _( 5), _( 6), _( 7), _( 8), _( 9),
	_(10), _(11), _(12), _(13), _(14), _(15), _(16), _(17), _(18), _(19),
	_(20), _(21), _(22), _(23), _(24), _(25), _(26), _(27), _(28), _(29),
	_(30), _(31), _(32), _(33), _(34), _(35), _(36), _(37), _(38), _(39),
	_(40), _(41), _(42), _(43), _(44), _(45), _(46), _(47), _(48), _(49),
	_(50), _(51), _(52), _(53), _(54), _(55), _(56), _(57), _(58), _(59),
	_(60), _(61), _(62), _(63), _(64), _(65), _(66), _(67), _(68), _(69),
	_(70), _(71), _(72), _(73), _(74), _(75), _(76), _(77), _(78), _(79),
	_(80), _(81), _(82), _(83), _(84), _(85), _(86), _(87), _(88), _(89),
	_(90), _(91), _(92), _(93), _(94), _(95), _(96), _(97), _(98), _(99),
#undef _
};

/*
 * This will print a single '0' even if r == 0, since we would
 * immediately jump to out_r where two 0s would be written but only
 * one of them accounted for in buf. This is needed by ip4_string
 * below. All other callers pass a non-zero value of r.
 */
static noinline_for_stack
char *put_dec_trunc8(char *buf, unsigned r)
{
	unsigned q;

	/* 1 <= r < 10^8 */
	if (r < 100)
		goto out_r;

	/* 100 <= r < 10^8 */
	q = (r * (u64)0x28f5c29) >> 32;
	*((u16 *)buf) = decpair[r - 100*q];
	buf += 2;

	/* 1 <= q < 10^6 */
	if (q < 100)
		goto out_q;

	/* 100 <= q < 10^6 */
	r = (q * (u64)0x28f5c29) >> 32;
	*((u16 *)buf) = decpair[q - 100*r];
	buf += 2;

	/* 1 <= r < 10^4 */
	if (r < 100)
		goto out_r;

	/* 100 <= r < 10^4 */
	q = (r * 0x147b) >> 19;
	*((u16 *)buf) = decpair[r - 100*q];
	buf += 2;
out_q:
	/* 1 <= q < 100 */
	r = q;
out_r:
	/* 1 <= r < 100 */
	*((u16 *)buf) = decpair[r];
	buf += r < 10 ? 1 : 2;
	return buf;
}

#if BITS_PER_LONG == 64 && BITS_PER_LONG_LONG == 64
static noinline_for_stack
char *put_dec_full8(char *buf, unsigned r)
{
	unsigned q;

	/* 0 <= r < 10^8 */
	q = (r * (u64)0x28f5c29) >> 32;
	*((u16 *)buf) = decpair[r - 100*q];
	buf += 2;

	/* 0 <= q < 10^6 */
	r = (q * (u64)0x28f5c29) >> 32;
	*((u16 *)buf) = decpair[q - 100*r];
	buf += 2;

	/* 0 <= r < 10^4 */
	q = (r * 0x147b) >> 19;
	*((u16 *)buf) = decpair[r - 100*q];
	buf += 2;

	/* 0 <= q < 100 */
	*((u16 *)buf) = decpair[q];
	buf += 2;
	return buf;
}

static noinline_for_stack
char *put_dec(char *buf, unsigned long long n)
{
	if (n >= 100*1000*1000)
		buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
	/* 1 <= n <= 1.6e11 */
	if (n >= 100*1000*1000)
		buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
	/* 1 <= n < 1e8 */
	return put_dec_trunc8(buf, n);
}

#elif BITS_PER_LONG == 32 && BITS_PER_LONG_LONG == 64

static void
put_dec_full4(char *buf, unsigned r)
{
	unsigned q;

	/* 0 <= r < 10^4 */
	q = (r * 0x147b) >> 19;
	*((u16 *)buf) = decpair[r - 100*q];
	buf += 2;
	/* 0 <= q < 100 */
	*((u16 *)buf) = decpair[q];
}

/*
 * Call put_dec_full4 on x % 10000, return x / 10000.
 * The approximation x/10000 == (x * 0x346DC5D7) >> 43
 * holds for all x < 1,128,869,999. The largest value this
 * helper will ever be asked to convert is 1,125,520,955.
 * (second call in the put_dec code, assuming n is all-ones).
 */
static noinline_for_stack
unsigned put_dec_helper4(char *buf, unsigned x)
{
	uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;

	put_dec_full4(buf, x - q * 10000);
	return q;
}
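/*
 * Illustration (not in the original file): 0x346DC5D7 == 879609303, which
 * is ~ 2^43/10000, so e.g. (1234567 * (uint64_t)0x346DC5D7) >> 43 == 123,
 * and put_dec_full4() emits the remainder 4567. Chaining this is how the
 * 32-bit put_dec() below peels four decimal digits per step without any
 * 64-bit division.
 */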
/* Based on code by Douglas W. Jones found at
 * <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
 * (with permission from the author).
 * Performs no 64-bit division and hence should be fast on 32-bit machines.
 */
static
char *put_dec(char *buf, unsigned long long n)
{
	uint32_t d3, d2, d1, q, h;

	if (n < 100*1000*1000)
		return put_dec_trunc8(buf, n);

	d1  = ((uint32_t)n >> 16); /* implicit "& 0xffff" */
	h   = (n >> 32);
	d2  = (h      ) & 0xffff;
	d3  = (h >> 16); /* implicit "& 0xffff" */

	/* n = 2^48 d3 + 2^32 d2 + 2^16 d1 + d0
	     = 281_4749_7671_0656 d3 + 42_9496_7296 d2 + 6_5536 d1 + d0 */
	q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
	q = put_dec_helper4(buf, q);

	q += 7671 * d3 + 9496 * d2 + 6 * d1;
	q = put_dec_helper4(buf+4, q);

	q += 4749 * d3 + 42 * d2;
	q = put_dec_helper4(buf+8, q);

	q += 281 * d3;
	buf += 12;
	if (q)
		buf = put_dec_trunc8(buf, q);
	else
		while (buf[-1] == '0')
			--buf;

	return buf;
}

#endif

/*
 * Convert passed number to decimal string.
 * Returns the length of string.  On buffer overflow, returns 0.
 *
 * If speed is not important, use snprintf(). It's easy to read the code.
 */
int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
{
	/* put_dec requires 2-byte alignment of the buffer. */
	char tmp[sizeof(num) * 3] __aligned(2);
	int idx, len;

	/* put_dec() may work incorrectly for num = 0 (generate "", not "0") */
	if (num <= 9) {
		tmp[0] = '0' + num;
		len = 1;
	} else {
		len = put_dec(tmp, num) - tmp;
	}

	if (len > size || width > size)
		return 0;

	if (width > len) {
		width = width - len;
		for (idx = 0; idx < width; idx++)
			buf[idx] = ' ';
	} else {
		width = 0;
	}

	for (idx = 0; idx < len; ++idx)
		buf[idx + width] = tmp[len - idx - 1];

	return len + width;
}

#define SIGN	1		/* unsigned/signed */
#define LEFT	2		/* left justified */
#define PLUS	4		/* show plus */
#define SPACE	8		/* space if plus */
#define ZEROPAD	16		/* pad with zero, must be 16 == '0' - ' ' */
#define SMALL	32		/* use lowercase in hex (must be 32 == 0x20) */
#define SPECIAL	64		/* prefix hex with "0x", octal with "0" */

static_assert(ZEROPAD == ('0' - ' '));
static_assert(SMALL == ('a' ^ 'A'));

enum format_state {
	FORMAT_STATE_NONE, /* Just a string part */
	FORMAT_STATE_NUM,
	FORMAT_STATE_WIDTH,
	FORMAT_STATE_PRECISION,
	FORMAT_STATE_CHAR,
	FORMAT_STATE_STR,
	FORMAT_STATE_PTR,
	FORMAT_STATE_PERCENT_CHAR,
	FORMAT_STATE_INVALID,
};

struct printf_spec {
	unsigned char	flags;		/* flags to number() */
	unsigned char	base;		/* number base, 8, 10 or 16 only */
	short		precision;	/* # of digits/chars */
	int		field_width;	/* width of output field */
} __packed;
static_assert(sizeof(struct printf_spec) == 8);

#define FIELD_WIDTH_MAX ((1 << 23) - 1)
#define PRECISION_MAX ((1 << 15) - 1)
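/*
 * Note on the flag values above (illustration, not in the original file):
 * the two static_asserts are load-bearing. number() below computes the pad
 * character as ' ' + (spec.flags & ZEROPAD), which only works because
 * '0' - ' ' == 0x30 - 0x20 == 16 == ZEROPAD; likewise it lowercases hex
 * digits with "c | locase" because 'a' ^ 'A' == 0x20 == SMALL.
 */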
static noinline_for_stack
char *number(char *buf, char *end, unsigned long long num,
	     struct printf_spec spec)
{
	/* put_dec requires 2-byte alignment of the buffer. */
	char tmp[3 * sizeof(num)] __aligned(2);
	char sign;
	char locase;
	int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
	int i;
	bool is_zero = num == 0LL;
	int field_width = spec.field_width;
	int precision = spec.precision;

	/* locase = 0 or 0x20. ORing digits or letters with 'locase'
	 * produces same digits or (maybe lowercased) letters */
	locase = (spec.flags & SMALL);
	if (spec.flags & LEFT)
		spec.flags &= ~ZEROPAD;
	sign = 0;
	if (spec.flags & SIGN) {
		if ((signed long long)num < 0) {
			sign = '-';
			num = -(signed long long)num;
			field_width--;
		} else if (spec.flags & PLUS) {
			sign = '+';
			field_width--;
		} else if (spec.flags & SPACE) {
			sign = ' ';
			field_width--;
		}
	}
	if (need_pfx) {
		if (spec.base == 16)
			field_width -= 2;
		else if (!is_zero)
			field_width--;
	}

	/* generate full string in tmp[], in reverse order */
	i = 0;
	if (num < spec.base)
		tmp[i++] = hex_asc_upper[num] | locase;
	else if (spec.base != 10) { /* 8 or 16 */
		int mask = spec.base - 1;
		int shift = 3;

		if (spec.base == 16)
			shift = 4;
		do {
			tmp[i++] = (hex_asc_upper[((unsigned char)num) & mask] | locase);
			num >>= shift;
		} while (num);
	} else { /* base 10 */
		i = put_dec(tmp, num) - tmp;
	}

	/* printing 100 using %2d gives "100", not "00" */
	if (i > precision)
		precision = i;
	/* leading space padding */
	field_width -= precision;
	if (!(spec.flags & (ZEROPAD | LEFT))) {
		while (--field_width >= 0) {
			if (buf < end)
				*buf = ' ';
			++buf;
		}
	}
	/* sign */
	if (sign) {
		if (buf < end)
			*buf = sign;
		++buf;
	}
	/* "0x" / "0" prefix */
	if (need_pfx) {
		if (spec.base == 16 || !is_zero) {
			if (buf < end)
				*buf = '0';
			++buf;
		}
		if (spec.base == 16) {
			if (buf < end)
				*buf = ('X' | locase);
			++buf;
		}
	}
	/* zero or space padding */
	if (!(spec.flags & LEFT)) {
		char c = ' ' + (spec.flags & ZEROPAD);

		while (--field_width >= 0) {
			if (buf < end)
				*buf = c;
			++buf;
		}
	}
	/* hmm even more zero padding? */
	while (i <= --precision) {
		if (buf < end)
			*buf = '0';
		++buf;
	}
	/* actual digits of result */
	while (--i >= 0) {
		if (buf < end)
			*buf = tmp[i];
		++buf;
	}
	/* trailing space padding */
	while (--field_width >= 0) {
		if (buf < end)
			*buf = ' ';
		++buf;
	}

	return buf;
}

static noinline_for_stack
char *special_hex_number(char *buf, char *end, unsigned long long num, int size)
{
	struct printf_spec spec;

	spec.field_width = 2 + 2 * size;	/* 0x + hex */
	spec.flags = SPECIAL | SMALL | ZEROPAD;
	spec.base = 16;
	spec.precision = -1;

	return number(buf, end, num, spec);
}

static void move_right(char *buf, char *end, unsigned len, unsigned spaces)
{
	size_t size;

	if (buf >= end)	/* nowhere to put anything */
		return;
	size = end - buf;
	if (size <= spaces) {
		memset(buf, ' ', size);
		return;
	}
	if (len) {
		if (len > size - spaces)
			len = size - spaces;
		memmove(buf + spaces, buf, len);
	}
	memset(buf, ' ', spaces);
}

/*
 * Handle field width padding for a string.
 * @buf: current buffer position
 * @n: length of string
 * @end: end of output buffer
 * @spec: for field width and flags
 * Returns: new buffer position after padding.
 */
static noinline_for_stack
char *widen_string(char *buf, int n, char *end, struct printf_spec spec)
{
	unsigned spaces;

	if (likely(n >= spec.field_width))
		return buf;
	/* we want to pad the sucker */
	spaces = spec.field_width - n;
	if (!(spec.flags & LEFT)) {
		move_right(buf - n, end, n, spaces);
		return buf + spaces;
	}
	while (spaces--) {
		if (buf < end)
			*buf = ' ';
		++buf;
	}
	return buf;
}
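/*
 * Illustration (not in the original file): special_hex_number() above
 * prints a fixed-width, "0x"-prefixed, zero-padded value; e.g.
 * num = 0x1234 with size = 4 yields "0x00001234" (field width
 * 2 + 2*size = 10).
 */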
/* Handle string from a well known address. */
static char *string_nocheck(char *buf, char *end, const char *s,
			    struct printf_spec spec)
{
	int len = 0;
	int lim = spec.precision;

	while (lim--) {
		char c = *s++;

		if (!c)
			break;
		if (buf < end)
			*buf = c;
		++buf;
		++len;
	}
	return widen_string(buf, len, end, spec);
}

static char *err_ptr(char *buf, char *end, void *ptr,
		     struct printf_spec spec)
{
	int err = PTR_ERR(ptr);
	const char *sym = errname(err);

	if (sym)
		return string_nocheck(buf, end, sym, spec);

	/*
	 * Somebody passed ERR_PTR(-1234) or some other non-existing
	 * Efoo - or perhaps CONFIG_SYMBOLIC_ERRNAME=n. Fall back to
	 * printing it as its decimal representation.
	 */
	spec.flags |= SIGN;
	spec.base = 10;
	return number(buf, end, err, spec);
}

/* Be careful: error messages must fit into the given buffer. */
static char *error_string(char *buf, char *end, const char *s,
			  struct printf_spec spec)
{
	/*
	 * Hard limit to avoid completely insane messages. It works
	 * pretty well because most error messages come from the many
	 * pointer format modifiers.
	 */
	if (spec.precision == -1)
		spec.precision = 2 * sizeof(void *);

	return string_nocheck(buf, end, s, spec);
}

/*
 * Do not call any complex external code here. Nested printk()/vsprintf()
 * might cause infinite loops. Failures might break printk() and would
 * be hard to debug.
 */
static const char *check_pointer_msg(const void *ptr)
{
	if (!ptr)
		return "(null)";

	if ((unsigned long)ptr < PAGE_SIZE || IS_ERR_VALUE(ptr))
		return "(efault)";

	return NULL;
}

static int check_pointer(char **buf, char *end, const void *ptr,
			 struct printf_spec spec)
{
	const char *err_msg;

	err_msg = check_pointer_msg(ptr);
	if (err_msg) {
		*buf = error_string(*buf, end, err_msg, spec);
		return -EFAULT;
	}

	return 0;
}

static noinline_for_stack
char *string(char *buf, char *end, const char *s,
	     struct printf_spec spec)
{
	if (check_pointer(&buf, end, s, spec))
		return buf;

	return string_nocheck(buf, end, s, spec);
}

static char *pointer_string(char *buf, char *end,
			    const void *ptr,
			    struct printf_spec spec)
{
	spec.base = 16;
	spec.flags |= SMALL;
	if (spec.field_width == -1) {
		spec.field_width = 2 * sizeof(ptr);
		spec.flags |= ZEROPAD;
	}

	return number(buf, end, (unsigned long int)ptr, spec);
}

/* Make pointers available for printing early in the boot sequence. */
static int debug_boot_weak_hash __ro_after_init;

static int __init debug_boot_weak_hash_enable(char *str)
{
	debug_boot_weak_hash = 1;
	pr_info("debug_boot_weak_hash enabled\n");
	return 0;
}
early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);

static bool filled_random_ptr_key __read_mostly;
static siphash_key_t ptr_key __read_mostly;

static int fill_ptr_key(struct notifier_block *nb, unsigned long action, void *data)
{
	get_random_bytes(&ptr_key, sizeof(ptr_key));

	/* Pairs with smp_rmb() before reading ptr_key. */
	smp_wmb();
	WRITE_ONCE(filled_random_ptr_key, true);
	return NOTIFY_DONE;
}

static int __init vsprintf_init_hashval(void)
{
	static struct notifier_block fill_ptr_key_nb = { .notifier_call = fill_ptr_key };

	execute_with_initialized_rng(&fill_ptr_key_nb);
	return 0;
}
subsys_initcall(vsprintf_init_hashval)

/* Maps a pointer to a 32 bit unique identifier. */
static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
{
	unsigned long hashval;

	if (!READ_ONCE(filled_random_ptr_key))
		return -EBUSY;

	/* Pairs with smp_wmb() after writing ptr_key. */
	smp_rmb();

#ifdef CONFIG_64BIT
	hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
	/*
	 * Mask off the first 32 bits, this makes explicit that we have
	 * modified the address (and 32 bits is plenty for a unique ID).
	 */
	hashval = hashval & 0xffffffff;
#else
	hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
#endif
	*hashval_out = hashval;
	return 0;
}

int ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
{
	return __ptr_to_hashval(ptr, hashval_out);
}

static char *ptr_to_id(char *buf, char *end, const void *ptr,
		       struct printf_spec spec)
{
	const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
	unsigned long hashval;
	int ret;

	/*
	 * Print the real pointer value for NULL and error pointers,
	 * as they are not actual addresses.
	 */
	if (IS_ERR_OR_NULL(ptr))
		return pointer_string(buf, end, ptr, spec);

	/* When debugging early boot use non-cryptographically secure hash. */
	if (unlikely(debug_boot_weak_hash)) {
		hashval = hash_long((unsigned long)ptr, 32);
		return pointer_string(buf, end, (const void *)hashval, spec);
	}

	ret = __ptr_to_hashval(ptr, &hashval);
	if (ret) {
		spec.field_width = 2 * sizeof(ptr);
		/* string length must be less than default_width */
		return error_string(buf, end, str, spec);
	}

	return pointer_string(buf, end, (const void *)hashval, spec);
}

static char *default_pointer(char *buf, char *end, const void *ptr,
			     struct printf_spec spec)
{
	/*
	 * default is to _not_ leak addresses, so hash before printing,
	 * unless no_hash_pointers is specified on the command line.
	 */
	if (unlikely(no_hash_pointers))
		return pointer_string(buf, end, ptr, spec);

	return ptr_to_id(buf, end, ptr, spec);
}

int kptr_restrict __read_mostly;

static noinline_for_stack
char *restricted_pointer(char *buf, char *end, const void *ptr,
			 struct printf_spec spec)
{
	switch (kptr_restrict) {
	case 0:
		/* Handle as %p, hash and do _not_ leak addresses. */
		return default_pointer(buf, end, ptr, spec);
	case 1: {
		const struct cred *cred;

		/*
		 * kptr_restrict==1 cannot be used in IRQ context
		 * because its test for CAP_SYSLOG would be meaningless.
		 */
		if (in_hardirq() || in_serving_softirq() || in_nmi()) {
			if (spec.field_width == -1)
				spec.field_width = 2 * sizeof(ptr);
			return error_string(buf, end, "pK-error", spec);
		}

		/*
		 * Only print the real pointer value if the current
		 * process has CAP_SYSLOG and is running with the
		 * same credentials it started with. This is because
		 * access to files is checked at open() time, but %pK
		 * checks permission at read() time. We don't want to
		 * leak pointer values if a binary opens a file using
		 * %pK and then elevates privileges before reading it.
		 */
		cred = current_cred();
		if (!has_capability_noaudit(current, CAP_SYSLOG) ||
		    !uid_eq(cred->euid, cred->uid) ||
		    !gid_eq(cred->egid, cred->gid))
			ptr = NULL;
		break;
	}
	case 2:
	default:
		/* Always print 0's for %pK */
		ptr = NULL;
		break;
	}

	return pointer_string(buf, end, ptr, spec);
}

static noinline_for_stack
char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
		  const char *fmt)
{
	const char *array[4], *s;
	const struct dentry *p;
	int depth;
	int i, n;

	switch (fmt[1]) {
	case '2': case '3': case '4':
		depth = fmt[1] - '0';
		break;
	default:
		depth = 1;
	}

	rcu_read_lock();
	for (i = 0; i < depth; i++, d = p) {
		if (check_pointer(&buf, end, d, spec)) {
			rcu_read_unlock();
			return buf;
		}

		p = READ_ONCE(d->d_parent);
		array[i] = READ_ONCE(d->d_name.name);
		if (p == d) {
			if (i)
				array[i] = "";
			i++;
			break;
		}
	}
	s = array[--i];
	for (n = 0; n != spec.precision; n++, buf++) {
		char c = *s++;

		if (!c) {
			if (!i)
				break;
			c = '/';
			s = array[--i];
		}
		if (buf < end)
			*buf = c;
	}
	rcu_read_unlock();
	return widen_string(buf, n, end, spec);
}

static noinline_for_stack
char *file_dentry_name(char *buf, char *end, const struct file *f,
			struct printf_spec spec, const char *fmt)
{
	if (check_pointer(&buf, end, f, spec))
		return buf;

	return dentry_name(buf, end, f->f_path.dentry, spec, fmt);
}

#ifdef CONFIG_BLOCK
static noinline_for_stack
char *bdev_name(char *buf, char *end, struct block_device *bdev,
		struct printf_spec spec, const char *fmt)
{
	struct gendisk *hd;

	if (check_pointer(&buf, end, bdev, spec))
		return buf;

	hd = bdev->bd_disk;
	buf = string(buf, end, hd->disk_name, spec);
	if (bdev_is_partition(bdev)) {
		if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) {
			if (buf < end)
				*buf = 'p';
			buf++;
		}
		buf = number(buf, end, bdev_partno(bdev), spec);
	}
	return buf;
}
#endif

static noinline_for_stack
char *symbol_string(char *buf, char *end, void *ptr,
		    struct printf_spec spec, const char *fmt)
{
	unsigned long value;
#ifdef CONFIG_KALLSYMS
	char sym[KSYM_SYMBOL_LEN];
#endif

	if (fmt[1] == 'R')
		ptr = __builtin_extract_return_addr(ptr);
	value = (unsigned long)ptr;

#ifdef CONFIG_KALLSYMS
	if (*fmt == 'B' && fmt[1] == 'b')
		sprint_backtrace_build_id(sym, value);
	else if (*fmt == 'B')
		sprint_backtrace(sym, value);
	else if (*fmt == 'S' && (fmt[1] == 'b' || (fmt[1] == 'R' && fmt[2] == 'b')))
		sprint_symbol_build_id(sym, value);
	else if (*fmt != 's')
		sprint_symbol(sym, value);
	else
		sprint_symbol_no_offset(sym, value);

	return string_nocheck(buf, end, sym, spec);
#else
	return special_hex_number(buf, end, value, sizeof(void *));
#endif
}

static const struct printf_spec default_str_spec = {
	.field_width = -1,
	.precision = -1,
};

static const struct printf_spec default_flag_spec = {
	.base = 16,
	.precision = -1,
	.flags = SPECIAL | SMALL,
};

static const struct printf_spec default_dec_spec = {
	.base = 10,
	.precision = -1,
};

static const struct printf_spec default_dec02_spec = {
	.base = 10,
	.field_width = 2,
	.precision = -1,
	.flags = ZEROPAD,
};

static const struct printf_spec default_dec04_spec = {
	.base = 10,
	.field_width = 4,
	.precision = -1,
	.flags = ZEROPAD,
};

static noinline_for_stack
char *hex_range(char *buf, char *end, u64 start_val, u64 end_val,
		struct printf_spec spec)
{
	buf = number(buf, end, start_val, spec);
	if (start_val == end_val)
		return buf;

	if (buf < end)
		*buf = '-';
	++buf;
	return number(buf, end, end_val, spec);
}
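/*
 * Illustration for resource_string() below (sketch, not in the original
 * file): with a 32-bit resource_size_t, a decoded memory resource spanning
 * 0x1000-0x1fff prints via %pR roughly as "[mem 0x00001000-0x00001fff]",
 * with " 64bit", " pref", " window" or " disabled" appended when the
 * corresponding IORESOURCE_* flags are set; %pr prints the raw flags word
 * instead of decoding it.
 */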
static noinline_for_stack
char *resource_string(char *buf, char *end, struct resource *res,
		      struct printf_spec spec, const char *fmt)
{
#ifndef IO_RSRC_PRINTK_SIZE
#define IO_RSRC_PRINTK_SIZE	6
#endif

#ifndef MEM_RSRC_PRINTK_SIZE
#define MEM_RSRC_PRINTK_SIZE	10
#endif
	static const struct printf_spec io_spec = {
		.base = 16,
		.field_width = IO_RSRC_PRINTK_SIZE,
		.precision = -1,
		.flags = SPECIAL | SMALL | ZEROPAD,
	};
	static const struct printf_spec mem_spec = {
		.base = 16,
		.field_width = MEM_RSRC_PRINTK_SIZE,
		.precision = -1,
		.flags = SPECIAL | SMALL | ZEROPAD,
	};
	static const struct printf_spec bus_spec = {
		.base = 16,
		.field_width = 2,
		.precision = -1,
		.flags = SMALL | ZEROPAD,
	};
	static const struct printf_spec str_spec = {
		.field_width = -1,
		.precision = 10,
		.flags = LEFT,
	};

	/* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8)
	 * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
#define RSRC_BUF_SIZE		((2 * sizeof(resource_size_t)) + 4)
#define FLAG_BUF_SIZE		(2 * sizeof(res->flags))
#define DECODED_BUF_SIZE	sizeof("[mem - 64bit pref window disabled]")
#define RAW_BUF_SIZE		sizeof("[mem - flags 0x]")
	char sym[MAX(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
		     2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];

	char *p = sym, *pend = sym + sizeof(sym);
	int decode = (fmt[0] == 'R') ? 1 : 0;
	const struct printf_spec *specp;

	if (check_pointer(&buf, end, res, spec))
		return buf;

	*p++ = '[';
	if (res->flags & IORESOURCE_IO) {
		p = string_nocheck(p, pend, "io  ", str_spec);
		specp = &io_spec;
	} else if (res->flags & IORESOURCE_MEM) {
		p = string_nocheck(p, pend, "mem ", str_spec);
		specp = &mem_spec;
	} else if (res->flags & IORESOURCE_IRQ) {
		p = string_nocheck(p, pend, "irq ", str_spec);
		specp = &default_dec_spec;
	} else if (res->flags & IORESOURCE_DMA) {
		p = string_nocheck(p, pend, "dma ", str_spec);
		specp = &default_dec_spec;
	} else if (res->flags & IORESOURCE_BUS) {
		p = string_nocheck(p, pend, "bus ", str_spec);
		specp = &bus_spec;
	} else {
		p = string_nocheck(p, pend, "??? ", str_spec);
		specp = &mem_spec;
		decode = 0;
	}
", str_spec); specp = &mem_spec; decode = 0; } if (decode && res->flags & IORESOURCE_UNSET) { p = string_nocheck(p, pend, "size ", str_spec); p = number(p, pend, resource_size(res), *specp); } else { p = hex_range(p, pend, res->start, res->end, *specp); } if (decode) { if (res->flags & IORESOURCE_MEM_64) p = string_nocheck(p, pend, " 64bit", str_spec); if (res->flags & IORESOURCE_PREFETCH) p = string_nocheck(p, pend, " pref", str_spec); if (res->flags & IORESOURCE_WINDOW) p = string_nocheck(p, pend, " window", str_spec); if (res->flags & IORESOURCE_DISABLED) p = string_nocheck(p, pend, " disabled", str_spec); } else { p = string_nocheck(p, pend, " flags ", str_spec); p = number(p, pend, res->flags, default_flag_spec); } *p++ = ']'; *p = '\0'; return string_nocheck(buf, end, sym, spec); } static noinline_for_stack char *range_string(char *buf, char *end, const struct range *range, struct printf_spec spec, const char *fmt) { char sym[sizeof("[range 0x0123456789abcdef-0x0123456789abcdef]")]; char *p = sym, *pend = sym + sizeof(sym); struct printf_spec range_spec = { .field_width = 2 + 2 * sizeof(range->start), /* 0x + 2 * 8 */ .flags = SPECIAL | SMALL | ZEROPAD, .base = 16, .precision = -1, }; if (check_pointer(&buf, end, range, spec)) return buf; p = string_nocheck(p, pend, "[range ", default_str_spec); p = hex_range(p, pend, range->start, range->end, range_spec); *p++ = ']'; *p = '\0'; return string_nocheck(buf, end, sym, spec); } static noinline_for_stack char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec, const char *fmt) { int i, len = 1; /* if we pass '%ph[CDN]', field width remains negative value, fallback to the default */ char separator; if (spec.field_width == 0) /* nothing to print */ return buf; if (check_pointer(&buf, end, addr, spec)) return buf; switch (fmt[1]) { case 'C': separator = ':'; break; case 'D': separator = '-'; break; case 'N': separator = 0; break; default: separator = ' '; break; } if (spec.field_width > 0) len = min_t(int, spec.field_width, 64); for (i = 0; i < len; ++i) { if (buf < end) *buf = hex_asc_hi(addr[i]); ++buf; if (buf < end) *buf = hex_asc_lo(addr[i]); ++buf; if (separator && i != len - 1) { if (buf < end) *buf = separator; ++buf; } } return buf; } static noinline_for_stack char *bitmap_string(char *buf, char *end, const unsigned long *bitmap, struct printf_spec spec, const char *fmt) { const int CHUNKSZ = 32; int nr_bits = max_t(int, spec.field_width, 0); int i, chunksz; bool first = true; if (check_pointer(&buf, end, bitmap, spec)) return buf; /* reused to print numbers */ spec = (struct printf_spec){ .flags = SMALL | ZEROPAD, .base = 16 }; chunksz = nr_bits & (CHUNKSZ - 1); if (chunksz == 0) chunksz = CHUNKSZ; i = ALIGN(nr_bits, CHUNKSZ) - CHUNKSZ; for (; i >= 0; i -= CHUNKSZ) { u32 chunkmask, val; int word, bit; chunkmask = ((1ULL << chunksz) - 1); word = i / BITS_PER_LONG; bit = i % BITS_PER_LONG; val = (bitmap[word] >> bit) & chunkmask; if (!first) { if (buf < end) *buf = ','; buf++; } first = false; spec.field_width = DIV_ROUND_UP(chunksz, 4); buf = number(buf, end, val, spec); chunksz = CHUNKSZ; } return buf; } static noinline_for_stack char *bitmap_list_string(char *buf, char *end, const unsigned long *bitmap, struct printf_spec spec, const char *fmt) { int nr_bits = max_t(int, spec.field_width, 0); bool first = true; int rbot, rtop; if (check_pointer(&buf, end, bitmap, spec)) return buf; for_each_set_bitrange(rbot, rtop, bitmap, nr_bits) { if (!first) { if (buf < end) *buf = ','; buf++; } first = false; buf = 
static noinline_for_stack
char *bitmap_list_string(char *buf, char *end, const unsigned long *bitmap,
			 struct printf_spec spec, const char *fmt)
{
	int nr_bits = max_t(int, spec.field_width, 0);
	bool first = true;
	int rbot, rtop;

	if (check_pointer(&buf, end, bitmap, spec))
		return buf;

	for_each_set_bitrange(rbot, rtop, bitmap, nr_bits) {
		if (!first) {
			if (buf < end)
				*buf = ',';
			buf++;
		}
		first = false;

		buf = number(buf, end, rbot, default_dec_spec);
		if (rtop == rbot + 1)
			continue;

		if (buf < end)
			*buf = '-';
		buf = number(++buf, end, rtop - 1, default_dec_spec);
	}
	return buf;
}

static noinline_for_stack
char *mac_address_string(char *buf, char *end, u8 *addr,
			 struct printf_spec spec, const char *fmt)
{
	char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
	char *p = mac_addr;
	int i;
	char separator;
	bool reversed = false;

	if (check_pointer(&buf, end, addr, spec))
		return buf;

	switch (fmt[1]) {
	case 'F':
		separator = '-';
		break;
	case 'R':
		reversed = true;
		fallthrough;
	default:
		separator = ':';
		break;
	}

	for (i = 0; i < 6; i++) {
		if (reversed)
			p = hex_byte_pack(p, addr[5 - i]);
		else
			p = hex_byte_pack(p, addr[i]);

		if (fmt[0] == 'M' && i != 5)
			*p++ = separator;
	}
	*p = '\0';

	return string_nocheck(buf, end, mac_addr, spec);
}

static noinline_for_stack
char *ip4_string(char *p, const u8 *addr, const char *fmt)
{
	int i;
	bool leading_zeros = (fmt[0] == 'i');
	int index;
	int step;

	switch (fmt[2]) {
	case 'h':
#ifdef __BIG_ENDIAN
		index = 0;
		step = 1;
#else
		index = 3;
		step = -1;
#endif
		break;
	case 'l':
		index = 3;
		step = -1;
		break;
	case 'n':
	case 'b':
	default:
		index = 0;
		step = 1;
		break;
	}
	for (i = 0; i < 4; i++) {
		char temp[4] __aligned(2);	/* hold each IP quad in reverse order */
		int digits = put_dec_trunc8(temp, addr[index]) - temp;

		if (leading_zeros) {
			if (digits < 3)
				*p++ = '0';
			if (digits < 2)
				*p++ = '0';
		}
		/* reverse the digits in the quad */
		while (digits--)
			*p++ = temp[digits];
		if (i < 3)
			*p++ = '.';
		index += step;
	}
	*p = '\0';

	return p;
}

static noinline_for_stack
char *ip6_compressed_string(char *p, const char *addr)
{
	int i, j, range;
	unsigned char zerolength[8];
	int longest = 1;
	int colonpos = -1;
	u16 word;
	u8 hi, lo;
	bool needcolon = false;
	bool useIPv4;
	struct in6_addr in6;

	memcpy(&in6, addr, sizeof(struct in6_addr));

	useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);

	memset(zerolength, 0, sizeof(zerolength));

	if (useIPv4)
		range = 6;
	else
		range = 8;

	/* find position of longest 0 run */
	for (i = 0; i < range; i++) {
		for (j = i; j < range; j++) {
			if (in6.s6_addr16[j] != 0)
				break;
			zerolength[i]++;
		}
	}
	for (i = 0; i < range; i++) {
		if (zerolength[i] > longest) {
			longest = zerolength[i];
			colonpos = i;
		}
	}
	if (longest == 1)		/* don't compress a single 0 */
		colonpos = -1;

	/* emit address */
	for (i = 0; i < range; i++) {
		if (i == colonpos) {
			if (needcolon || i == 0)
				*p++ = ':';
			*p++ = ':';
			needcolon = false;
			i += longest - 1;
			continue;
		}
		if (needcolon) {
			*p++ = ':';
			needcolon = false;
		}
		/* hex u16 without leading 0s */
		word = ntohs(in6.s6_addr16[i]);
		hi = word >> 8;
		lo = word & 0xff;
		if (hi) {
			if (hi > 0x0f)
				p = hex_byte_pack(p, hi);
			else
				*p++ = hex_asc_lo(hi);
			p = hex_byte_pack(p, lo);
		}
		else if (lo > 0x0f)
			p = hex_byte_pack(p, lo);
		else
			*p++ = hex_asc_lo(lo);
		needcolon = true;
	}

	if (useIPv4) {
		if (needcolon)
			*p++ = ':';
		p = ip4_string(p, &in6.s6_addr[12], "I4");
	}
	*p = '\0';

	return p;
}

static noinline_for_stack
char *ip6_string(char *p, const char *addr, const char *fmt)
{
	int i;

	for (i = 0; i < 8; i++) {
		p = hex_byte_pack(p, *addr++);
		p = hex_byte_pack(p, *addr++);
		if (fmt[0] == 'I' && i != 7)
			*p++ = ':';
	}
	*p = '\0';

	return p;
}

static noinline_for_stack
char *ip6_addr_string(char *buf, char *end, const u8 *addr,
		      struct printf_spec spec, const char *fmt)
{
	char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];

	if (fmt[0] == 'I' && fmt[2] == 'c')
		ip6_compressed_string(ip6_addr, addr);
	else
		ip6_string(ip6_addr, addr, fmt);

	return string_nocheck(buf, end, ip6_addr, spec);
}
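/*
 * Illustration (not in the original file): ip6_compressed_string() above
 * implements the RFC 5952 rules used by %pI6c, e.g.
 * 2001:0db8:0000:0000:0000:0000:0000:0001 is emitted as "2001:db8::1"
 * (longest zero run compressed, leading zeros dropped).
 */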
static noinline_for_stack
char *ip4_addr_string(char *buf, char *end, const u8 *addr,
		      struct printf_spec spec, const char *fmt)
{
	char ip4_addr[sizeof("255.255.255.255")];

	ip4_string(ip4_addr, addr, fmt);

	return string_nocheck(buf, end, ip4_addr, spec);
}

static noinline_for_stack
char *ip6_addr_string_sa(char *buf, char *end, const struct sockaddr_in6 *sa,
			 struct printf_spec spec, const char *fmt)
{
	bool have_p = false, have_s = false, have_f = false, have_c = false;
	char ip6_addr[sizeof("[xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255]") +
		      sizeof(":12345") + sizeof("/123456789") +
		      sizeof("%1234567890")];
	char *p = ip6_addr, *pend = ip6_addr + sizeof(ip6_addr);
	const u8 *addr = (const u8 *) &sa->sin6_addr;
	char fmt6[2] = { fmt[0], '6' };
	u8 off = 0;

	fmt++;
	while (isalpha(*++fmt)) {
		switch (*fmt) {
		case 'p':
			have_p = true;
			break;
		case 'f':
			have_f = true;
			break;
		case 's':
			have_s = true;
			break;
		case 'c':
			have_c = true;
			break;
		}
	}

	if (have_p || have_s || have_f) {
		*p = '[';
		off = 1;
	}

	if (fmt6[0] == 'I' && have_c)
		p = ip6_compressed_string(ip6_addr + off, addr);
	else
		p = ip6_string(ip6_addr + off, addr, fmt6);

	if (have_p || have_s || have_f)
		*p++ = ']';

	if (have_p) {
		*p++ = ':';
		p = number(p, pend, ntohs(sa->sin6_port), spec);
	}
	if (have_f) {
		*p++ = '/';
		p = number(p, pend, ntohl(sa->sin6_flowinfo &
					  IPV6_FLOWINFO_MASK), spec);
	}
	if (have_s) {
		*p++ = '%';
		p = number(p, pend, sa->sin6_scope_id, spec);
	}
	*p = '\0';

	return string_nocheck(buf, end, ip6_addr, spec);
}

static noinline_for_stack
char *ip4_addr_string_sa(char *buf, char *end, const struct sockaddr_in *sa,
			 struct printf_spec spec, const char *fmt)
{
	bool have_p = false;
	char *p, ip4_addr[sizeof("255.255.255.255") + sizeof(":12345")];
	char *pend = ip4_addr + sizeof(ip4_addr);
	const u8 *addr = (const u8 *) &sa->sin_addr.s_addr;
	char fmt4[3] = { fmt[0], '4', 0 };

	fmt++;
	while (isalpha(*++fmt)) {
		switch (*fmt) {
		case 'p':
			have_p = true;
			break;
		case 'h':
		case 'l':
		case 'n':
		case 'b':
			fmt4[2] = *fmt;
			break;
		}
	}

	p = ip4_string(ip4_addr, addr, fmt4);
	if (have_p) {
		*p++ = ':';
		p = number(p, pend, ntohs(sa->sin_port), spec);
	}
	*p = '\0';

	return string_nocheck(buf, end, ip4_addr, spec);
}

static noinline_for_stack
char *ip_addr_string(char *buf, char *end, const void *ptr,
		     struct printf_spec spec, const char *fmt)
{
	char *err_fmt_msg;

	if (check_pointer(&buf, end, ptr, spec))
		return buf;

	switch (fmt[1]) {
	case '6':
		return ip6_addr_string(buf, end, ptr, spec, fmt);
	case '4':
		return ip4_addr_string(buf, end, ptr, spec, fmt);
	case 'S': {
		const union {
			struct sockaddr		raw;
			struct sockaddr_in	v4;
			struct sockaddr_in6	v6;
		} *sa = ptr;

		switch (sa->raw.sa_family) {
		case AF_INET:
			return ip4_addr_string_sa(buf, end, &sa->v4, spec, fmt);
		case AF_INET6:
			return ip6_addr_string_sa(buf, end, &sa->v6, spec, fmt);
		default:
			return error_string(buf, end, "(einval)", spec);
		}}
	}

	err_fmt_msg = fmt[0] == 'i' ? "(%pi?)" : "(%pI?)";

	return error_string(buf, end, err_fmt_msg, spec);
}
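/*
 * Illustration (not in the original file): %pISpc on a struct sockaddr_in6
 * holding [2001:db8::1]:12345 prints "[2001:db8::1]:12345" - the address
 * is bracketed whenever a port, flowinfo or scope suffix is requested.
 */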
"(%pi?)" : "(%pI?)"; return error_string(buf, end, err_fmt_msg, spec); } static noinline_for_stack char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec, const char *fmt) { bool found = true; int count = 1; unsigned int flags = 0; int len; if (spec.field_width == 0) return buf; /* nothing to print */ if (check_pointer(&buf, end, addr, spec)) return buf; do { switch (fmt[count++]) { case 'a': flags |= ESCAPE_ANY; break; case 'c': flags |= ESCAPE_SPECIAL; break; case 'h': flags |= ESCAPE_HEX; break; case 'n': flags |= ESCAPE_NULL; break; case 'o': flags |= ESCAPE_OCTAL; break; case 'p': flags |= ESCAPE_NP; break; case 's': flags |= ESCAPE_SPACE; break; default: found = false; break; } } while (found); if (!flags) flags = ESCAPE_ANY_NP; len = spec.field_width < 0 ? 1 : spec.field_width; /* * string_escape_mem() writes as many characters as it can to * the given buffer, and returns the total size of the output * had the buffer been big enough. */ buf += string_escape_mem(addr, len, buf, buf < end ? end - buf : 0, flags, NULL); return buf; } #pragma GCC diagnostic push #ifndef __clang__ #pragma GCC diagnostic ignored "-Wsuggest-attribute=format" #endif static char *va_format(char *buf, char *end, struct va_format *va_fmt, struct printf_spec spec) { va_list va; if (check_pointer(&buf, end, va_fmt, spec)) return buf; va_copy(va, *va_fmt->va); buf += vsnprintf(buf, end > buf ? end - buf : 0, va_fmt->fmt, va); va_end(va); return buf; } #pragma GCC diagnostic pop static noinline_for_stack char *uuid_string(char *buf, char *end, const u8 *addr, struct printf_spec spec, const char *fmt) { char uuid[UUID_STRING_LEN + 1]; char *p = uuid; int i; const u8 *index = uuid_index; bool uc = false; if (check_pointer(&buf, end, addr, spec)) return buf; switch (*(++fmt)) { case 'L': uc = true; fallthrough; case 'l': index = guid_index; break; case 'B': uc = true; break; } for (i = 0; i < 16; i++) { if (uc) p = hex_byte_pack_upper(p, addr[index[i]]); else p = hex_byte_pack(p, addr[index[i]]); switch (i) { case 3: case 5: case 7: case 9: *p++ = '-'; break; } } *p = 0; return string_nocheck(buf, end, uuid, spec); } static noinline_for_stack char *netdev_bits(char *buf, char *end, const void *addr, struct printf_spec spec, const char *fmt) { unsigned long long num; int size; if (check_pointer(&buf, end, addr, spec)) return buf; switch (fmt[1]) { case 'F': num = *(const netdev_features_t *)addr; size = sizeof(netdev_features_t); break; default: return error_string(buf, end, "(%pN?)", spec); } return special_hex_number(buf, end, num, size); } static noinline_for_stack char *fourcc_string(char *buf, char *end, const u32 *fourcc, struct printf_spec spec, const char *fmt) { char output[sizeof("0123 little-endian (0x01234567)")]; char *p = output; unsigned int i; u32 orig, val; if (fmt[1] != 'c' || fmt[2] != 'c') return error_string(buf, end, "(%p4?)", spec); if (check_pointer(&buf, end, fourcc, spec)) return buf; orig = get_unaligned(fourcc); val = orig & ~BIT(31); for (i = 0; i < sizeof(u32); i++) { unsigned char c = val >> (i * 8); /* Print non-control ASCII characters as-is, dot otherwise */ *p++ = isascii(c) && isprint(c) ? c : '.'; } *p++ = ' '; strcpy(p, orig & BIT(31) ? 
"big-endian" : "little-endian"); p += strlen(p); *p++ = ' '; *p++ = '('; p = special_hex_number(p, output + sizeof(output) - 2, orig, sizeof(u32)); *p++ = ')'; *p = '\0'; return string(buf, end, output, spec); } static noinline_for_stack char *address_val(char *buf, char *end, const void *addr, struct printf_spec spec, const char *fmt) { unsigned long long num; int size; if (check_pointer(&buf, end, addr, spec)) return buf; switch (fmt[1]) { case 'd': num = *(const dma_addr_t *)addr; size = sizeof(dma_addr_t); break; case 'p': default: num = *(const phys_addr_t *)addr; size = sizeof(phys_addr_t); break; } return special_hex_number(buf, end, num, size); } static noinline_for_stack char *date_str(char *buf, char *end, const struct rtc_time *tm, bool r) { int year = tm->tm_year + (r ? 0 : 1900); int mon = tm->tm_mon + (r ? 0 : 1); buf = number(buf, end, year, default_dec04_spec); if (buf < end) *buf = '-'; buf++; buf = number(buf, end, mon, default_dec02_spec); if (buf < end) *buf = '-'; buf++; return number(buf, end, tm->tm_mday, default_dec02_spec); } static noinline_for_stack char *time_str(char *buf, char *end, const struct rtc_time *tm, bool r) { buf = number(buf, end, tm->tm_hour, default_dec02_spec); if (buf < end) *buf = ':'; buf++; buf = number(buf, end, tm->tm_min, default_dec02_spec); if (buf < end) *buf = ':'; buf++; return number(buf, end, tm->tm_sec, default_dec02_spec); } static noinline_for_stack char *rtc_str(char *buf, char *end, const struct rtc_time *tm, struct printf_spec spec, const char *fmt) { bool have_t = true, have_d = true; bool raw = false, iso8601_separator = true; bool found = true; int count = 2; if (check_pointer(&buf, end, tm, spec)) return buf; switch (fmt[count]) { case 'd': have_t = false; count++; break; case 't': have_d = false; count++; break; } do { switch (fmt[count++]) { case 'r': raw = true; break; case 's': iso8601_separator = false; break; default: found = false; break; } } while (found); if (have_d) buf = date_str(buf, end, tm, raw); if (have_d && have_t) { if (buf < end) *buf = iso8601_separator ? 
static noinline_for_stack
char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
	      struct printf_spec spec, const char *fmt)
{
	bool have_t = true, have_d = true;
	bool raw = false, iso8601_separator = true;
	bool found = true;
	int count = 2;

	if (check_pointer(&buf, end, tm, spec))
		return buf;

	switch (fmt[count]) {
	case 'd':
		have_t = false;
		count++;
		break;
	case 't':
		have_d = false;
		count++;
		break;
	}

	do {
		switch (fmt[count++]) {
		case 'r':
			raw = true;
			break;
		case 's':
			iso8601_separator = false;
			break;
		default:
			found = false;
			break;
		}
	} while (found);

	if (have_d)
		buf = date_str(buf, end, tm, raw);
	if (have_d && have_t) {
		if (buf < end)
			*buf = iso8601_separator ? 'T' : ' ';
		buf++;
	}
	if (have_t)
		buf = time_str(buf, end, tm, raw);

	return buf;
}

static noinline_for_stack
char *time64_str(char *buf, char *end, const time64_t time,
		 struct printf_spec spec, const char *fmt)
{
	struct rtc_time rtc_time;
	struct tm tm;

	time64_to_tm(time, 0, &tm);

	rtc_time.tm_sec = tm.tm_sec;
	rtc_time.tm_min = tm.tm_min;
	rtc_time.tm_hour = tm.tm_hour;
	rtc_time.tm_mday = tm.tm_mday;
	rtc_time.tm_mon = tm.tm_mon;
	rtc_time.tm_year = tm.tm_year;
	rtc_time.tm_wday = tm.tm_wday;
	rtc_time.tm_yday = tm.tm_yday;

	rtc_time.tm_isdst = 0;

	return rtc_str(buf, end, &rtc_time, spec, fmt);
}

static noinline_for_stack
char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
		    const char *fmt)
{
	switch (fmt[1]) {
	case 'R':
		return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt);
	case 'T':
		return time64_str(buf, end, *(const time64_t *)ptr, spec, fmt);
	default:
		return error_string(buf, end, "(%pt?)", spec);
	}
}

static noinline_for_stack
char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
	    const char *fmt)
{
	if (!IS_ENABLED(CONFIG_HAVE_CLK))
		return error_string(buf, end, "(%pC?)", spec);

	if (check_pointer(&buf, end, clk, spec))
		return buf;

	switch (fmt[1]) {
	case 'n':
	default:
#ifdef CONFIG_COMMON_CLK
		return string(buf, end, __clk_get_name(clk), spec);
#else
		return ptr_to_id(buf, end, clk, spec);
#endif
	}
}

static
char *format_flags(char *buf, char *end, unsigned long flags,
		   const struct trace_print_flags *names)
{
	unsigned long mask;

	for ( ; flags && names->name; names++) {
		mask = names->mask;
		if ((flags & mask) != mask)
			continue;

		buf = string(buf, end, names->name, default_str_spec);

		flags &= ~mask;
		if (flags) {
			if (buf < end)
				*buf = '|';
			buf++;
		}
	}

	if (flags)
		buf = number(buf, end, flags, default_flag_spec);

	return buf;
}

struct page_flags_fields {
	int width;
	int shift;
	int mask;
	const struct printf_spec *spec;
	const char *name;
};

static const struct page_flags_fields pff[] = {
	{SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
	 &default_dec_spec, "section"},
	{NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
	 &default_dec_spec, "node"},
	{ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
	 &default_dec_spec, "zone"},
	{LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
	 &default_flag_spec, "lastcpupid"},
	{KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
	 &default_flag_spec, "kasantag"},
};
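/*
 * Illustration (sketch, not in the original file): format_flags() above
 * turns a mask into "name|name|0x<rest>" using a trace_print_flags table,
 * so %pGg on GFP_KERNEL would print "GFP_KERNEL" and append any unknown
 * residue in hex; format_page_flags() below wraps the same output in
 * "<flags>(...)" together with the pff[] bitfields.
 */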
static
char *format_page_flags(char *buf, char *end, unsigned long flags)
{
	unsigned long main_flags = flags & PAGEFLAGS_MASK;
	bool append = false;
	int i;

	buf = number(buf, end, flags, default_flag_spec);
	if (buf < end)
		*buf = '(';
	buf++;

	/* Page flags from the main area. */
	if (main_flags) {
		buf = format_flags(buf, end, main_flags, pageflag_names);
		append = true;
	}

	/* Page flags from the fields area */
	for (i = 0; i < ARRAY_SIZE(pff); i++) {
		/* Skip undefined fields. */
		if (!pff[i].width)
			continue;

		/* Format: Flag Name + '=' (equals sign) + Number + '|' (separator) */
		if (append) {
			if (buf < end)
				*buf = '|';
			buf++;
		}

		buf = string(buf, end, pff[i].name, default_str_spec);
		if (buf < end)
			*buf = '=';
		buf++;
		buf = number(buf, end, (flags >> pff[i].shift) & pff[i].mask,
			     *pff[i].spec);

		append = true;
	}
	if (buf < end)
		*buf = ')';
	buf++;

	return buf;
}

static noinline_for_stack
char *flags_string(char *buf, char *end, void *flags_ptr,
		   struct printf_spec spec, const char *fmt)
{
	unsigned long flags;
	const struct trace_print_flags *names;

	if (check_pointer(&buf, end, flags_ptr, spec))
		return buf;

	switch (fmt[1]) {
	case 'p':
		return format_page_flags(buf, end, *(unsigned long *)flags_ptr);
	case 'v':
		flags = *(unsigned long *)flags_ptr;
		names = vmaflag_names;
		break;
	case 'g':
		flags = (__force unsigned long)(*(gfp_t *)flags_ptr);
		names = gfpflag_names;
		break;
	default:
		return error_string(buf, end, "(%pG?)", spec);
	}

	return format_flags(buf, end, flags, names);
}

static noinline_for_stack
char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
			      char *end)
{
	int depth;

	/* Loop starting from the root node to the current node. */
	for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
		/*
		 * Only get a reference for other nodes (i.e. parent nodes).
		 * fwnode refcount may be 0 here.
		 */
		struct fwnode_handle *__fwnode = depth ?
			fwnode_get_nth_parent(fwnode, depth) : fwnode;

		buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
			     default_str_spec);
		buf = string(buf, end, fwnode_get_name(__fwnode),
			     default_str_spec);

		if (depth)
			fwnode_handle_put(__fwnode);
	}

	return buf;
}
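/*
 * Illustration (sketch, not in the original file): for a device-tree node
 * at the hypothetical path /soc/i2c@100, %pOF would print the full path
 * "/soc/i2c@100" via fwnode_full_name_string() above, %pOFn just the name
 * "i2c" (precision trimmed at the '@'), and %pOFp the numeric phandle.
 */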
'B' : '-'; tbuf[4] = 0; buf = string_nocheck(buf, end, tbuf, str_spec); break; case 'c': /* major compatible string */ ret = of_property_read_string(dn, "compatible", &p); if (!ret) buf = string(buf, end, p, str_spec); break; case 'C': /* full compatible string */ has_mult = false; of_property_for_each_string(dn, "compatible", prop, p) { if (has_mult) buf = string_nocheck(buf, end, ",", str_spec); buf = string_nocheck(buf, end, "\"", str_spec); buf = string(buf, end, p, str_spec); buf = string_nocheck(buf, end, "\"", str_spec); has_mult = true; } break; default: break; } } return widen_string(buf, buf - buf_start, end, spec); } static noinline_for_stack char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode, struct printf_spec spec, const char *fmt) { struct printf_spec str_spec = spec; char *buf_start = buf; str_spec.field_width = -1; if (*fmt != 'w') return error_string(buf, end, "(%pf?)", spec); if (check_pointer(&buf, end, fwnode, spec)) return buf; fmt++; switch (*fmt) { case 'P': /* name */ buf = string(buf, end, fwnode_get_name(fwnode), str_spec); break; case 'f': /* full_name */ default: buf = fwnode_full_name_string(fwnode, buf, end); break; } return widen_string(buf, buf - buf_start, end, spec); } static noinline_for_stack char *resource_or_range(const char *fmt, char *buf, char *end, void *ptr, struct printf_spec spec) { if (*fmt == 'r' && fmt[1] == 'a') return range_string(buf, end, ptr, spec, fmt); return resource_string(buf, end, ptr, spec, fmt); } int __init no_hash_pointers_enable(char *str) { if (no_hash_pointers) return 0; no_hash_pointers = true; pr_warn("**********************************************************\n"); pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); pr_warn("** **\n"); pr_warn("** This system shows unhashed kernel memory addresses **\n"); pr_warn("** via the console, logs, and other interfaces. This **\n"); pr_warn("** might reduce the security of your system. **\n"); pr_warn("** **\n"); pr_warn("** If you see this message and you are not debugging **\n"); pr_warn("** the kernel, report this immediately to your system **\n"); pr_warn("** administrator! **\n"); pr_warn("** **\n"); pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); pr_warn("**********************************************************\n"); return 0; } early_param("no_hash_pointers", no_hash_pointers_enable); /* * Show a '%p' thing. A kernel extension is that the '%p' is followed * by an extra set of alphanumeric characters that are extended format * specifiers. * * Please update scripts/checkpatch.pl when adding/removing conversion * characters. (Search for "check for vsprintf extension"). * * Right now we handle: * * - 'S' For symbolic direct pointers (or function descriptors) with offset * - 's' For symbolic direct pointers (or function descriptors) without offset * - '[Ss]R' as above with __builtin_extract_return_addr() translation * - 'S[R]b' as above with module build ID (for use in backtraces) * - '[Ff]' %pf and %pF were obsoleted and later removed in favor of * %ps and %pS. Be careful when re-using these specifiers. 
* - 'B' For backtraced symbolic direct pointers with offset * - 'Bb' as above with module build ID (for use in backtraces) * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201] * - 'ra' For struct ranges, e.g., [range 0x0000000000000000 - 0x00000000000000ff] * - 'b[l]' For a bitmap, the number of bits is determined by the field * width which must be explicitly specified either as part of the * format string '%32b[l]' or through '%*b[l]', [l] selects * range-list format instead of hex format * - 'M' For a 6-byte MAC address, it prints the address in the * usual colon-separated hex notation * - 'm' For a 6-byte MAC address, it prints the hex address without colons * - 'MF' For a 6-byte MAC FDDI address, it prints the address * with a dash-separated hex notation * - '[mM]R' For a 6-byte MAC address, Reverse order (Bluetooth) * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) * IPv6 uses colon separated network-order 16 bit hex with leading 0's * [S][pfs] * Generic IPv4/IPv6 address (struct sockaddr *) that falls back to * [4] or [6] and is able to print port [p], flowinfo [f], scope [s] * - 'i' [46] for 'raw' IPv4/IPv6 addresses * IPv6 omits the colons (01020304...0f) * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) * [S][pfs] * Generic IPv4/IPv6 address (struct sockaddr *) that falls back to * [4] or [6] and is able to print port [p], flowinfo [f], scope [s] * - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order * - 'I[6S]c' for IPv6 addresses printed as specified by * https://tools.ietf.org/html/rfc5952 * - 'E[achnops]' For an escaped buffer, where rules are defined by combination * of the following flags (see string_escape_mem() for the * details): * a - ESCAPE_ANY * c - ESCAPE_SPECIAL * h - ESCAPE_HEX * n - ESCAPE_NULL * o - ESCAPE_OCTAL * p - ESCAPE_NP * s - ESCAPE_SPACE * By default ESCAPE_ANY_NP is used. * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" * Options for %pU are: * b big endian lower case hex (default) * B big endian UPPER case hex * l little endian lower case hex * L little endian UPPER case hex * big endian output byte order is: * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15] * little endian output byte order is: * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15] * - 'V' For a struct va_format which contains a format string * and va_list *, * call vsnprintf(->format, *->va_list). * Implements a "recursive vsnprintf". * Do not use this feature without some mechanism to verify the * correctness of the format string and va_list arguments. * - 'K' For a kernel pointer that should be hidden from unprivileged users. * Use only for procfs, sysfs and similar files, not printk(); please * read the documentation (path below) first. * - 'NF' For a netdev_features_t * - '4cc' V4L2 or DRM FourCC code, with endianness and raw numerical value. * - 'h[CDN]' For a variable-length buffer, it prints it as a hex string with * a certain separator (' ' by default): * C colon * D dash * N no separator * The maximum supported length is 64 bytes of input. Consider * using print_hex_dump() for larger input.
* - 'a[pd]' For address types [p] phys_addr_t, [d] dma_addr_t and derivatives * (default assumed to be phys_addr_t, passed by reference) * - 'd[234]' For a dentry name (optionally 2-4 last components) * - 'D[234]' Same as 'd' but for a struct file * - 'g' For block_device name (gendisk + partition number) * - 't[RT][dt][r][s]' For time and date as represented by: * R struct rtc_time * T time64_t * - 'C' For a clock, it prints the name (Common Clock Framework) or address * (legacy clock framework) of the clock * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address * (legacy clock framework) of the clock * - 'G' For flags to be printed as a collection of symbolic strings that would * construct the specific value. Supported flags given by option: * p page flags (see struct page) given as pointer to unsigned long * g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t * v vma flags (VM_*) given as pointer to unsigned long * - 'OF[fnpPcCF]' For a device tree object * Without any optional arguments prints the full_name * f device node full_name * n device node name * p device node phandle * P device node path spec (name + @unit) * F device node flags * c major compatible string * C full compatible string * - 'fw[fP]' For a firmware node (struct fwnode_handle) pointer * Without an option prints the full name of the node * f full name * P node name, including a possible unit address * - 'x' For printing the address unmodified. Equivalent to "%lx". * Please read the documentation (path below) before using! * - '[ku]s' For a BPF/tracing related format specifier, e.g. used out of * bpf_trace_printk() where [ku] prefix specifies either kernel (k) * or user (u) memory to probe, and: * s a string, equivalent to "%s" on direct vsnprintf() use * * ** When making changes please also update: * Documentation/core-api/printk-formats.rst * * Note: The default behaviour (unadorned %p) is to hash the address, * rendering it useful as a unique identifier. * * There is also a '%pA' format specifier, but it is only intended to be used * from Rust code to format core::fmt::Arguments. Do *not* use it from C. * See rust/kernel/print.rs for details. 
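 *
 * A few illustrative invocations of the extensions above (a sketch only;
 * 'res', 'mac' and 'np' are assumed locals of type struct resource *,
 * u8[6] and struct device_node * respectively):
 *
 *	printk("%pR\n", res);	decoded resource, e.g. [mem 0x0-0x1f 64bit pref]
 *	printk("%pM\n", mac);	colon-separated MAC, e.g. 00:01:02:03:04:05
 *	printk("%pOFf\n", np);	device tree node full name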
*/ static noinline_for_stack char *pointer(const char *fmt, char *buf, char *end, void *ptr, struct printf_spec spec) { switch (*fmt) { case 'S': case 's': ptr = dereference_symbol_descriptor(ptr); fallthrough; case 'B': return symbol_string(buf, end, ptr, spec, fmt); case 'R': case 'r': return resource_or_range(fmt, buf, end, ptr, spec); case 'h': return hex_string(buf, end, ptr, spec, fmt); case 'b': switch (fmt[1]) { case 'l': return bitmap_list_string(buf, end, ptr, spec, fmt); default: return bitmap_string(buf, end, ptr, spec, fmt); } case 'M': /* Colon separated: 00:01:02:03:04:05 */ case 'm': /* Contiguous: 000102030405 */ /* [mM]F (FDDI) */ /* [mM]R (Reverse order; Bluetooth) */ return mac_address_string(buf, end, ptr, spec, fmt); case 'I': /* Formatted IP supported * 4: 1.2.3.4 * 6: 0001:0203:...:0708 * 6c: 1::708 or 1::1.2.3.4 */ case 'i': /* Contiguous: * 4: 001.002.003.004 * 6: 000102...0f */ return ip_addr_string(buf, end, ptr, spec, fmt); case 'E': return escaped_string(buf, end, ptr, spec, fmt); case 'U': return uuid_string(buf, end, ptr, spec, fmt); case 'V': return va_format(buf, end, ptr, spec); case 'K': return restricted_pointer(buf, end, ptr, spec); case 'N': return netdev_bits(buf, end, ptr, spec, fmt); case '4': return fourcc_string(buf, end, ptr, spec, fmt); case 'a': return address_val(buf, end, ptr, spec, fmt); case 'd': return dentry_name(buf, end, ptr, spec, fmt); case 't': return time_and_date(buf, end, ptr, spec, fmt); case 'C': return clock(buf, end, ptr, spec, fmt); case 'D': return file_dentry_name(buf, end, ptr, spec, fmt); #ifdef CONFIG_BLOCK case 'g': return bdev_name(buf, end, ptr, spec, fmt); #endif case 'G': return flags_string(buf, end, ptr, spec, fmt); case 'O': return device_node_string(buf, end, ptr, spec, fmt + 1); case 'f': return fwnode_string(buf, end, ptr, spec, fmt + 1); case 'A': if (!IS_ENABLED(CONFIG_RUST)) { WARN_ONCE(1, "Please remove %%pA from non-Rust code\n"); return error_string(buf, end, "(%pA?)", spec); } return rust_fmt_argument(buf, end, ptr); case 'x': return pointer_string(buf, end, ptr, spec); case 'e': /* %pe with a non-ERR_PTR gets treated as plain %p */ if (!IS_ERR(ptr)) return default_pointer(buf, end, ptr, spec); return err_ptr(buf, end, ptr, spec); case 'u': case 'k': switch (fmt[1]) { case 's': return string(buf, end, ptr, spec); default: return error_string(buf, end, "(einval)", spec); } default: return default_pointer(buf, end, ptr, spec); } } struct fmt { const char *str; unsigned char state; // enum format_state unsigned char size; // size of numbers }; #define SPEC_CHAR(x, flag) [(x)-32] = flag static unsigned char spec_flag(unsigned char c) { static const unsigned char spec_flag_array[] = { SPEC_CHAR(' ', SPACE), SPEC_CHAR('#', SPECIAL), SPEC_CHAR('+', PLUS), SPEC_CHAR('-', LEFT), SPEC_CHAR('0', ZEROPAD), }; c -= 32; return (c < sizeof(spec_flag_array)) ? spec_flag_array[c] : 0; } /* * Helper function to decode printf style format. * Each call decodes a token from the format and returns the * number of characters read (or the delta to where it wants * to go on the next call). * The decoded token is returned through the parameters * * 'h', 'l', or 'L' for integer fields * 'z' support added 23/7/1999 S.H. * 'z' changed to 'Z' --davidm 1/25/99 * 'Z' changed to 'z' --adobriyan 2017-01-25 * 't' added for ptrdiff_t * * @fmt: the format string * @type: type of the token returned * @flags: various flags such as +, -, # tokens.. * @field_width: overwritten width * @base: base of the number (octal, hex, ...)
* @precision: precision of a number * @qualifier: qualifier of a number (long, size_t, ...) */ static noinline_for_stack struct fmt format_decode(struct fmt fmt, struct printf_spec *spec) { const char *start = fmt.str; char flag; /* we finished early by reading the field width */ if (unlikely(fmt.state == FORMAT_STATE_WIDTH)) { if (spec->field_width < 0) { spec->field_width = -spec->field_width; spec->flags |= LEFT; } fmt.state = FORMAT_STATE_NONE; goto precision; } /* we finished early by reading the precision */ if (unlikely(fmt.state == FORMAT_STATE_PRECISION)) { if (spec->precision < 0) spec->precision = 0; fmt.state = FORMAT_STATE_NONE; goto qualifier; } /* By default */ fmt.state = FORMAT_STATE_NONE; for (; *fmt.str ; fmt.str++) { if (*fmt.str == '%') break; } /* Return the current non-format string */ if (fmt.str != start || !*fmt.str) return fmt; /* Process flags. This also skips the first '%' */ spec->flags = 0; do { /* this also skips first '%' */ flag = spec_flag(*++fmt.str); spec->flags |= flag; } while (flag); /* get field width */ spec->field_width = -1; if (isdigit(*fmt.str)) spec->field_width = skip_atoi(&fmt.str); else if (unlikely(*fmt.str == '*')) { /* it's the next argument */ fmt.state = FORMAT_STATE_WIDTH; fmt.str++; return fmt; } precision: /* get the precision */ spec->precision = -1; if (unlikely(*fmt.str == '.')) { fmt.str++; if (isdigit(*fmt.str)) { spec->precision = skip_atoi(&fmt.str); if (spec->precision < 0) spec->precision = 0; } else if (*fmt.str == '*') { /* it's the next argument */ fmt.state = FORMAT_STATE_PRECISION; fmt.str++; return fmt; } } qualifier: /* Set up default numeric format */ spec->base = 10; fmt.state = FORMAT_STATE_NUM; fmt.size = sizeof(int); static const struct format_state { unsigned char state; unsigned char size; unsigned char flags_or_double_size; unsigned char base; } lookup_state[256] = { // Length ['l'] = { 0, sizeof(long), sizeof(long long) }, ['L'] = { 0, sizeof(long long) }, ['h'] = { 0, sizeof(short), sizeof(char) }, ['H'] = { 0, sizeof(char) }, // Questionable historical ['z'] = { 0, sizeof(size_t) }, ['t'] = { 0, sizeof(ptrdiff_t) }, // Non-numeric formats ['c'] = { FORMAT_STATE_CHAR }, ['s'] = { FORMAT_STATE_STR }, ['p'] = { FORMAT_STATE_PTR }, ['%'] = { FORMAT_STATE_PERCENT_CHAR }, // Numerics ['o'] = { FORMAT_STATE_NUM, 0, 0, 8 }, ['x'] = { FORMAT_STATE_NUM, 0, SMALL, 16 }, ['X'] = { FORMAT_STATE_NUM, 0, 0, 16 }, ['d'] = { FORMAT_STATE_NUM, 0, SIGN, 10 }, ['i'] = { FORMAT_STATE_NUM, 0, SIGN, 10 }, ['u'] = { FORMAT_STATE_NUM, 0, 0, 10, }, /* * Since %n poses a greater security risk than * utility, treat it as any other invalid or * unsupported format specifier. 
*/ }; const struct format_state *p = lookup_state + (u8)*fmt.str; if (p->size) { fmt.size = p->size; if (p->flags_or_double_size && fmt.str[0] == fmt.str[1]) { fmt.size = p->flags_or_double_size; fmt.str++; } fmt.str++; p = lookup_state + *fmt.str; } if (p->state) { if (p->base) spec->base = p->base; spec->flags |= p->flags_or_double_size; fmt.state = p->state; fmt.str++; return fmt; } WARN_ONCE(1, "Please remove unsupported %%%c in format string\n", *fmt.str); fmt.state = FORMAT_STATE_INVALID; return fmt; } static void set_field_width(struct printf_spec *spec, int width) { spec->field_width = width; if (WARN_ONCE(spec->field_width != width, "field width %d too large", width)) { spec->field_width = clamp(width, -FIELD_WIDTH_MAX, FIELD_WIDTH_MAX); } } static void set_precision(struct printf_spec *spec, int prec) { spec->precision = prec; if (WARN_ONCE(spec->precision != prec, "precision %d too large", prec)) { spec->precision = clamp(prec, 0, PRECISION_MAX); } } /* * Turn a 1/2/4-byte value into a 64-bit one for printing: truncate * as necessary and deal with signedness. * * 'size' is the size of the value in bytes. */ static unsigned long long convert_num_spec(unsigned int val, int size, struct printf_spec spec) { unsigned int shift = 32 - size*8; val <<= shift; if (!(spec.flags & SIGN)) return val >> shift; return (int)val >> shift; } /** * vsnprintf - Format a string and place it in a buffer * @buf: The buffer to place the result into * @size: The size of the buffer, including the trailing null space * @fmt_str: The format string to use * @args: Arguments for the format string * * This function generally follows C99 vsnprintf, but has some * extensions and a few limitations: * * - ``%n`` is unsupported * - ``%p*`` is handled by pointer() * * See pointer() or Documentation/core-api/printk-formats.rst for more * extensive description. * * **Please update the documentation in both places when making changes** * * The return value is the number of characters which would * be generated for the given input, excluding the trailing * '\0', as per ISO C99. If you want to have the exact * number of characters written into @buf as return value * (not including the trailing '\0'), use vscnprintf(). If the * return is greater than or equal to @size, the resulting * string is truncated. * * If you're not already dealing with a va_list consider using snprintf(). */ int vsnprintf(char *buf, size_t size, const char *fmt_str, va_list args) { char *str, *end; struct printf_spec spec = {0}; struct fmt fmt = { .str = fmt_str, .state = FORMAT_STATE_NONE, }; /* Reject out-of-range values early. Large positive sizes are used for unknown buffer sizes. 
*/ if (WARN_ON_ONCE(size > INT_MAX)) return 0; str = buf; end = buf + size; /* Make sure end is always >= buf */ if (end < buf) { end = ((void *)-1); size = end - buf; } while (*fmt.str) { const char *old_fmt = fmt.str; fmt = format_decode(fmt, &spec); switch (fmt.state) { case FORMAT_STATE_NONE: { int read = fmt.str - old_fmt; if (str < end) { int copy = read; if (copy > end - str) copy = end - str; memcpy(str, old_fmt, copy); } str += read; continue; } case FORMAT_STATE_NUM: { unsigned long long num; if (fmt.size <= sizeof(int)) num = convert_num_spec(va_arg(args, int), fmt.size, spec); else num = va_arg(args, long long); str = number(str, end, num, spec); continue; } case FORMAT_STATE_WIDTH: set_field_width(&spec, va_arg(args, int)); continue; case FORMAT_STATE_PRECISION: set_precision(&spec, va_arg(args, int)); continue; case FORMAT_STATE_CHAR: { char c; if (!(spec.flags & LEFT)) { while (--spec.field_width > 0) { if (str < end) *str = ' '; ++str; } } c = (unsigned char) va_arg(args, int); if (str < end) *str = c; ++str; while (--spec.field_width > 0) { if (str < end) *str = ' '; ++str; } continue; } case FORMAT_STATE_STR: str = string(str, end, va_arg(args, char *), spec); continue; case FORMAT_STATE_PTR: str = pointer(fmt.str, str, end, va_arg(args, void *), spec); while (isalnum(*fmt.str)) fmt.str++; continue; case FORMAT_STATE_PERCENT_CHAR: if (str < end) *str = '%'; ++str; continue; default: /* * Presumably the arguments passed gcc's type * checking, but there is no safe or sane way * for us to continue parsing the format and * fetching from the va_list; the remaining * specifiers and arguments would be out of * sync. */ goto out; } } out: if (size > 0) { if (str < end) *str = '\0'; else end[-1] = '\0'; } /* the trailing null byte doesn't count towards the total */ return str-buf; } EXPORT_SYMBOL(vsnprintf); /** * vscnprintf - Format a string and place it in a buffer * @buf: The buffer to place the result into * @size: The size of the buffer, including the trailing null space * @fmt: The format string to use * @args: Arguments for the format string * * The return value is the number of characters which have been written into * the @buf not including the trailing '\0'. If @size is == 0 the function * returns 0. * * If you're not already dealing with a va_list consider using scnprintf(). * * See the vsnprintf() documentation for format string extensions over C99. */ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) { int i; if (unlikely(!size)) return 0; i = vsnprintf(buf, size, fmt, args); if (likely(i < size)) return i; return size - 1; } EXPORT_SYMBOL(vscnprintf); /** * snprintf - Format a string and place it in a buffer * @buf: The buffer to place the result into * @size: The size of the buffer, including the trailing null space * @fmt: The format string to use * @...: Arguments for the format string * * The return value is the number of characters which would be * generated for the given input, excluding the trailing null, * as per ISO C99. If the return is greater than or equal to * @size, the resulting string is truncated. * * See the vsnprintf() documentation for format string extensions over C99. */ int snprintf(char *buf, size_t size, const char *fmt, ...) 
{ va_list args; int i; va_start(args, fmt); i = vsnprintf(buf, size, fmt, args); va_end(args); return i; } EXPORT_SYMBOL(snprintf); /** * scnprintf - Format a string and place it in a buffer * @buf: The buffer to place the result into * @size: The size of the buffer, including the trailing null space * @fmt: The format string to use * @...: Arguments for the format string * * The return value is the number of characters written into @buf not including * the trailing '\0'. If @size is == 0 the function returns 0. */ int scnprintf(char *buf, size_t size, const char *fmt, ...) { va_list args; int i; va_start(args, fmt); i = vscnprintf(buf, size, fmt, args); va_end(args); return i; } EXPORT_SYMBOL(scnprintf); /** * vsprintf - Format a string and place it in a buffer * @buf: The buffer to place the result into * @fmt: The format string to use * @args: Arguments for the format string * * The function returns the number of characters written * into @buf. Use vsnprintf() or vscnprintf() in order to avoid * buffer overflows. * * If you're not already dealing with a va_list consider using sprintf(). * * See the vsnprintf() documentation for format string extensions over C99. */ int vsprintf(char *buf, const char *fmt, va_list args) { return vsnprintf(buf, INT_MAX, fmt, args); } EXPORT_SYMBOL(vsprintf); /** * sprintf - Format a string and place it in a buffer * @buf: The buffer to place the result into * @fmt: The format string to use * @...: Arguments for the format string * * The function returns the number of characters written * into @buf. Use snprintf() or scnprintf() in order to avoid * buffer overflows. * * See the vsnprintf() documentation for format string extensions over C99. */ int sprintf(char *buf, const char *fmt, ...) { va_list args; int i; va_start(args, fmt); i = vsnprintf(buf, INT_MAX, fmt, args); va_end(args); return i; } EXPORT_SYMBOL(sprintf); #ifdef CONFIG_BINARY_PRINTF /* * bprintf service: * vbin_printf() - VA arguments to binary data * bstr_printf() - Binary data to text string */ /** * vbin_printf - Parse a format string and place args' binary value in a buffer * @bin_buf: The buffer to place args' binary value * @size: The size of the buffer (in 32-bit words, not characters) * @fmt_str: The format string to use * @args: Arguments for the format string * * The format follows C99 vsnprintf, except %n is ignored, and its argument * is skipped. * * The return value is the number of 32-bit words which would be generated for * the given input. * * NOTE: * If the return value is greater than @size, the resulting bin_buf is NOT * valid for bstr_printf().
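 *
 * Illustrative pairing with bstr_printf() (a sketch; the buffer sizes and
 * the 'fmt'/'args' variables are assumptions):
 *
 *	u32 bin[64];
 *	char out[128];
 *
 *	vbin_printf(bin, ARRAY_SIZE(bin), fmt, args);
 *	bstr_printf(out, sizeof(out), fmt, bin);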
*/ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt_str, va_list args) { struct fmt fmt = { .str = fmt_str, .state = FORMAT_STATE_NONE, }; struct printf_spec spec = {0}; char *str, *end; int width; str = (char *)bin_buf; end = (char *)(bin_buf + size); #define save_arg(type) \ ({ \ unsigned long long value; \ if (sizeof(type) == 8) { \ unsigned long long val8; \ str = PTR_ALIGN(str, sizeof(u32)); \ val8 = va_arg(args, unsigned long long); \ if (str + sizeof(type) <= end) { \ *(u32 *)str = *(u32 *)&val8; \ *(u32 *)(str + 4) = *((u32 *)&val8 + 1); \ } \ value = val8; \ } else { \ unsigned int val4; \ str = PTR_ALIGN(str, sizeof(type)); \ val4 = va_arg(args, int); \ if (str + sizeof(type) <= end) \ *(typeof(type) *)str = (type)(long)val4; \ value = (unsigned long long)val4; \ } \ str += sizeof(type); \ value; \ }) while (*fmt.str) { fmt = format_decode(fmt, &spec); switch (fmt.state) { case FORMAT_STATE_NONE: case FORMAT_STATE_PERCENT_CHAR: break; case FORMAT_STATE_INVALID: goto out; case FORMAT_STATE_WIDTH: case FORMAT_STATE_PRECISION: width = (int)save_arg(int); /* Pointers may require the width */ if (*fmt.str == 'p') set_field_width(&spec, width); break; case FORMAT_STATE_CHAR: save_arg(char); break; case FORMAT_STATE_STR: { const char *save_str = va_arg(args, char *); const char *err_msg; size_t len; err_msg = check_pointer_msg(save_str); if (err_msg) save_str = err_msg; len = strlen(save_str) + 1; if (str + len < end) memcpy(str, save_str, len); str += len; break; } case FORMAT_STATE_PTR: /* Dereferenced pointers must be done now */ switch (*fmt.str) { /* Dereference of functions is still OK */ case 'S': case 's': case 'x': case 'K': case 'e': save_arg(void *); break; default: if (!isalnum(*fmt.str)) { save_arg(void *); break; } str = pointer(fmt.str, str, end, va_arg(args, void *), spec); if (str + 1 < end) *str++ = '\0'; else end[-1] = '\0'; /* Must be nul terminated */ } /* skip all alphanumeric pointer suffixes */ while (isalnum(*fmt.str)) fmt.str++; break; case FORMAT_STATE_NUM: if (fmt.size > sizeof(int)) { save_arg(long long); } else { save_arg(int); } } } out: return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf; #undef save_arg } EXPORT_SYMBOL_GPL(vbin_printf); /** * bstr_printf - Format a string from binary arguments and place it in a buffer * @buf: The buffer to place the result into * @size: The size of the buffer, including the trailing null space * @fmt_str: The format string to use * @bin_buf: Binary arguments for the format string * * This function is like C99 vsnprintf, except that vsnprintf gets its * arguments from the stack, while bstr_printf gets them from @bin_buf, a * binary buffer generated by vbin_printf. * * The format follows C99 vsnprintf, but has some extensions: * see the vsnprintf() comment for details. * * The return value is the number of characters which would * be generated for the given input, excluding the trailing * '\0', as per ISO C99. If you want to have the exact * number of characters written into @buf as return value * (not including the trailing '\0'), use vscnprintf(). If the * return is greater than or equal to @size, the resulting * string is truncated.
*/ int bstr_printf(char *buf, size_t size, const char *fmt_str, const u32 *bin_buf) { struct fmt fmt = { .str = fmt_str, .state = FORMAT_STATE_NONE, }; struct printf_spec spec = {0}; char *str, *end; const char *args = (const char *)bin_buf; if (WARN_ON_ONCE(size > INT_MAX)) return 0; str = buf; end = buf + size; #define get_arg(type) \ ({ \ typeof(type) value; \ if (sizeof(type) == 8) { \ args = PTR_ALIGN(args, sizeof(u32)); \ *(u32 *)&value = *(u32 *)args; \ *((u32 *)&value + 1) = *(u32 *)(args + 4); \ } else { \ args = PTR_ALIGN(args, sizeof(type)); \ value = *(typeof(type) *)args; \ } \ args += sizeof(type); \ value; \ }) /* Make sure end is always >= buf */ if (end < buf) { end = ((void *)-1); size = end - buf; } while (*fmt.str) { const char *old_fmt = fmt.str; unsigned long long num; fmt = format_decode(fmt, &spec); switch (fmt.state) { case FORMAT_STATE_NONE: { int read = fmt.str - old_fmt; if (str < end) { int copy = read; if (copy > end - str) copy = end - str; memcpy(str, old_fmt, copy); } str += read; continue; } case FORMAT_STATE_WIDTH: set_field_width(&spec, get_arg(int)); continue; case FORMAT_STATE_PRECISION: set_precision(&spec, get_arg(int)); continue; case FORMAT_STATE_CHAR: { char c; if (!(spec.flags & LEFT)) { while (--spec.field_width > 0) { if (str < end) *str = ' '; ++str; } } c = (unsigned char) get_arg(char); if (str < end) *str = c; ++str; while (--spec.field_width > 0) { if (str < end) *str = ' '; ++str; } continue; } case FORMAT_STATE_STR: { const char *str_arg = args; args += strlen(str_arg) + 1; str = string(str, end, (char *)str_arg, spec); continue; } case FORMAT_STATE_PTR: { bool process = false; int copy, len; /* Non function dereferences were already done */ switch (*fmt.str) { case 'S': case 's': case 'x': case 'K': case 'e': process = true; break; default: if (!isalnum(*fmt.str)) { process = true; break; } /* Pointer dereference was already processed */ if (str < end) { len = copy = strlen(args); if (copy > end - str) copy = end - str; memcpy(str, args, copy); str += len; args += len + 1; } } if (process) str = pointer(fmt.str, str, end, get_arg(void *), spec); while (isalnum(*fmt.str)) fmt.str++; continue; } case FORMAT_STATE_PERCENT_CHAR: if (str < end) *str = '%'; ++str; continue; case FORMAT_STATE_INVALID: goto out; case FORMAT_STATE_NUM: if (fmt.size > sizeof(int)) { num = get_arg(long long); } else { num = convert_num_spec(get_arg(int), fmt.size, spec); } str = number(str, end, num, spec); continue; } } /* while(*fmt.str) */ out: if (size > 0) { if (str < end) *str = '\0'; else end[-1] = '\0'; } #undef get_arg /* the trailing null byte doesn't count towards the total */ return str - buf; } EXPORT_SYMBOL_GPL(bstr_printf); #endif /* CONFIG_BINARY_PRINTF */ /** * vsscanf - Unformat a buffer into a list of arguments * @buf: input buffer * @fmt: format of buffer * @args: arguments */ int vsscanf(const char *buf, const char *fmt, va_list args) { const char *str = buf; char *next; char digit; int num = 0; u8 qualifier; unsigned int base; union { long long s; unsigned long long u; } val; s16 field_width; bool is_sign; while (*fmt) { /* skip any white space in format */ /* white space in format matches any amount of * white space, including none, in the input. */ if (isspace(*fmt)) { fmt = skip_spaces(++fmt); str = skip_spaces(str); } /* anything that is not a conversion must match exactly */ if (*fmt != '%' && *fmt) { if (*fmt++ != *str++) break; continue; } if (!*fmt) break; ++fmt; /* skip this conversion. 
* advance both strings to next white space */ if (*fmt == '*') { if (!*str) break; while (!isspace(*fmt) && *fmt != '%' && *fmt) { /* '%*[' not yet supported, invalid format */ if (*fmt == '[') return num; fmt++; } while (!isspace(*str) && *str) str++; continue; } /* get field width */ field_width = -1; if (isdigit(*fmt)) { field_width = skip_atoi(&fmt); if (field_width <= 0) break; } /* get conversion qualifier */ qualifier = -1; if (*fmt == 'h' || _tolower(*fmt) == 'l' || *fmt == 'z') { qualifier = *fmt++; if (unlikely(qualifier == *fmt)) { if (qualifier == 'h') { qualifier = 'H'; fmt++; } else if (qualifier == 'l') { qualifier = 'L'; fmt++; } } } if (!*fmt) break; if (*fmt == 'n') { /* return number of characters read so far */ *va_arg(args, int *) = str - buf; ++fmt; continue; } if (!*str) break; base = 10; is_sign = false; switch (*fmt++) { case 'c': { char *s = (char *)va_arg(args, char*); if (field_width == -1) field_width = 1; do { *s++ = *str++; } while (--field_width > 0 && *str); num++; } continue; case 's': { char *s = (char *)va_arg(args, char *); if (field_width == -1) field_width = SHRT_MAX; /* first, skip leading white space in buffer */ str = skip_spaces(str); /* now copy until next white space */ while (*str && !isspace(*str) && field_width--) *s++ = *str++; *s = '\0'; num++; } continue; /* * Warning: This implementation of the '[' conversion specifier * deviates from its glibc counterpart in the following ways: * (1) It does NOT support ranges i.e. '-' is NOT a special * character * (2) It cannot match the closing bracket ']' itself * (3) A field width is required * (4) '%*[' (discard matching input) is currently not supported * * Example usage: * ret = sscanf("00:0a:95","%2[^:]:%2[^:]:%2[^:]", * buf1, buf2, buf3); * if (ret < 3) * // etc.. */ case '[': { char *s = (char *)va_arg(args, char *); DECLARE_BITMAP(set, 256) = {0}; unsigned int len = 0; bool negate = (*fmt == '^'); /* field width is required */ if (field_width == -1) return num; if (negate) ++fmt; for ( ; *fmt && *fmt != ']'; ++fmt, ++len) __set_bit((u8)*fmt, set); /* no ']' or no character set found */ if (!*fmt || !len) return num; ++fmt; if (negate) { bitmap_complement(set, set, 256); /* exclude null '\0' byte */ __clear_bit(0, set); } /* match must be non-empty */ if (!test_bit((u8)*str, set)) return num; while (test_bit((u8)*str, set) && field_width--) *s++ = *str++; *s = '\0'; ++num; } continue; case 'o': base = 8; break; case 'x': case 'X': base = 16; break; case 'i': base = 0; fallthrough; case 'd': is_sign = true; fallthrough; case 'u': break; case '%': /* looking for '%' in str */ if (*str++ != '%') return num; continue; default: /* invalid format; stop here */ return num; } /* have some sort of integer conversion. * first, skip white space in buffer. */ str = skip_spaces(str); digit = *str; if (is_sign && digit == '-') { if (field_width == 1) break; digit = *(str + 1); } if (!digit || (base == 16 && !isxdigit(digit)) || (base == 10 && !isdigit(digit)) || (base == 8 && !isodigit(digit)) || (base == 0 && !isdigit(digit))) break; if (is_sign) val.s = simple_strntoll(str, &next, base, field_width >= 0 ? field_width : INT_MAX); else val.u = simple_strntoull(str, &next, base, field_width >= 0 ? 
field_width : INT_MAX); switch (qualifier) { case 'H': /* that's 'hh' in format */ if (is_sign) *va_arg(args, signed char *) = val.s; else *va_arg(args, unsigned char *) = val.u; break; case 'h': if (is_sign) *va_arg(args, short *) = val.s; else *va_arg(args, unsigned short *) = val.u; break; case 'l': if (is_sign) *va_arg(args, long *) = val.s; else *va_arg(args, unsigned long *) = val.u; break; case 'L': if (is_sign) *va_arg(args, long long *) = val.s; else *va_arg(args, unsigned long long *) = val.u; break; case 'z': *va_arg(args, size_t *) = val.u; break; default: if (is_sign) *va_arg(args, int *) = val.s; else *va_arg(args, unsigned int *) = val.u; break; } num++; if (!next) break; str = next; } return num; } EXPORT_SYMBOL(vsscanf); /** * sscanf - Unformat a buffer into a list of arguments * @buf: input buffer * @fmt: formatting of buffer * @...: resulting arguments */ int sscanf(const char *buf, const char *fmt, ...) { va_list args; int i; va_start(args, fmt); i = vsscanf(buf, fmt, args); va_end(args); return i; } EXPORT_SYMBOL(sscanf);
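/*
 * A minimal sketch (not part of the original source) exercising the return
 * value contracts documented above; the function name and the buffers are
 * assumptions chosen for illustration.
 */
static void __maybe_unused printf_contract_sketch(void)
{
	char buf[8];
	int n, a, b;

	/* snprintf() reports the would-be length even when it truncates. */
	n = snprintf(buf, sizeof(buf), "%s", "truncated string");
	/* Here n == 16 while buf holds "truncat" plus the trailing '\0'. */

	/* scnprintf() instead returns what was actually written: n == 7. */
	n = scnprintf(buf, sizeof(buf), "%s", "truncated string");

	/* vsscanf()/sscanf() return the number of conversions matched. */
	n = sscanf("12:34", "%d:%d", &a, &b);
	/* Here n == 2, with a == 12 and b == 34. */
}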
// SPDX-License-Identifier: GPL-2.0-only #include <linux/mm.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/compiler.h> #include <linux/export.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/sched/task_stack.h> #include <linux/security.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/sysctl.h> #include <linux/mman.h> #include <linux/hugetlb.h> #include <linux/vmalloc.h> #include <linux/userfaultfd_k.h> #include <linux/elf.h> #include <linux/elf-randomize.h> #include <linux/personality.h> #include <linux/random.h> #include <linux/processor.h> #include <linux/sizes.h> #include <linux/compat.h> #include <linux/fsnotify.h> #include <linux/uaccess.h> #include <kunit/visibility.h> #include "internal.h" #include "swap.h" /** * kfree_const - conditionally free memory * @x: pointer to the memory * * Function calls kfree only if @x is not in .rodata section. */ void kfree_const(const void *x) { if (!is_kernel_rodata((unsigned long)x)) kfree(x); } EXPORT_SYMBOL(kfree_const); /** * __kmemdup_nul - Create a NUL-terminated string from @s, which might be unterminated. * @s: The data to copy * @len: The size of the data, not including the NUL terminator * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Return: newly allocated copy of @s with NUL-termination or %NULL in * case of error */ static __always_inline char *__kmemdup_nul(const char *s, size_t len, gfp_t gfp) { char *buf; /* '+1' for the NUL terminator */ buf = kmalloc_track_caller(len + 1, gfp); if (!buf) return NULL; memcpy(buf, s, len); /* Ensure the buf is always NUL-terminated, regardless of @s. */ buf[len] = '\0'; return buf; } /** * kstrdup - allocate space for and copy an existing string * @s: the string to duplicate * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Return: newly allocated copy of @s or %NULL in case of error */ noinline char *kstrdup(const char *s, gfp_t gfp) { return s ? __kmemdup_nul(s, strlen(s), gfp) : NULL; } EXPORT_SYMBOL(kstrdup); /** * kstrdup_const - conditionally duplicate an existing const string * @s: the string to duplicate * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Note: Strings allocated by kstrdup_const should be freed by kfree_const and * must not be passed to krealloc().
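 *
 * Illustrative pairing (a sketch; 'src' is an assumed existing string):
 *
 *	const char *name = kstrdup_const(src, GFP_KERNEL);
 *	...
 *	kfree_const(name);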
* * Return: source string if it is in .rodata section, otherwise * fall back to kstrdup(). */ const char *kstrdup_const(const char *s, gfp_t gfp) { if (is_kernel_rodata((unsigned long)s)) return s; return kstrdup(s, gfp); } EXPORT_SYMBOL(kstrdup_const); /** * kstrndup - allocate space for and copy an existing string * @s: the string to duplicate * @max: read at most @max chars from @s * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Note: Use kmemdup_nul() instead if the size is known exactly. * * Return: newly allocated copy of @s or %NULL in case of error */ char *kstrndup(const char *s, size_t max, gfp_t gfp) { return s ? __kmemdup_nul(s, strnlen(s, max), gfp) : NULL; } EXPORT_SYMBOL(kstrndup); /** * kmemdup - duplicate region of memory * * @src: memory region to duplicate * @len: memory region length * @gfp: GFP mask to use * * Return: newly allocated copy of @src or %NULL in case of error, * result is physically contiguous. Use kfree() to free. */ void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp) { void *p; p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_); if (p) memcpy(p, src, len); return p; } EXPORT_SYMBOL(kmemdup_noprof); /** * kmemdup_array - duplicate a given array. * * @src: array to duplicate. * @count: number of elements to duplicate from array. * @element_size: size of each element of array. * @gfp: GFP mask to use. * * Return: duplicated array of @src or %NULL in case of error, * result is physically contiguous. Use kfree() to free. */ void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp) { return kmemdup(src, size_mul(element_size, count), gfp); } EXPORT_SYMBOL(kmemdup_array); /** * kvmemdup - duplicate region of memory * * @src: memory region to duplicate * @len: memory region length * @gfp: GFP mask to use * * Return: newly allocated copy of @src or %NULL in case of error, * result may not be physically contiguous. Use kvfree() to free. */ void *kvmemdup(const void *src, size_t len, gfp_t gfp) { void *p; p = kvmalloc(len, gfp); if (p) memcpy(p, src, len); return p; } EXPORT_SYMBOL(kvmemdup); /** * kmemdup_nul - Create a NUL-terminated string from unterminated data * @s: The data to stringify * @len: The size of the data * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Return: newly allocated copy of @s with NUL-termination or %NULL in * case of error */ char *kmemdup_nul(const char *s, size_t len, gfp_t gfp) { return s ? __kmemdup_nul(s, len, gfp) : NULL; } EXPORT_SYMBOL(kmemdup_nul); static kmem_buckets *user_buckets __ro_after_init; static int __init init_user_buckets(void) { user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL); return 0; } subsys_initcall(init_user_buckets); /** * memdup_user - duplicate memory region from user space * * @src: source address in user space * @len: number of bytes to copy * * Return: an ERR_PTR() on failure. Result is physically * contiguous, to be freed by kfree(). */ void *memdup_user(const void __user *src, size_t len) { void *p; p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kfree(p); return ERR_PTR(-EFAULT); } return p; } EXPORT_SYMBOL(memdup_user); /** * vmemdup_user - duplicate memory region from user space * * @src: source address in user space * @len: number of bytes to copy * * Return: an ERR_PTR() on failure. Result may not be * physically contiguous. Use kvfree() to free.
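 *
 * Typical caller pattern (a sketch; 'uarg' and 'len' are assumed to come
 * from the caller, e.g. an ioctl argument):
 *
 *	buf = vmemdup_user(uarg, len);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kvfree(buf);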
*/ void *vmemdup_user(const void __user *src, size_t len) { void *p; p = kmem_buckets_valloc(user_buckets, len, GFP_USER); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kvfree(p); return ERR_PTR(-EFAULT); } return p; } EXPORT_SYMBOL(vmemdup_user); /** * strndup_user - duplicate an existing string from user space * @s: The string to duplicate * @n: Maximum number of bytes to copy, including the trailing NUL. * * Return: newly allocated copy of @s or an ERR_PTR() in case of error */ char *strndup_user(const char __user *s, long n) { char *p; long length; length = strnlen_user(s, n); if (!length) return ERR_PTR(-EFAULT); if (length > n) return ERR_PTR(-EINVAL); p = memdup_user(s, length); if (IS_ERR(p)) return p; p[length - 1] = '\0'; return p; } EXPORT_SYMBOL(strndup_user); /** * memdup_user_nul - duplicate memory region from user space and NUL-terminate * * @src: source address in user space * @len: number of bytes to copy * * Return: an ERR_PTR() on failure. */ void *memdup_user_nul(const void __user *src, size_t len) { char *p; p = kmem_buckets_alloc_track_caller(user_buckets, len + 1, GFP_USER | __GFP_NOWARN); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kfree(p); return ERR_PTR(-EFAULT); } p[len] = '\0'; return p; } EXPORT_SYMBOL(memdup_user_nul); /* Check if the vma is being used as a stack by this task */ int vma_is_stack_for_current(struct vm_area_struct *vma) { struct task_struct * __maybe_unused t = current; return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); } /* * Change backing file, only valid to use during initial VMA setup. */ void vma_set_file(struct vm_area_struct *vma, struct file *file) { /* Changing an anonymous vma with this is illegal */ get_file(file); swap(vma->vm_file, file); fput(file); } EXPORT_SYMBOL(vma_set_file); #ifndef STACK_RND_MASK #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */ #endif unsigned long randomize_stack_top(unsigned long stack_top) { unsigned long random_variable = 0; if (current->flags & PF_RANDOMIZE) { random_variable = get_random_long(); random_variable &= STACK_RND_MASK; random_variable <<= PAGE_SHIFT; } #ifdef CONFIG_STACK_GROWSUP return PAGE_ALIGN(stack_top) + random_variable; #else return PAGE_ALIGN(stack_top) - random_variable; #endif } /** * randomize_page - Generate a random, page aligned address * @start: The smallest acceptable address the caller will take. * @range: The size of the area, starting at @start, within which the * random address must fall. * * If @start + @range would overflow, @range is capped. * * NOTE: Historical use of randomize_range, which this replaces, presumed that * @start was already page aligned. We now align it regardless. * * Return: A page aligned address within [start, start + range). On error, * @start is returned. */ unsigned long randomize_page(unsigned long start, unsigned long range) { if (!PAGE_ALIGNED(start)) { range -= PAGE_ALIGN(start) - start; start = PAGE_ALIGN(start); } if (start > ULONG_MAX - range) range = ULONG_MAX - start; range >>= PAGE_SHIFT; if (range == 0) return start; return start + (get_random_long() % range << PAGE_SHIFT); } #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT unsigned long __weak arch_randomize_brk(struct mm_struct *mm) { /* Is the current task 32-bit?
*/ if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task()) return randomize_page(mm->brk, SZ_32M); return randomize_page(mm->brk, SZ_1G); } unsigned long arch_mmap_rnd(void) { unsigned long rnd; #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS if (is_compat_task()) rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); else #endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); return rnd << PAGE_SHIFT; } static int mmap_is_legacy(struct rlimit *rlim_stack) { if (current->personality & ADDR_COMPAT_LAYOUT) return 1; /* On parisc the stack always grows up - so an unlimited stack should * not be an indicator to use the legacy memory layout. */ if (rlim_stack->rlim_cur == RLIM_INFINITY && !IS_ENABLED(CONFIG_STACK_GROWSUP)) return 1; return sysctl_legacy_va_layout; } /* * Leave enough space between the mmap area and the stack to honour ulimit in * the face of randomisation. */ #define MIN_GAP (SZ_128M) #define MAX_GAP (STACK_TOP / 6 * 5) static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) { #ifdef CONFIG_STACK_GROWSUP /* * For an upwards growing stack the calculation is much simpler. * Memory for the maximum stack size is reserved at the top of the * task. mmap_base starts directly below the stack and grows * downwards. */ return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd); #else unsigned long gap = rlim_stack->rlim_cur; unsigned long pad = stack_guard_gap; /* Account for stack randomization if necessary */ if (current->flags & PF_RANDOMIZE) pad += (STACK_RND_MASK << PAGE_SHIFT); /* Values close to RLIM_INFINITY can overflow. */ if (gap + pad > gap) gap += pad; if (gap < MIN_GAP && MIN_GAP < MAX_GAP) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; return PAGE_ALIGN(STACK_TOP - gap - rnd); #endif } void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) { unsigned long random_factor = 0UL; if (current->flags & PF_RANDOMIZE) random_factor = arch_mmap_rnd(); if (mmap_is_legacy(rlim_stack)) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; clear_bit(MMF_TOPDOWN, &mm->flags); } else { mm->mmap_base = mmap_base(random_factor, rlim_stack); set_bit(MMF_TOPDOWN, &mm->flags); } } #elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) { mm->mmap_base = TASK_UNMAPPED_BASE; clear_bit(MMF_TOPDOWN, &mm->flags); } #endif #ifdef CONFIG_MMU EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout); #endif /** * __account_locked_vm - account locked pages to an mm's locked_vm * @mm: mm to account against * @pages: number of pages to account * @inc: %true if @pages should be considered positive, %false if not * @task: task used to check RLIMIT_MEMLOCK * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped * * Assumes @task and @mm are valid (i.e. at least one reference on each), and * that mmap_lock is held as writer. * * Return: * * 0 on success * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
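 *
 * Illustrative caller (a sketch; 'npages' is an assumed page count, and
 * account_locked_vm() below wraps this pattern with a CAP_IPC_LOCK based
 * rlimit bypass):
 *
 *	mmap_write_lock(mm);
 *	ret = __account_locked_vm(mm, npages, true, current, false);
 *	mmap_write_unlock(mm);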
*/ int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, struct task_struct *task, bool bypass_rlim) { unsigned long locked_vm, limit; int ret = 0; mmap_assert_write_locked(mm); locked_vm = mm->locked_vm; if (inc) { if (!bypass_rlim) { limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (locked_vm + pages > limit) ret = -ENOMEM; } if (!ret) mm->locked_vm = locked_vm + pages; } else { WARN_ON_ONCE(pages > locked_vm); mm->locked_vm = locked_vm - pages; } pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid, (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT, locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK), ret ? " - exceeded" : ""); return ret; } EXPORT_SYMBOL_GPL(__account_locked_vm); /** * account_locked_vm - account locked pages to an mm's locked_vm * @mm: mm to account against, may be NULL * @pages: number of pages to account * @inc: %true if @pages should be considered positive, %false if not * * Assumes a non-NULL @mm is valid (i.e. at least one reference on it). * * Return: * * 0 on success, or if mm is NULL * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded. */ int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc) { int ret; if (pages == 0 || !mm) return 0; mmap_write_lock(mm); ret = __account_locked_vm(mm, pages, inc, current, capable(CAP_IPC_LOCK)); mmap_write_unlock(mm); return ret; } EXPORT_SYMBOL_GPL(account_locked_vm); unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long pgoff) { unsigned long ret; struct mm_struct *mm = current->mm; unsigned long populate; LIST_HEAD(uf); ret = security_mmap_file(file, prot, flag); if (!ret) ret = fsnotify_mmap_perm(file, prot, pgoff >> PAGE_SHIFT, len); if (!ret) { if (mmap_write_lock_killable(mm)) return -EINTR; ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate, &uf); mmap_write_unlock(mm); userfaultfd_unmap_complete(mm, &uf); if (populate) mm_populate(ret, populate); } return ret; } /* * Perform a userland memory mapping into the current process address space. See * the comment for do_mmap() for more details on this operation in general. * * This differs from do_mmap() in that: * * a. An offset parameter is provided rather than pgoff, which is both checked * for overflow and page alignment. * b. mmap locking is performed on the caller's behalf. * c. Userfaultfd unmap events and memory population are handled. * * This means that this function performs essentially the same work as if * userland were invoking mmap (2). * * Returns either an error, or the address at which the requested mapping has * been performed. */ unsigned long vm_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long offset) { if (unlikely(offset + PAGE_ALIGN(len) < offset)) return -EINVAL; if (unlikely(offset_in_page(offset))) return -EINVAL; return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); } EXPORT_SYMBOL(vm_mmap); /** * __vmalloc_array - allocate memory for a virtually contiguous array. * @n: number of elements. * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; return __vmalloc_noprof(bytes, flags); } EXPORT_SYMBOL(__vmalloc_array_noprof); /** * vmalloc_array - allocate memory for a virtually contiguous array. 
* @n: number of elements. * @size: element size. */ void *vmalloc_array_noprof(size_t n, size_t size) { return __vmalloc_array_noprof(n, size, GFP_KERNEL); } EXPORT_SYMBOL(vmalloc_array_noprof); /** * __vcalloc - allocate and zero memory for a virtually contiguous array. * @n: number of elements. * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) { return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO); } EXPORT_SYMBOL(__vcalloc_noprof); /** * vcalloc - allocate and zero memory for a virtually contiguous array. * @n: number of elements. * @size: element size. */ void *vcalloc_noprof(size_t n, size_t size) { return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO); } EXPORT_SYMBOL(vcalloc_noprof); struct anon_vma *folio_anon_vma(const struct folio *folio) { unsigned long mapping = (unsigned long)folio->mapping; if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) return NULL; return (void *)(mapping - PAGE_MAPPING_ANON); } /** * folio_mapping - Find the mapping where this folio is stored. * @folio: The folio. * * For folios which are in the page cache, return the mapping that this * page belongs to. Folios in the swap cache return the swap mapping * this page is stored in (which is different from the mapping for the * swap file or swap device where the data is stored). * * You can call this for folios which aren't in the swap cache or page * cache and it will return NULL. */ struct address_space *folio_mapping(struct folio *folio) { struct address_space *mapping; /* This happens if someone calls flush_dcache_page on slab page */ if (unlikely(folio_test_slab(folio))) return NULL; if (unlikely(folio_test_swapcache(folio))) return swap_address_space(folio->swap); mapping = folio->mapping; if ((unsigned long)mapping & PAGE_MAPPING_FLAGS) return NULL; return mapping; } EXPORT_SYMBOL(folio_mapping); /** * folio_copy - Copy the contents of one folio to another. * @dst: Folio to copy to. * @src: Folio to copy from. * * The bytes in the folio represented by @src are copied to @dst. * Assumes the caller has validated that @dst is at least as large as @src. * Can be called in atomic context for order-0 folios, but if the folio is * larger, it may sleep. 
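 *
 * Illustrative use for a migration-style copy (a sketch; the names are
 * assumptions):
 *
 *	dst = folio_alloc(GFP_KERNEL, folio_order(src));
 *	if (dst)
 *		folio_copy(dst, src);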
*/ void folio_copy(struct folio *dst, struct folio *src) { long i = 0; long nr = folio_nr_pages(src); for (;;) { copy_highpage(folio_page(dst, i), folio_page(src, i)); if (++i == nr) break; cond_resched(); } } EXPORT_SYMBOL(folio_copy); int folio_mc_copy(struct folio *dst, struct folio *src) { long nr = folio_nr_pages(src); long i = 0; for (;;) { if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i))) return -EHWPOISON; if (++i == nr) break; cond_resched(); } return 0; } EXPORT_SYMBOL(folio_mc_copy); int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; static int sysctl_overcommit_ratio __read_mostly = 50; static unsigned long sysctl_overcommit_kbytes __read_mostly; int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ #ifdef CONFIG_SYSCTL static int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret == 0 && write) sysctl_overcommit_kbytes = 0; return ret; } static void sync_overcommit_as(struct work_struct *dummy) { percpu_counter_sync(&vm_committed_as); } static int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; int new_policy = -1; int ret; /* * The deviation of sync_overcommit_as could be big with loose policies * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing to the strict * OVERCOMMIT_NEVER policy, we need to reduce the deviation to comply * with the strict "NEVER", and to avoid a possible race condition (even * though users are unlikely to switch policies frequently), the switch * is done in the following order: * 1. change the batch * 2. sync the percpu count on each CPU * 3.
switch the policy */ if (write) { t = *table; t.data = &new_policy; ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); if (ret || new_policy == -1) return ret; mm_compute_batch(new_policy); if (new_policy == OVERCOMMIT_NEVER) schedule_on_each_cpu(sync_overcommit_as); sysctl_overcommit_memory = new_policy; } else { ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); } return ret; } static int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write) sysctl_overcommit_ratio = 0; return ret; } static const struct ctl_table util_sysctl_table[] = { { .procname = "overcommit_memory", .data = &sysctl_overcommit_memory, .maxlen = sizeof(sysctl_overcommit_memory), .mode = 0644, .proc_handler = overcommit_policy_handler, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_TWO, }, { .procname = "overcommit_ratio", .data = &sysctl_overcommit_ratio, .maxlen = sizeof(sysctl_overcommit_ratio), .mode = 0644, .proc_handler = overcommit_ratio_handler, }, { .procname = "overcommit_kbytes", .data = &sysctl_overcommit_kbytes, .maxlen = sizeof(sysctl_overcommit_kbytes), .mode = 0644, .proc_handler = overcommit_kbytes_handler, }, { .procname = "user_reserve_kbytes", .data = &sysctl_user_reserve_kbytes, .maxlen = sizeof(sysctl_user_reserve_kbytes), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "admin_reserve_kbytes", .data = &sysctl_admin_reserve_kbytes, .maxlen = sizeof(sysctl_admin_reserve_kbytes), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, }; static int __init init_vm_util_sysctls(void) { register_sysctl_init("vm", util_sysctl_table); return 0; } subsys_initcall(init_vm_util_sysctls); #endif /* CONFIG_SYSCTL */ /* * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used */ unsigned long vm_commit_limit(void) { unsigned long allowed; if (sysctl_overcommit_kbytes) allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10); else allowed = ((totalram_pages() - hugetlb_total_pages()) * sysctl_overcommit_ratio / 100); allowed += total_swap_pages; return allowed; } /* * Make sure vm_committed_as in one cacheline and not cacheline shared with * other variables. It can be updated by several CPUs frequently. */ struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp; /* * The global memory commitment made in the system can be a metric * that can be used to drive ballooning decisions when Linux is hosted * as a guest. On Hyper-V, the host implements a policy engine for dynamically * balancing memory across competing virtual machines that are hosted. * Several metrics drive this policy engine including the guest reported * memory commitment. * * The time cost of this is very low for small platforms, and for big * platform like a 2S/36C/72T Skylake server, in worst case where * vm_committed_as's spinlock is under severe contention, the time cost * could be about 30~40 microseconds. */ unsigned long vm_memory_committed(void) { return percpu_counter_sum_positive(&vm_committed_as); } EXPORT_SYMBOL_GPL(vm_memory_committed); /* * Check that a process has enough memory to allocate a new virtual * mapping. 0 means there is enough memory for the allocation to * succeed and -ENOMEM implies there is not. * * We currently support three overcommit policies, which are set via the * vm.overcommit_memory sysctl. See Documentation/mm/overcommit-accounting.rst * * Strict overcommit modes added 2002 Feb 26 by Alan Cox. 
* Additional code 2002 Jul 20 by Robert Love. * * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise. * * Note this is a helper function intended to be used by LSMs which * wish to use this logic. */ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) { long allowed; unsigned long bytes_failed; vm_acct_memory(pages); /* * Sometimes we want to use more memory than we have */ if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) return 0; if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { if (pages > totalram_pages() + total_swap_pages) goto error; return 0; } allowed = vm_commit_limit(); /* * Reserve some for root */ if (!cap_sys_admin) allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10); /* * Don't let a single process grow so big a user can't recover */ if (mm) { long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); allowed -= min_t(long, mm->total_vm / 32, reserve); } if (percpu_counter_read_positive(&vm_committed_as) < allowed) return 0; error: bytes_failed = pages << PAGE_SHIFT; pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n", __func__, current->pid, current->comm, bytes_failed); vm_unacct_memory(pages); return -ENOMEM; } /** * get_cmdline() - copy the cmdline value to a buffer. * @task: the task whose cmdline value to copy. * @buffer: the buffer to copy to. * @buflen: the length of the buffer. Larger cmdline values are truncated * to this length. * * Return: the size of the cmdline field copied. Note that the copy does * not guarantee an ending NULL byte. */ int get_cmdline(struct task_struct *task, char *buffer, int buflen) { int res = 0; unsigned int len; struct mm_struct *mm = get_task_mm(task); unsigned long arg_start, arg_end, env_start, env_end; if (!mm) goto out; if (!mm->arg_end) goto out_mm; /* Shh! No looking before we're done */ spin_lock(&mm->arg_lock); arg_start = mm->arg_start; arg_end = mm->arg_end; env_start = mm->env_start; env_end = mm->env_end; spin_unlock(&mm->arg_lock); len = arg_end - arg_start; if (len > buflen) len = buflen; res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE); /* * If the nul at the end of args has been overwritten, then * assume application is using setproctitle(3). */ if (res > 0 && buffer[res-1] != '\0' && len < buflen) { len = strnlen(buffer, res); if (len < res) { res = len; } else { len = env_end - env_start; if (len > buflen - res) len = buflen - res; res += access_process_vm(task, env_start, buffer+res, len, FOLL_FORCE); res = strnlen(buffer, res); } } out_mm: mmput(mm); out: return res; } int __weak memcmp_pages(struct page *page1, struct page *page2) { char *addr1, *addr2; int ret; addr1 = kmap_local_page(page1); addr2 = kmap_local_page(page2); ret = memcmp(addr1, addr2, PAGE_SIZE); kunmap_local(addr2); kunmap_local(addr1); return ret; } #ifdef CONFIG_PRINTK /** * mem_dump_obj - Print available provenance information * @object: object for which to find provenance information. * * This function uses pr_cont(), so that the caller is expected to have * printed out whatever preamble is appropriate. The provenance information * depends on the type of object and on how much debugging is enabled. * For example, for a slab-cache object, the slab name is printed, and, * if available, the return address and stack trace from the allocation * and last free path of that object. 
*/ void mem_dump_obj(void *object) { const char *type; if (kmem_dump_obj(object)) return; if (vmalloc_dump_obj(object)) return; if (is_vmalloc_addr(object)) type = "vmalloc memory"; else if (virt_addr_valid(object)) type = "non-slab/vmalloc memory"; else if (object == NULL) type = "NULL pointer"; else if (object == ZERO_SIZE_PTR) type = "zero-size pointer"; else type = "non-paged memory"; pr_cont(" %s\n", type); } EXPORT_SYMBOL_GPL(mem_dump_obj); #endif /* * A driver might set a page logically offline -- PageOffline() -- and * turn the page inaccessible in the hypervisor; after that, access to page * content can be fatal. * * Some special PFN walkers -- i.e., /proc/kcore -- read content of random * pages after checking PageOffline(); however, these PFN walkers can race * with drivers that set PageOffline(). * * page_offline_freeze()/page_offline_thaw() allows for a subsystem to * synchronize with such drivers, achieving that a page cannot be set * PageOffline() while frozen. * * page_offline_begin()/page_offline_end() is used by drivers that care about * such races when setting a page PageOffline(). */ static DECLARE_RWSEM(page_offline_rwsem); void page_offline_freeze(void) { down_read(&page_offline_rwsem); } void page_offline_thaw(void) { up_read(&page_offline_rwsem); } void page_offline_begin(void) { down_write(&page_offline_rwsem); } EXPORT_SYMBOL(page_offline_begin); void page_offline_end(void) { up_write(&page_offline_rwsem); } EXPORT_SYMBOL(page_offline_end); #ifndef flush_dcache_folio void flush_dcache_folio(struct folio *folio) { long i, nr = folio_nr_pages(folio); for (i = 0; i < nr; i++) flush_dcache_page(folio_page(folio, i)); } EXPORT_SYMBOL(flush_dcache_folio); #endif
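/*
 * Editor's example (a sketch, not part of the scanned sources): how a caller
 * such as a driver that pins user pages might use account_locked_vm() from
 * this file. The demo_* helper names are hypothetical; the API calls are the
 * real ones defined above.
 */
#include <linux/mm.h>
#include <linux/sched.h>

/* Charge @npages against the caller's RLIMIT_MEMLOCK before pinning them. */
static int demo_charge_pinned_pages(unsigned long npages)
{
	/*
	 * account_locked_vm() takes the mmap write lock itself and returns
	 * -ENOMEM if locked_vm + npages would exceed RLIMIT_MEMLOCK (unless
	 * the task has CAP_IPC_LOCK).
	 */
	return account_locked_vm(current->mm, npages, true);
}

/* Undo the accounting once the pages have been unpinned. */
static void demo_uncharge_pinned_pages(unsigned long npages)
{
	account_locked_vm(current->mm, npages, false);
}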
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/videodev2.h> #include "pvrusb2-hdw.h" #include "pvrusb2-devattr.h" #include "pvrusb2-context.h" #include "pvrusb2-debug.h" #include "pvrusb2-v4l2.h" #include "pvrusb2-sysfs.h" #define DRIVER_AUTHOR "Mike Isely <isely@pobox.com>" #define DRIVER_DESC "Hauppauge WinTV-PVR-USB2 MPEG2 Encoder/Tuner" #define DRIVER_VERSION "V4L in-tree version" #define DEFAULT_DEBUG_MASK (PVR2_TRACE_ERROR_LEGS| \ PVR2_TRACE_INFO| \ PVR2_TRACE_STD| \ PVR2_TRACE_TOLERANCE| \ PVR2_TRACE_TRAP| \ 0) int pvrusb2_debug = DEFAULT_DEBUG_MASK; module_param_named(debug,pvrusb2_debug,int,S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug, "Debug trace mask"); static void pvr_setup_attach(struct pvr2_context *pvr) { /* Create association with v4l layer */ pvr2_v4l2_create(pvr); #ifdef CONFIG_VIDEO_PVRUSB2_DVB /* Create association with dvb layer */ pvr2_dvb_create(pvr); #endif pvr2_sysfs_create(pvr); } static int pvr_probe(struct usb_interface *intf, const struct usb_device_id *devid) { struct pvr2_context *pvr; /* Create underlying hardware interface */ pvr = pvr2_context_create(intf,devid,pvr_setup_attach); if (!pvr) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "Failed to create hdw handler"); return -ENOMEM; } pvr2_trace(PVR2_TRACE_INIT,"pvr_probe(pvr=%p)",pvr); usb_set_intfdata(intf, pvr); return 0; } /* * pvr_disconnect() * */ static void pvr_disconnect(struct usb_interface *intf) { struct pvr2_context *pvr = usb_get_intfdata(intf); pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) BEGIN",pvr); usb_set_intfdata (intf, NULL); pvr2_context_disconnect(pvr); pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) DONE",pvr); } static struct usb_driver pvr_driver = { .name = "pvrusb2", .id_table = pvr2_device_table, .probe = pvr_probe, .disconnect = pvr_disconnect }; /* * pvr_init() / pvr_exit() * * This code is run to initialize/exit the driver. * */ static int __init pvr_init(void) { int ret; pvr2_trace(PVR2_TRACE_INIT,"pvr_init"); ret = pvr2_context_global_init(); if (ret != 0) { pvr2_trace(PVR2_TRACE_INIT,"pvr_init failure code=%d",ret); return ret; } pvr2_sysfs_class_create(); ret = usb_register(&pvr_driver); if (ret == 0) pr_info("pvrusb2: " DRIVER_VERSION ":" DRIVER_DESC "\n"); if (pvrusb2_debug) pr_info("pvrusb2: Debug mask is %d (0x%x)\n", pvrusb2_debug,pvrusb2_debug); pvr2_trace(PVR2_TRACE_INIT,"pvr_init complete"); return ret; } static void __exit pvr_exit(void) { pvr2_trace(PVR2_TRACE_INIT,"pvr_exit"); usb_deregister(&pvr_driver); pvr2_context_global_done(); pvr2_sysfs_class_destroy(); pvr2_trace(PVR2_TRACE_INIT,"pvr_exit complete"); } module_init(pvr_init); module_exit(pvr_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION("0.9.1");
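/*
 * Editor's note: pvr_init() cannot use the module_usb_driver() helper because
 * it must run pvr2_context_global_init() and create the sysfs class before
 * usb_register(). For comparison, a driver with no such global setup reduces
 * to the sketch below; the demo_* names and the VID/PID are placeholders.
 */
#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id demo_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* placeholder IDs */
	{ }				/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, demo_id_table);

static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	return 0;	/* claim the interface, allocate per-device state */
}

static void demo_disconnect(struct usb_interface *intf)
{
}

static struct usb_driver demo_driver = {
	.name = "demo",
	.id_table = demo_id_table,
	.probe = demo_probe,
	.disconnect = demo_disconnect,
};

/* Expands to module_init()/module_exit() that just (de)register the driver. */
module_usb_driver(demo_driver);
MODULE_LICENSE("GPL");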
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2012 Hans Verkuil <hverkuil@xs4all.nl> */ /* kernel includes */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <linux/usb.h> #include <linux/mutex.h> /* driver and module definitions */ MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>"); MODULE_DESCRIPTION("Keene FM Transmitter driver"); MODULE_LICENSE("GPL"); /* Actually, it advertises itself as a Logitech */ #define USB_KEENE_VENDOR 0x046d #define USB_KEENE_PRODUCT 0x0a0e /* USB_TIMEOUT should probably be made a module parameter */ #define BUFFER_LENGTH 8 #define USB_TIMEOUT 500 /* Frequency limits in MHz */ #define FREQ_MIN 76U #define FREQ_MAX 108U #define FREQ_MUL 16000U /* USB Device ID List */ static const struct usb_device_id usb_keene_device_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(USB_KEENE_VENDOR, USB_KEENE_PRODUCT, USB_CLASS_HID, 0, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usb_keene_device_table); struct keene_device { struct usb_device *usbdev; struct usb_interface *intf; struct video_device vdev; struct v4l2_device v4l2_dev; struct v4l2_ctrl_handler hdl; struct mutex lock; u8 *buffer; unsigned curfreq; u8 tx; u8 pa; bool stereo; bool muted; bool preemph_75_us; }; static inline struct keene_device *to_keene_dev(struct v4l2_device *v4l2_dev) { return container_of(v4l2_dev, struct keene_device, v4l2_dev); } /* Set frequency (if non-0), PA, mute and turn on/off the FM transmitter. */ static int keene_cmd_main(struct keene_device *radio, unsigned freq, bool play) { unsigned short freq_send = freq ?
(freq - 76 * 16000) / 800 : 0; int ret; radio->buffer[0] = 0x00; radio->buffer[1] = 0x50; radio->buffer[2] = (freq_send >> 8) & 0xff; radio->buffer[3] = freq_send & 0xff; radio->buffer[4] = radio->pa; /* If bit 4 is set, then tune to the frequency. If bit 3 is set, then unmute; if bit 2 is set, then mute. If bit 1 is set, then enter idle mode; if bit 0 is set, then enter transmit mode. */ radio->buffer[5] = (radio->muted ? 4 : 8) | (play ? 1 : 2) | (freq ? 0x10 : 0); radio->buffer[6] = 0x00; radio->buffer[7] = 0x00; ret = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0), 9, 0x21, 0x200, 2, radio->buffer, BUFFER_LENGTH, USB_TIMEOUT); if (ret < 0) { dev_warn(&radio->vdev.dev, "%s failed (%d)\n", __func__, ret); return ret; } if (freq) radio->curfreq = freq; return 0; } /* Set TX, stereo and preemphasis mode (50 us vs 75 us). */ static int keene_cmd_set(struct keene_device *radio) { int ret; radio->buffer[0] = 0x00; radio->buffer[1] = 0x51; radio->buffer[2] = radio->tx; /* If bit 0 is set, then transmit mono, otherwise stereo. If bit 2 is set, then enable 75 us preemphasis, otherwise it is 50 us. */ radio->buffer[3] = (radio->stereo ? 0 : 1) | (radio->preemph_75_us ? 4 : 0); radio->buffer[4] = 0x00; radio->buffer[5] = 0x00; radio->buffer[6] = 0x00; radio->buffer[7] = 0x00; ret = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0), 9, 0x21, 0x200, 2, radio->buffer, BUFFER_LENGTH, USB_TIMEOUT); if (ret < 0) { dev_warn(&radio->vdev.dev, "%s failed (%d)\n", __func__, ret); return ret; } return 0; } /* Handle unplugging the device. * We call video_unregister_device in any case. * The last function called in this procedure is * usb_keene_device_release. */ static void usb_keene_disconnect(struct usb_interface *intf) { struct keene_device *radio = to_keene_dev(usb_get_intfdata(intf)); mutex_lock(&radio->lock); usb_set_intfdata(intf, NULL); video_unregister_device(&radio->vdev); v4l2_device_disconnect(&radio->v4l2_dev); mutex_unlock(&radio->lock); v4l2_device_put(&radio->v4l2_dev); } static int usb_keene_suspend(struct usb_interface *intf, pm_message_t message) { struct keene_device *radio = to_keene_dev(usb_get_intfdata(intf)); return keene_cmd_main(radio, 0, false); } static int usb_keene_resume(struct usb_interface *intf) { struct keene_device *radio = to_keene_dev(usb_get_intfdata(intf)); mdelay(50); keene_cmd_set(radio); keene_cmd_main(radio, radio->curfreq, true); return 0; } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *v) { struct keene_device *radio = video_drvdata(file); strscpy(v->driver, "radio-keene", sizeof(v->driver)); strscpy(v->card, "Keene FM Transmitter", sizeof(v->card)); usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info)); return 0; } static int vidioc_g_modulator(struct file *file, void *priv, struct v4l2_modulator *v) { struct keene_device *radio = video_drvdata(file); if (v->index > 0) return -EINVAL; strscpy(v->name, "FM", sizeof(v->name)); v->rangelow = FREQ_MIN * FREQ_MUL; v->rangehigh = FREQ_MAX * FREQ_MUL; v->txsubchans = radio->stereo ? 
V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO; v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO; return 0; } static int vidioc_s_modulator(struct file *file, void *priv, const struct v4l2_modulator *v) { struct keene_device *radio = video_drvdata(file); if (v->index > 0) return -EINVAL; radio->stereo = (v->txsubchans == V4L2_TUNER_SUB_STEREO); return keene_cmd_set(radio); } static int vidioc_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *f) { struct keene_device *radio = video_drvdata(file); unsigned freq = f->frequency; if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO) return -EINVAL; freq = clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL); return keene_cmd_main(radio, freq, true); } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct keene_device *radio = video_drvdata(file); if (f->tuner != 0) return -EINVAL; f->type = V4L2_TUNER_RADIO; f->frequency = radio->curfreq; return 0; } static int keene_s_ctrl(struct v4l2_ctrl *ctrl) { static const u8 db2tx[] = { /* -15, -12, -9, -6, -3, 0 dB */ 0x03, 0x13, 0x02, 0x12, 0x22, 0x32, /* 3, 6, 9, 12, 15, 18 dB */ 0x21, 0x31, 0x20, 0x30, 0x40, 0x50 }; struct keene_device *radio = container_of(ctrl->handler, struct keene_device, hdl); switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: radio->muted = ctrl->val; return keene_cmd_main(radio, 0, true); case V4L2_CID_TUNE_POWER_LEVEL: /* To go from dBuV to the register value we apply the following formula: */ radio->pa = (ctrl->val - 71) * 100 / 62; return keene_cmd_main(radio, 0, true); case V4L2_CID_TUNE_PREEMPHASIS: radio->preemph_75_us = ctrl->val == V4L2_PREEMPHASIS_75_uS; return keene_cmd_set(radio); case V4L2_CID_AUDIO_COMPRESSION_GAIN: radio->tx = db2tx[(ctrl->val - (s32)ctrl->minimum) / (s32)ctrl->step]; return keene_cmd_set(radio); } return -EINVAL; } /* File system interface */ static const struct v4l2_file_operations usb_keene_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = v4l2_fh_release, .poll = v4l2_ctrl_poll, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ctrl_ops keene_ctrl_ops = { .s_ctrl = keene_s_ctrl, }; static const struct v4l2_ioctl_ops usb_keene_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_modulator = vidioc_g_modulator, .vidioc_s_modulator = vidioc_s_modulator, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; static void usb_keene_video_device_release(struct v4l2_device *v4l2_dev) { struct keene_device *radio = to_keene_dev(v4l2_dev); /* free rest memory */ v4l2_ctrl_handler_free(&radio->hdl); kfree(radio->buffer); kfree(radio); } /* check if the device is present and register with v4l and usb if it is */ static int usb_keene_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct keene_device *radio; struct v4l2_ctrl_handler *hdl; int retval = 0; /* * The Keene FM transmitter USB device has the same USB ID as * the Logitech AudioHub Speaker, but it should ignore the hid. * Check if the name is that of the Keene device. * If not, then someone connected the AudioHub and we shouldn't * attempt to handle this driver. * For reference: the product name of the AudioHub is * "AudioHub Speaker". 
*/ if (dev->product && strcmp(dev->product, "B-LINK USB Audio ")) return -ENODEV; radio = kzalloc(sizeof(struct keene_device), GFP_KERNEL); if (radio) radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL); if (!radio || !radio->buffer) { dev_err(&intf->dev, "kmalloc for keene_device failed\n"); kfree(radio); retval = -ENOMEM; goto err; } hdl = &radio->hdl; v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &keene_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); v4l2_ctrl_new_std_menu(hdl, &keene_ctrl_ops, V4L2_CID_TUNE_PREEMPHASIS, V4L2_PREEMPHASIS_75_uS, 1, V4L2_PREEMPHASIS_50_uS); v4l2_ctrl_new_std(hdl, &keene_ctrl_ops, V4L2_CID_TUNE_POWER_LEVEL, 84, 118, 1, 118); v4l2_ctrl_new_std(hdl, &keene_ctrl_ops, V4L2_CID_AUDIO_COMPRESSION_GAIN, -15, 18, 3, 0); radio->pa = 118; radio->tx = 0x32; radio->stereo = true; if (hdl->error) { retval = hdl->error; v4l2_ctrl_handler_free(hdl); goto err_v4l2; } retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev); if (retval < 0) { dev_err(&intf->dev, "couldn't register v4l2_device\n"); goto err_v4l2; } mutex_init(&radio->lock); radio->v4l2_dev.ctrl_handler = hdl; radio->v4l2_dev.release = usb_keene_video_device_release; strscpy(radio->vdev.name, radio->v4l2_dev.name, sizeof(radio->vdev.name)); radio->vdev.v4l2_dev = &radio->v4l2_dev; radio->vdev.fops = &usb_keene_fops; radio->vdev.ioctl_ops = &usb_keene_ioctl_ops; radio->vdev.lock = &radio->lock; radio->vdev.release = video_device_release_empty; radio->vdev.vfl_dir = VFL_DIR_TX; radio->vdev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_MODULATOR; radio->usbdev = interface_to_usbdev(intf); radio->intf = intf; usb_set_intfdata(intf, &radio->v4l2_dev); video_set_drvdata(&radio->vdev, radio); /* at least 11ms is needed in order to settle hardware */ msleep(20); keene_cmd_main(radio, 95.16 * FREQ_MUL, false); retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO, -1); if (retval < 0) { dev_err(&intf->dev, "could not register video device\n"); goto err_vdev; } v4l2_ctrl_handler_setup(hdl); dev_info(&intf->dev, "V4L2 device registered as %s\n", video_device_node_name(&radio->vdev)); return 0; err_vdev: v4l2_device_unregister(&radio->v4l2_dev); err_v4l2: kfree(radio->buffer); kfree(radio); err: return retval; } /* USB subsystem interface */ static struct usb_driver usb_keene_driver = { .name = "radio-keene", .probe = usb_keene_probe, .disconnect = usb_keene_disconnect, .id_table = usb_keene_device_table, .suspend = usb_keene_suspend, .resume = usb_keene_resume, .reset_resume = usb_keene_resume, }; module_usb_driver(usb_keene_driver);
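/*
 * Editor's example (userspace, not kernel code): exercising the modulator
 * interface that radio-keene registers above. The device node path is an
 * assumption; VIDIOC_S_FREQUENCY and struct v4l2_frequency are the standard
 * V4L2 API handled by vidioc_s_frequency().
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Tune the transmitter to @khz kHz, e.g. demo_tune("/dev/radio0", 95160). */
static int demo_tune(const char *node, unsigned int khz)
{
	struct v4l2_frequency f;
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;
	memset(&f, 0, sizeof(f));
	f.tuner = 0;
	f.type = V4L2_TUNER_RADIO;
	/* V4L2_TUNER_CAP_LOW units are 62.5 Hz, i.e. 16 per kHz; this
	 * matches FREQ_MUL (16000 units per MHz) in the driver. */
	f.frequency = khz * 16;
	if (ioctl(fd, VIDIOC_S_FREQUENCY, &f) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}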
// SPDX-License-Identifier: GPL-2.0-only /* * Line 6 Linux USB driver * * Copyright (C) 2004-2010 Markus Grabner (line6@grabner-graz.at) */ #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "capture.h" #include "driver.h" #include "pcm.h" #include "playback.h" /* Software stereo volume control. */ static void change_volume(struct urb *urb_out, int volume[], int bytes_per_frame) { int chn = 0; if (volume[0] == 256 && volume[1] == 256) return; /* maximum volume - no change */ if (bytes_per_frame == 4) { __le16 *p, *buf_end; p = (__le16 *)urb_out->transfer_buffer; buf_end = p + urb_out->transfer_buffer_length / sizeof(*p); for (; p < buf_end; ++p) { short pv = le16_to_cpu(*p); int val = (pv * volume[chn & 1]) >> 8; pv = clamp(val, -0x8000, 0x7fff); *p = cpu_to_le16(pv); ++chn; } } else if (bytes_per_frame == 6) { unsigned char *p, *buf_end; p = (unsigned char *)urb_out->transfer_buffer; buf_end = p + urb_out->transfer_buffer_length; for (; p < buf_end; p += 3) { int val; val = p[0] + (p[1] << 8) + ((signed char)p[2] << 16); val = (val * volume[chn & 1]) >> 8; val = clamp(val, -0x800000, 0x7fffff); p[0] = val; p[1] = val >> 8; p[2] = val >> 16; ++chn; } } } /* Create signal for impulse response test.
*/ static void create_impulse_test_signal(struct snd_line6_pcm *line6pcm, struct urb *urb_out, int bytes_per_frame) { int frames = urb_out->transfer_buffer_length / bytes_per_frame; if (bytes_per_frame == 4) { int i; short *pi = (short *)line6pcm->prev_fbuf; short *po = (short *)urb_out->transfer_buffer; for (i = 0; i < frames; ++i) { po[0] = pi[0]; po[1] = 0; pi += 2; po += 2; } } else if (bytes_per_frame == 6) { int i, j; unsigned char *pi = line6pcm->prev_fbuf; unsigned char *po = urb_out->transfer_buffer; for (i = 0; i < frames; ++i) { for (j = 0; j < bytes_per_frame / 2; ++j) po[j] = pi[j]; for (; j < bytes_per_frame; ++j) po[j] = 0; pi += bytes_per_frame; po += bytes_per_frame; } } if (--line6pcm->impulse_count <= 0) { ((unsigned char *)(urb_out->transfer_buffer))[bytes_per_frame - 1] = line6pcm->impulse_volume; line6pcm->impulse_count = line6pcm->impulse_period; } } /* Add signal to buffer for software monitoring. */ static void add_monitor_signal(struct urb *urb_out, unsigned char *signal, int volume, int bytes_per_frame) { if (volume == 0) return; /* zero volume - no change */ if (bytes_per_frame == 4) { __le16 *pi, *po, *buf_end; pi = (__le16 *)signal; po = (__le16 *)urb_out->transfer_buffer; buf_end = po + urb_out->transfer_buffer_length / sizeof(*po); for (; po < buf_end; ++pi, ++po) { short pov = le16_to_cpu(*po); short piv = le16_to_cpu(*pi); int val = pov + ((piv * volume) >> 8); pov = clamp(val, -0x8000, 0x7fff); *po = cpu_to_le16(pov); } } /* We don't need to handle devices with 6 bytes per frame here since they all support hardware monitoring. */ } /* Find a free URB, prepare audio data, and submit URB. must be called in line6pcm->out.lock context */ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm) { int index; int i, urb_size, urb_frames; int ret; const int bytes_per_frame = line6pcm->properties->bytes_per_channel * line6pcm->properties->playback_hw.channels_max; const int frame_increment = line6pcm->properties->rates.rats[0].num_min; const int frame_factor = line6pcm->properties->rates.rats[0].den * (line6pcm->line6->intervals_per_second / LINE6_ISO_INTERVAL); struct urb *urb_out; index = find_first_zero_bit(&line6pcm->out.active_urbs, line6pcm->line6->iso_buffers); if (index < 0 || index >= line6pcm->line6->iso_buffers) { dev_err(line6pcm->line6->ifcdev, "no free URB found\n"); return -EINVAL; } urb_out = line6pcm->out.urbs[index]; urb_size = 0; /* TODO: this may not work for LINE6_ISO_PACKETS != 1 */ for (i = 0; i < LINE6_ISO_PACKETS; ++i) { /* compute frame size for given sampling rate */ int fsize = 0; struct usb_iso_packet_descriptor *fout = &urb_out->iso_frame_desc[i]; fsize = line6pcm->prev_fsize; if (fsize == 0) { int n; line6pcm->out.count += frame_increment; n = line6pcm->out.count / frame_factor; line6pcm->out.count -= n * frame_factor; fsize = n; } fsize *= bytes_per_frame; fout->offset = urb_size; fout->length = fsize; urb_size += fsize; } if (urb_size == 0) { /* can't determine URB size */ dev_err(line6pcm->line6->ifcdev, "driver bug: urb_size = 0\n"); return -EINVAL; } urb_frames = urb_size / bytes_per_frame; urb_out->transfer_buffer = line6pcm->out.buffer + index * LINE6_ISO_PACKETS * line6pcm->max_packet_size_out; urb_out->transfer_buffer_length = urb_size; urb_out->context = line6pcm; if (test_bit(LINE6_STREAM_PCM, &line6pcm->out.running) && !test_bit(LINE6_FLAG_PAUSE_PLAYBACK, &line6pcm->flags)) { struct snd_pcm_runtime *runtime = get_substream(line6pcm, SNDRV_PCM_STREAM_PLAYBACK)->runtime; if (line6pcm->out.pos + urb_frames > 
runtime->buffer_size) { /* The transferred area goes over buffer boundary, copy the data to the temp buffer. */ int len; len = runtime->buffer_size - line6pcm->out.pos; if (len > 0) { memcpy(urb_out->transfer_buffer, runtime->dma_area + line6pcm->out.pos * bytes_per_frame, len * bytes_per_frame); memcpy(urb_out->transfer_buffer + len * bytes_per_frame, runtime->dma_area, (urb_frames - len) * bytes_per_frame); } else dev_err(line6pcm->line6->ifcdev, "driver bug: len = %d\n", len); } else { memcpy(urb_out->transfer_buffer, runtime->dma_area + line6pcm->out.pos * bytes_per_frame, urb_out->transfer_buffer_length); } line6pcm->out.pos += urb_frames; if (line6pcm->out.pos >= runtime->buffer_size) line6pcm->out.pos -= runtime->buffer_size; change_volume(urb_out, line6pcm->volume_playback, bytes_per_frame); } else { memset(urb_out->transfer_buffer, 0, urb_out->transfer_buffer_length); } spin_lock_nested(&line6pcm->in.lock, SINGLE_DEPTH_NESTING); if (line6pcm->prev_fbuf) { if (test_bit(LINE6_STREAM_IMPULSE, &line6pcm->out.running)) { create_impulse_test_signal(line6pcm, urb_out, bytes_per_frame); if (test_bit(LINE6_STREAM_PCM, &line6pcm->in.running)) { line6_capture_copy(line6pcm, urb_out->transfer_buffer, urb_out-> transfer_buffer_length); line6_capture_check_period(line6pcm, urb_out->transfer_buffer_length); } } else { if (!(line6pcm->line6->properties->capabilities & LINE6_CAP_HWMON) && line6pcm->out.running && line6pcm->in.running) add_monitor_signal(urb_out, line6pcm->prev_fbuf, line6pcm->volume_monitor, bytes_per_frame); } line6pcm->prev_fbuf = NULL; line6pcm->prev_fsize = 0; } spin_unlock(&line6pcm->in.lock); ret = usb_submit_urb(urb_out, GFP_ATOMIC); if (ret == 0) set_bit(index, &line6pcm->out.active_urbs); else dev_err(line6pcm->line6->ifcdev, "URB out #%d submission failed (%d)\n", index, ret); return 0; } /* Submit all currently available playback URBs. must be called in line6pcm->out.lock context */ int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm) { int ret = 0, i; for (i = 0; i < line6pcm->line6->iso_buffers; ++i) { ret = submit_audio_out_urb(line6pcm); if (ret < 0) break; } return ret; } /* Callback for completed playback URB. 
*/ static void audio_out_callback(struct urb *urb) { int i, index, length = 0, shutdown = 0; unsigned long flags; struct snd_line6_pcm *line6pcm = (struct snd_line6_pcm *)urb->context; struct snd_pcm_substream *substream = get_substream(line6pcm, SNDRV_PCM_STREAM_PLAYBACK); const int bytes_per_frame = line6pcm->properties->bytes_per_channel * line6pcm->properties->playback_hw.channels_max; #if USE_CLEAR_BUFFER_WORKAROUND memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); #endif line6pcm->out.last_frame = urb->start_frame; /* find index of URB */ for (index = 0; index < line6pcm->line6->iso_buffers; index++) if (urb == line6pcm->out.urbs[index]) break; if (index >= line6pcm->line6->iso_buffers) return; /* URB has been unlinked asynchronously */ for (i = 0; i < LINE6_ISO_PACKETS; i++) length += urb->iso_frame_desc[i].length; spin_lock_irqsave(&line6pcm->out.lock, flags); if (test_bit(LINE6_STREAM_PCM, &line6pcm->out.running)) { struct snd_pcm_runtime *runtime = substream->runtime; line6pcm->out.pos_done += length / bytes_per_frame; if (line6pcm->out.pos_done >= runtime->buffer_size) line6pcm->out.pos_done -= runtime->buffer_size; } clear_bit(index, &line6pcm->out.active_urbs); for (i = 0; i < LINE6_ISO_PACKETS; i++) if (urb->iso_frame_desc[i].status == -EXDEV) { shutdown = 1; break; } if (test_and_clear_bit(index, &line6pcm->out.unlink_urbs)) shutdown = 1; if (!shutdown) { submit_audio_out_urb(line6pcm); if (test_bit(LINE6_STREAM_PCM, &line6pcm->out.running)) { line6pcm->out.bytes += length; if (line6pcm->out.bytes >= line6pcm->out.period) { line6pcm->out.bytes %= line6pcm->out.period; spin_unlock(&line6pcm->out.lock); snd_pcm_period_elapsed(substream); spin_lock(&line6pcm->out.lock); } } } spin_unlock_irqrestore(&line6pcm->out.lock, flags); } /* open playback callback */ static int snd_line6_playback_open(struct snd_pcm_substream *substream) { int err; struct snd_pcm_runtime *runtime = substream->runtime; struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); err = snd_pcm_hw_constraint_ratdens(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &line6pcm->properties->rates); if (err < 0) return err; runtime->hw = line6pcm->properties->playback_hw; return 0; } /* close playback callback */ static int snd_line6_playback_close(struct snd_pcm_substream *substream) { return 0; } /* playback operators */ const struct snd_pcm_ops snd_line6_playback_ops = { .open = snd_line6_playback_open, .close = snd_line6_playback_close, .hw_params = snd_line6_hw_params, .hw_free = snd_line6_hw_free, .prepare = snd_line6_prepare, .trigger = snd_line6_trigger, .pointer = snd_line6_pointer, }; int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm) { struct usb_line6 *line6 = line6pcm->line6; int i; line6pcm->out.urbs = kcalloc(line6->iso_buffers, sizeof(struct urb *), GFP_KERNEL); if (line6pcm->out.urbs == NULL) return -ENOMEM; /* create audio URBs and fill in constant values: */ for (i = 0; i < line6->iso_buffers; ++i) { struct urb *urb; /* URB for audio out: */ urb = line6pcm->out.urbs[i] = usb_alloc_urb(LINE6_ISO_PACKETS, GFP_KERNEL); if (urb == NULL) return -ENOMEM; urb->dev = line6->usbdev; urb->pipe = usb_sndisocpipe(line6->usbdev, line6->properties->ep_audio_w & USB_ENDPOINT_NUMBER_MASK); urb->transfer_flags = URB_ISO_ASAP; urb->start_frame = -1; urb->number_of_packets = LINE6_ISO_PACKETS; urb->interval = LINE6_ISO_INTERVAL; urb->error_count = 0; urb->complete = audio_out_callback; if (usb_urb_ep_type_check(urb)) return -EINVAL; } return 0; }
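/*
 * Editor's sketch (standalone, not part of the driver): the 8.8 fixed-point
 * gain used by change_volume() above, where 256 means unity gain and results
 * are clamped to the 16-bit sample range. demo_scale_sample() is hypothetical.
 */
#include <stdint.h>

static int16_t demo_scale_sample(int16_t sample, int volume)
{
	int val = ((int)sample * volume) >> 8;	/* gain = volume / 256 */

	if (val < -0x8000)
		val = -0x8000;
	else if (val > 0x7fff)
		val = 0x7fff;
	return (int16_t)val;
}
/* E.g. volume = 128 halves the amplitude; volume = 512 doubles it, clamped. */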
// SPDX-License-Identifier: GPL-2.0-only /* * LED Class Core * * Copyright 2005-2006 Openedhand Ltd.
* * Author: Richard Purdie <rpurdie@openedhand.com> */ #include <linux/kernel.h> #include <linux/led-class-multicolor.h> #include <linux/leds.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/property.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <uapi/linux/uleds.h> #include "leds.h" DECLARE_RWSEM(leds_list_lock); EXPORT_SYMBOL_GPL(leds_list_lock); LIST_HEAD(leds_list); EXPORT_SYMBOL_GPL(leds_list); static const char * const led_colors[LED_COLOR_ID_MAX] = { [LED_COLOR_ID_WHITE] = "white", [LED_COLOR_ID_RED] = "red", [LED_COLOR_ID_GREEN] = "green", [LED_COLOR_ID_BLUE] = "blue", [LED_COLOR_ID_AMBER] = "amber", [LED_COLOR_ID_VIOLET] = "violet", [LED_COLOR_ID_YELLOW] = "yellow", [LED_COLOR_ID_IR] = "ir", [LED_COLOR_ID_MULTI] = "multicolor", [LED_COLOR_ID_RGB] = "rgb", [LED_COLOR_ID_PURPLE] = "purple", [LED_COLOR_ID_ORANGE] = "orange", [LED_COLOR_ID_PINK] = "pink", [LED_COLOR_ID_CYAN] = "cyan", [LED_COLOR_ID_LIME] = "lime", }; static int __led_set_brightness(struct led_classdev *led_cdev, unsigned int value) { if (!led_cdev->brightness_set) return -ENOTSUPP; led_cdev->brightness_set(led_cdev, value); return 0; } static int __led_set_brightness_blocking(struct led_classdev *led_cdev, unsigned int value) { if (!led_cdev->brightness_set_blocking) return -ENOTSUPP; return led_cdev->brightness_set_blocking(led_cdev, value); } static void led_timer_function(struct timer_list *t) { struct led_classdev *led_cdev = from_timer(led_cdev, t, blink_timer); unsigned long brightness; unsigned long delay; if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) { led_set_brightness_nosleep(led_cdev, LED_OFF); clear_bit(LED_BLINK_SW, &led_cdev->work_flags); return; } if (test_and_clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags)) { clear_bit(LED_BLINK_SW, &led_cdev->work_flags); return; } brightness = led_get_brightness(led_cdev); if (!brightness) { /* Time to switch the LED on. */ if (test_and_clear_bit(LED_BLINK_BRIGHTNESS_CHANGE, &led_cdev->work_flags)) brightness = led_cdev->new_blink_brightness; else brightness = led_cdev->blink_brightness; delay = led_cdev->blink_delay_on; } else { /* Store the current brightness value to be able * to restore it when the delay_off period is over. */ led_cdev->blink_brightness = brightness; brightness = LED_OFF; delay = led_cdev->blink_delay_off; } led_set_brightness_nosleep(led_cdev, brightness); /* Return in next iteration if led is in one-shot mode and we are in * the final blink state so that the led is toggled each delay_on + * delay_off milliseconds in worst case. 
*/ if (test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags)) { if (test_bit(LED_BLINK_INVERT, &led_cdev->work_flags)) { if (brightness) set_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); } else { if (!brightness) set_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); } } mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay)); } static void set_brightness_delayed_set_brightness(struct led_classdev *led_cdev, unsigned int value) { int ret; ret = __led_set_brightness(led_cdev, value); if (ret == -ENOTSUPP) { ret = __led_set_brightness_blocking(led_cdev, value); if (ret == -ENOTSUPP) /* No back-end support to set a fixed brightness value */ return; } /* LED HW might have been unplugged, therefore don't warn */ if (ret == -ENODEV && led_cdev->flags & LED_UNREGISTERING && led_cdev->flags & LED_HW_PLUGGABLE) return; if (ret < 0) dev_err(led_cdev->dev, "Setting an LED's brightness failed (%d)\n", ret); } static void set_brightness_delayed(struct work_struct *ws) { struct led_classdev *led_cdev = container_of(ws, struct led_classdev, set_brightness_work); if (test_and_clear_bit(LED_BLINK_DISABLE, &led_cdev->work_flags)) { led_stop_software_blink(led_cdev); set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags); } /* * Triggers may call led_set_brightness(LED_OFF), * led_set_brightness(LED_FULL) in quick succession to disable blinking * and turn the LED on. Both actions may have been scheduled to run * before this work item runs once. To make sure this works properly, * handle LED_SET_BRIGHTNESS_OFF first. */ if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags)) { set_brightness_delayed_set_brightness(led_cdev, LED_OFF); /* * The consecutive led_set_brightness(LED_OFF), * led_set_brightness(LED_FULL) calls could have been executed out * of order (LED_FULL first) if work_flags was set between the * LED_SET_BRIGHTNESS_OFF and LED_SET_BRIGHTNESS handling of this * work item. To avoid ending with the LED turned off, turn the LED * on again.
*/ if (led_cdev->delayed_set_value != LED_OFF) set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); } if (test_and_clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags)) set_brightness_delayed_set_brightness(led_cdev, led_cdev->delayed_set_value); if (test_and_clear_bit(LED_SET_BLINK, &led_cdev->work_flags)) { unsigned long delay_on = led_cdev->delayed_delay_on; unsigned long delay_off = led_cdev->delayed_delay_off; led_blink_set(led_cdev, &delay_on, &delay_off); } } static void led_set_software_blink(struct led_classdev *led_cdev, unsigned long delay_on, unsigned long delay_off) { int current_brightness; current_brightness = led_get_brightness(led_cdev); if (current_brightness) led_cdev->blink_brightness = current_brightness; if (!led_cdev->blink_brightness) led_cdev->blink_brightness = led_cdev->max_brightness; led_cdev->blink_delay_on = delay_on; led_cdev->blink_delay_off = delay_off; /* never on - just set to off */ if (!delay_on) { led_set_brightness_nosleep(led_cdev, LED_OFF); return; } /* never off - just set to brightness */ if (!delay_off) { led_set_brightness_nosleep(led_cdev, led_cdev->blink_brightness); return; } set_bit(LED_BLINK_SW, &led_cdev->work_flags); mod_timer(&led_cdev->blink_timer, jiffies + 1); } static void led_blink_setup(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { if (!test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) && led_cdev->blink_set && !led_cdev->blink_set(led_cdev, delay_on, delay_off)) return; /* blink with 1 Hz as default if nothing specified */ if (!*delay_on && !*delay_off) *delay_on = *delay_off = 500; led_set_software_blink(led_cdev, *delay_on, *delay_off); } void led_init_core(struct led_classdev *led_cdev) { INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed); timer_setup(&led_cdev->blink_timer, led_timer_function, 0); } EXPORT_SYMBOL_GPL(led_init_core); void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { timer_delete_sync(&led_cdev->blink_timer); clear_bit(LED_BLINK_SW, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); led_blink_setup(led_cdev, delay_on, delay_off); } EXPORT_SYMBOL_GPL(led_blink_set); void led_blink_set_oneshot(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off, int invert) { if (test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) && timer_pending(&led_cdev->blink_timer)) return; set_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); if (invert) set_bit(LED_BLINK_INVERT, &led_cdev->work_flags); else clear_bit(LED_BLINK_INVERT, &led_cdev->work_flags); led_blink_setup(led_cdev, delay_on, delay_off); } EXPORT_SYMBOL_GPL(led_blink_set_oneshot); void led_blink_set_nosleep(struct led_classdev *led_cdev, unsigned long delay_on, unsigned long delay_off) { /* If necessary delegate to a work queue task. 
*/ if (led_cdev->blink_set && led_cdev->brightness_set_blocking) { led_cdev->delayed_delay_on = delay_on; led_cdev->delayed_delay_off = delay_off; set_bit(LED_SET_BLINK, &led_cdev->work_flags); queue_work(led_cdev->wq, &led_cdev->set_brightness_work); return; } led_blink_set(led_cdev, &delay_on, &delay_off); } EXPORT_SYMBOL_GPL(led_blink_set_nosleep); void led_stop_software_blink(struct led_classdev *led_cdev) { timer_delete_sync(&led_cdev->blink_timer); led_cdev->blink_delay_on = 0; led_cdev->blink_delay_off = 0; clear_bit(LED_BLINK_SW, &led_cdev->work_flags); } EXPORT_SYMBOL_GPL(led_stop_software_blink); void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness) { /* * If software blink is active, delay brightness setting * until the next timer tick. */ if (test_bit(LED_BLINK_SW, &led_cdev->work_flags)) { /* * If we need to disable soft blinking delegate this to the * work queue task to avoid problems in case we are called * from hard irq context. */ if (!brightness) { set_bit(LED_BLINK_DISABLE, &led_cdev->work_flags); queue_work(led_cdev->wq, &led_cdev->set_brightness_work); } else { set_bit(LED_BLINK_BRIGHTNESS_CHANGE, &led_cdev->work_flags); led_cdev->new_blink_brightness = brightness; } return; } led_set_brightness_nosleep(led_cdev, brightness); } EXPORT_SYMBOL_GPL(led_set_brightness); void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value) { /* Use brightness_set op if available, it is guaranteed not to sleep */ if (!__led_set_brightness(led_cdev, value)) return; /* * Brightness setting can sleep, delegate it to a work queue task. * value 0 / LED_OFF is special, since it also disables hw-blinking * (sw-blink disable is handled in led_set_brightness()). * To avoid a hw-blink-disable getting lost when a second brightness * change is done immediately afterwards (before the work runs), * it uses a separate work_flag. */ led_cdev->delayed_set_value = value; /* Ensure delayed_set_value is seen before work_flags modification */ smp_mb__before_atomic(); if (value) set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); else { clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); clear_bit(LED_SET_BLINK, &led_cdev->work_flags); set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags); } queue_work(led_cdev->wq, &led_cdev->set_brightness_work); } EXPORT_SYMBOL_GPL(led_set_brightness_nopm); void led_set_brightness_nosleep(struct led_classdev *led_cdev, unsigned int value) { led_cdev->brightness = min(value, led_cdev->max_brightness); if (led_cdev->flags & LED_SUSPENDED) return; led_set_brightness_nopm(led_cdev, led_cdev->brightness); } EXPORT_SYMBOL_GPL(led_set_brightness_nosleep); int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value) { if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) return -EBUSY; led_cdev->brightness = min(value, led_cdev->max_brightness); if (led_cdev->flags & LED_SUSPENDED) return 0; return __led_set_brightness_blocking(led_cdev, led_cdev->brightness); } EXPORT_SYMBOL_GPL(led_set_brightness_sync); /* * This is a led-core function because just like led_set_brightness() * it is used in the kernel by e.g. triggers. 
*/ void led_mc_set_brightness(struct led_classdev *led_cdev, unsigned int *intensity_value, unsigned int num_colors, unsigned int brightness) { struct led_classdev_mc *mcled_cdev; unsigned int i; if (!(led_cdev->flags & LED_MULTI_COLOR)) { dev_err_once(led_cdev->dev, "error not a multi-color LED\n"); return; } mcled_cdev = lcdev_to_mccdev(led_cdev); if (num_colors != mcled_cdev->num_colors) { dev_err_once(led_cdev->dev, "error num_colors mismatch %u != %u\n", num_colors, mcled_cdev->num_colors); return; } for (i = 0; i < mcled_cdev->num_colors; i++) mcled_cdev->subled_info[i].intensity = intensity_value[i]; led_set_brightness(led_cdev, brightness); } EXPORT_SYMBOL_GPL(led_mc_set_brightness); int led_update_brightness(struct led_classdev *led_cdev) { int ret; if (led_cdev->brightness_get) { ret = led_cdev->brightness_get(led_cdev); if (ret < 0) return ret; led_cdev->brightness = ret; } return 0; } EXPORT_SYMBOL_GPL(led_update_brightness); u32 *led_get_default_pattern(struct led_classdev *led_cdev, unsigned int *size) { struct fwnode_handle *fwnode = led_cdev->dev->fwnode; u32 *pattern; int count; count = fwnode_property_count_u32(fwnode, "led-pattern"); if (count < 0) return NULL; pattern = kcalloc(count, sizeof(*pattern), GFP_KERNEL); if (!pattern) return NULL; if (fwnode_property_read_u32_array(fwnode, "led-pattern", pattern, count)) { kfree(pattern); return NULL; } *size = count; return pattern; } EXPORT_SYMBOL_GPL(led_get_default_pattern); /* Caller must ensure led_cdev->led_access held */ void led_sysfs_disable(struct led_classdev *led_cdev) { lockdep_assert_held(&led_cdev->led_access); led_cdev->flags |= LED_SYSFS_DISABLE; } EXPORT_SYMBOL_GPL(led_sysfs_disable); /* Caller must ensure led_cdev->led_access held */ void led_sysfs_enable(struct led_classdev *led_cdev) { lockdep_assert_held(&led_cdev->led_access); led_cdev->flags &= ~LED_SYSFS_DISABLE; } EXPORT_SYMBOL_GPL(led_sysfs_enable); static void led_parse_fwnode_props(struct device *dev, struct fwnode_handle *fwnode, struct led_properties *props) { int ret; if (!fwnode) return; if (fwnode_property_present(fwnode, "label")) { ret = fwnode_property_read_string(fwnode, "label", &props->label); if (ret) dev_err(dev, "Error parsing 'label' property (%d)\n", ret); return; } if (fwnode_property_present(fwnode, "color")) { ret = fwnode_property_read_u32(fwnode, "color", &props->color); if (ret) dev_err(dev, "Error parsing 'color' property (%d)\n", ret); else if (props->color >= LED_COLOR_ID_MAX) dev_err(dev, "LED color identifier out of range\n"); else props->color_present = true; } if (!fwnode_property_present(fwnode, "function")) return; ret = fwnode_property_read_string(fwnode, "function", &props->function); if (ret) { dev_err(dev, "Error parsing 'function' property (%d)\n", ret); } if (!fwnode_property_present(fwnode, "function-enumerator")) return; ret = fwnode_property_read_u32(fwnode, "function-enumerator", &props->func_enum); if (ret) { dev_err(dev, "Error parsing 'function-enumerator' property (%d)\n", ret); } else { props->func_enum_present = true; } } int led_compose_name(struct device *dev, struct led_init_data *init_data, char *led_classdev_name) { struct led_properties props = {}; struct fwnode_handle *fwnode = init_data->fwnode; const char *devicename = init_data->devicename; if (!led_classdev_name) return -EINVAL; led_parse_fwnode_props(dev, fwnode, &props); if (props.label) { /* * If init_data.devicename is NULL, then it indicates that * DT label should be used as-is for LED class device name. 
* Otherwise the label is prepended with devicename to compose * the final LED class device name. */ if (!devicename) { strscpy(led_classdev_name, props.label, LED_MAX_NAME_SIZE); } else { snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s", devicename, props.label); } } else if (props.function || props.color_present) { char tmp_buf[LED_MAX_NAME_SIZE]; if (props.func_enum_present) { snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s-%d", props.color_present ? led_colors[props.color] : "", props.function ?: "", props.func_enum); } else { snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s", props.color_present ? led_colors[props.color] : "", props.function ?: ""); } if (init_data->devname_mandatory) { snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s", devicename, tmp_buf); } else { strscpy(led_classdev_name, tmp_buf, LED_MAX_NAME_SIZE); } } else if (init_data->default_label) { if (!devicename) { dev_err(dev, "Legacy LED naming requires devicename segment"); return -EINVAL; } snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s", devicename, init_data->default_label); } else if (is_of_node(fwnode)) { strscpy(led_classdev_name, to_of_node(fwnode)->name, LED_MAX_NAME_SIZE); } else return -EINVAL; return 0; } EXPORT_SYMBOL_GPL(led_compose_name); const char *led_get_color_name(u8 color_id) { if (color_id >= ARRAY_SIZE(led_colors)) return NULL; return led_colors[color_id]; } EXPORT_SYMBOL_GPL(led_get_color_name); enum led_default_state led_init_default_state_get(struct fwnode_handle *fwnode) { const char *state = NULL; if (!fwnode_property_read_string(fwnode, "default-state", &state)) { if (!strcmp(state, "keep")) return LEDS_DEFSTATE_KEEP; if (!strcmp(state, "on")) return LEDS_DEFSTATE_ON; } return LEDS_DEFSTATE_OFF; } EXPORT_SYMBOL_GPL(led_init_default_state_get);
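/*
 * Editor's sketch (not part of led-core.c): a minimal consumer of the core
 * above. led_classdev_register() runs led_init_core() internally, so writes
 * to /sys/class/leds/demo:green:status/brightness end up in
 * led_set_brightness() and the delayed work shown earlier. All demo_* names
 * are hypothetical.
 */
#include <linux/leds.h>
#include <linux/module.h>

static void demo_led_brightness_set(struct led_classdev *cdev,
				    enum led_brightness value)
{
	/* brightness_set must not sleep; write @value to the hardware here */
}

static struct led_classdev demo_led = {
	.name = "demo:green:status",
	.max_brightness = 255,
	.brightness_set = demo_led_brightness_set,
};

static int __init demo_led_init(void)
{
	return led_classdev_register(NULL, &demo_led);
}
module_init(demo_led_init);

static void __exit demo_led_exit(void)
{
	led_classdev_unregister(&demo_led);
}
module_exit(demo_led_exit);
MODULE_LICENSE("GPL");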
// SPDX-License-Identifier: GPL-2.0 /* * Driver for Meywa-Denki & KAYAC YUREX * * Copyright (C) 2010 Tomoki Sekiyama (tomoki.sekiyama@gmail.com) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kref.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/hid.h> #define DRIVER_AUTHOR "Tomoki Sekiyama" #define DRIVER_DESC "Driver for Meywa-Denki & KAYAC YUREX" #define YUREX_VENDOR_ID 0x0c45 #define YUREX_PRODUCT_ID 0x1010 #define CMD_ACK '!'
#define CMD_ANIMATE 'A' #define CMD_COUNT 'C' #define CMD_LED 'L' #define CMD_READ 'R' #define CMD_SET 'S' #define CMD_VERSION 'V' #define CMD_EOF 0x0d #define CMD_PADDING 0xff #define YUREX_BUF_SIZE 8 #define YUREX_WRITE_TIMEOUT (HZ*2) /* table of devices that work with this driver */ static struct usb_device_id yurex_table[] = { { USB_DEVICE(YUREX_VENDOR_ID, YUREX_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, yurex_table); #ifdef CONFIG_USB_DYNAMIC_MINORS #define YUREX_MINOR_BASE 0 #else #define YUREX_MINOR_BASE 192 #endif /* Structure to hold all of our device specific stuff */ struct usb_yurex { struct usb_device *udev; struct usb_interface *interface; __u8 int_in_endpointAddr; struct urb *urb; /* URB for interrupt in */ unsigned char *int_buffer; /* buffer for interrupt in */ struct urb *cntl_urb; /* URB for control msg */ struct usb_ctrlrequest *cntl_req; /* req for control msg */ unsigned char *cntl_buffer; /* buffer for control msg */ struct kref kref; struct mutex io_mutex; unsigned long disconnected:1; struct fasync_struct *async_queue; wait_queue_head_t waitq; spinlock_t lock; __s64 bbu; /* BBU from device */ }; #define to_yurex_dev(d) container_of(d, struct usb_yurex, kref) static struct usb_driver yurex_driver; static const struct file_operations yurex_fops; static void yurex_control_callback(struct urb *urb) { struct usb_yurex *dev = urb->context; int status = urb->status; if (status) { dev_err(&urb->dev->dev, "%s - control failed: %d\n", __func__, status); wake_up_interruptible(&dev->waitq); return; } /* on success, sender woken up by CMD_ACK int in, or timeout */ } static void yurex_delete(struct kref *kref) { struct usb_yurex *dev = to_yurex_dev(kref); dev_dbg(&dev->interface->dev, "%s\n", __func__); if (dev->cntl_urb) { usb_kill_urb(dev->cntl_urb); kfree(dev->cntl_req); usb_free_coherent(dev->udev, YUREX_BUF_SIZE, dev->cntl_buffer, dev->cntl_urb->transfer_dma); usb_free_urb(dev->cntl_urb); } if (dev->urb) { usb_kill_urb(dev->urb); usb_free_coherent(dev->udev, YUREX_BUF_SIZE, dev->int_buffer, dev->urb->transfer_dma); usb_free_urb(dev->urb); } usb_put_intf(dev->interface); usb_put_dev(dev->udev); kfree(dev); } /* * usb class driver info in order to get a minor number from the usb core, * and to have the device registered with the driver core */ static struct usb_class_driver yurex_class = { .name = "yurex%d", .fops = &yurex_fops, .minor_base = YUREX_MINOR_BASE, }; static void yurex_interrupt(struct urb *urb) { struct usb_yurex *dev = urb->context; unsigned char *buf = dev->int_buffer; int status = urb->status; unsigned long flags; int retval, i; switch (status) { case 0: /* success */ break; /* The device is terminated or messed up, give up */ case -EOVERFLOW: dev_err(&dev->interface->dev, "%s - overflow with length %d, actual length is %d\n", __func__, YUREX_BUF_SIZE, dev->urb->actual_length); return; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -EILSEQ: case -EPROTO: case -ETIME: return; default: dev_err(&dev->interface->dev, "%s - unknown status received: %d\n", __func__, status); return; } /* handle received message */ switch (buf[0]) { case CMD_COUNT: case CMD_READ: if (buf[6] == CMD_EOF) { spin_lock_irqsave(&dev->lock, flags); dev->bbu = 0; for (i = 1; i < 6; i++) { dev->bbu += buf[i]; if (i != 5) dev->bbu <<= 8; } dev_dbg(&dev->interface->dev, "%s count: %lld\n", __func__, dev->bbu); spin_unlock_irqrestore(&dev->lock, flags); kill_fasync(&dev->async_queue, SIGIO, POLL_IN); } else dev_dbg(&dev->interface->dev, "data format error - no 
EOF\n"); break; case CMD_ACK: dev_dbg(&dev->interface->dev, "%s ack: %c\n", __func__, buf[1]); wake_up_interruptible(&dev->waitq); break; } retval = usb_submit_urb(dev->urb, GFP_ATOMIC); if (retval) { dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n", __func__, retval); } } static int yurex_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_yurex *dev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int retval = -ENOMEM; DEFINE_WAIT(wait); int res; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) goto error; kref_init(&dev->kref); mutex_init(&dev->io_mutex); spin_lock_init(&dev->lock); init_waitqueue_head(&dev->waitq); dev->udev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = usb_get_intf(interface); /* set up the endpoint information */ iface_desc = interface->cur_altsetting; res = usb_find_int_in_endpoint(iface_desc, &endpoint); if (res) { dev_err(&interface->dev, "Could not find endpoints\n"); retval = res; goto error; } dev->int_in_endpointAddr = endpoint->bEndpointAddress; /* allocate control URB */ dev->cntl_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->cntl_urb) goto error; /* allocate buffer for control req */ dev->cntl_req = kmalloc(YUREX_BUF_SIZE, GFP_KERNEL); if (!dev->cntl_req) goto error; /* allocate buffer for control msg */ dev->cntl_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE, GFP_KERNEL, &dev->cntl_urb->transfer_dma); if (!dev->cntl_buffer) { dev_err(&interface->dev, "Could not allocate cntl_buffer\n"); goto error; } /* configure control URB */ dev->cntl_req->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; dev->cntl_req->bRequest = HID_REQ_SET_REPORT; dev->cntl_req->wValue = cpu_to_le16((HID_OUTPUT_REPORT + 1) << 8); dev->cntl_req->wIndex = cpu_to_le16(iface_desc->desc.bInterfaceNumber); dev->cntl_req->wLength = cpu_to_le16(YUREX_BUF_SIZE); usb_fill_control_urb(dev->cntl_urb, dev->udev, usb_sndctrlpipe(dev->udev, 0), (void *)dev->cntl_req, dev->cntl_buffer, YUREX_BUF_SIZE, yurex_control_callback, dev); dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* allocate interrupt URB */ dev->urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urb) goto error; /* allocate buffer for interrupt in */ dev->int_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE, GFP_KERNEL, &dev->urb->transfer_dma); if (!dev->int_buffer) { dev_err(&interface->dev, "Could not allocate int_buffer\n"); goto error; } /* configure interrupt URB */ usb_fill_int_urb(dev->urb, dev->udev, usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr), dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt, dev, 1); dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; if (usb_submit_urb(dev->urb, GFP_KERNEL)) { retval = -EIO; dev_err(&interface->dev, "Could not submit URB\n"); goto error; } /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); dev->bbu = -1; /* we can register the device now, as it is ready */ retval = usb_register_dev(interface, &yurex_class); if (retval) { dev_err(&interface->dev, "Not able to get a minor for this device.\n"); usb_set_intfdata(interface, NULL); goto error; } dev_info(&interface->dev, "USB YUREX device now attached to Yurex #%d\n", interface->minor); return 0; error: if (dev) /* this frees allocated memory */ kref_put(&dev->kref, yurex_delete); return retval; } static void yurex_disconnect(struct usb_interface *interface) { struct usb_yurex *dev; int minor = 
interface->minor; dev = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); /* give back our minor */ usb_deregister_dev(interface, &yurex_class); /* prevent more I/O from starting */ usb_poison_urb(dev->urb); usb_poison_urb(dev->cntl_urb); mutex_lock(&dev->io_mutex); dev->disconnected = 1; mutex_unlock(&dev->io_mutex); /* wakeup waiters */ kill_fasync(&dev->async_queue, SIGIO, POLL_IN); wake_up_interruptible(&dev->waitq); /* decrement our usage count */ kref_put(&dev->kref, yurex_delete); dev_info(&interface->dev, "USB YUREX #%d now disconnected\n", minor); } static struct usb_driver yurex_driver = { .name = "yurex", .probe = yurex_probe, .disconnect = yurex_disconnect, .id_table = yurex_table, }; static int yurex_fasync(int fd, struct file *file, int on) { struct usb_yurex *dev; dev = file->private_data; return fasync_helper(fd, file, on, &dev->async_queue); } static int yurex_open(struct inode *inode, struct file *file) { struct usb_yurex *dev; struct usb_interface *interface; int subminor; int retval = 0; subminor = iminor(inode); interface = usb_find_interface(&yurex_driver, subminor); if (!interface) { printk(KERN_ERR "%s - error, can't find device for minor %d", __func__, subminor); retval = -ENODEV; goto exit; } dev = usb_get_intfdata(interface); if (!dev) { retval = -ENODEV; goto exit; } /* increment our usage count for the device */ kref_get(&dev->kref); /* save our object in the file's private structure */ mutex_lock(&dev->io_mutex); file->private_data = dev; mutex_unlock(&dev->io_mutex); exit: return retval; } static int yurex_release(struct inode *inode, struct file *file) { struct usb_yurex *dev; dev = file->private_data; if (dev == NULL) return -ENODEV; /* decrement the count on our device */ kref_put(&dev->kref, yurex_delete); return 0; } static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct usb_yurex *dev; int len; char in_buffer[20]; unsigned long flags; dev = file->private_data; mutex_lock(&dev->io_mutex); if (dev->disconnected) { /* already disconnected */ mutex_unlock(&dev->io_mutex); return -ENODEV; } spin_lock_irqsave(&dev->lock, flags); len = snprintf(in_buffer, 20, "%lld\n", dev->bbu); spin_unlock_irqrestore(&dev->lock, flags); mutex_unlock(&dev->io_mutex); if (WARN_ON_ONCE(len >= sizeof(in_buffer))) return -EIO; return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); } static ssize_t yurex_write(struct file *file, const char __user *user_buffer, size_t count, loff_t *ppos) { struct usb_yurex *dev; int i, set = 0, retval = 0; char buffer[16 + 1]; char *data = buffer; unsigned long long c, c2 = 0; signed long timeout = 0; DEFINE_WAIT(wait); count = min(sizeof(buffer) - 1, count); dev = file->private_data; /* verify that we actually have some data to write */ if (count == 0) goto error; retval = mutex_lock_interruptible(&dev->io_mutex); if (retval < 0) return -EINTR; if (dev->disconnected) { /* already disconnected */ mutex_unlock(&dev->io_mutex); retval = -ENODEV; goto error; } if (copy_from_user(buffer, user_buffer, count)) { mutex_unlock(&dev->io_mutex); retval = -EFAULT; goto error; } buffer[count] = 0; memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE); switch (buffer[0]) { case CMD_ANIMATE: case CMD_LED: dev->cntl_buffer[0] = buffer[0]; dev->cntl_buffer[1] = buffer[1]; dev->cntl_buffer[2] = CMD_EOF; break; case CMD_READ: case CMD_VERSION: dev->cntl_buffer[0] = buffer[0]; dev->cntl_buffer[1] = 0x00; dev->cntl_buffer[2] = CMD_EOF; break; case CMD_SET: data++; fallthrough; case '0' ... 
'9': set = 1; c = c2 = simple_strtoull(data, NULL, 0); dev->cntl_buffer[0] = CMD_SET; for (i = 1; i < 6; i++) { dev->cntl_buffer[i] = (c>>32) & 0xff; c <<= 8; } dev->cntl_buffer[6] = CMD_EOF; /* EOF must land in the transfer buffer, not the local copy */ break; default: mutex_unlock(&dev->io_mutex); return -EINVAL; } /* send the data as the control msg */ prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE); dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__, dev->cntl_buffer[0]); retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC); if (retval >= 0) timeout = schedule_timeout(YUREX_WRITE_TIMEOUT); finish_wait(&dev->waitq, &wait); /* make sure URB is idle after timeout or (spurious) CMD_ACK */ usb_kill_urb(dev->cntl_urb); mutex_unlock(&dev->io_mutex); if (retval < 0) { dev_err(&dev->interface->dev, "%s - failed to send control msg, error %d\n", __func__, retval); goto error; } if (set && timeout) { spin_lock_irq(&dev->lock); dev->bbu = c2; spin_unlock_irq(&dev->lock); } return timeout ? count : -EIO; error: return retval; } static const struct file_operations yurex_fops = { .owner = THIS_MODULE, .read = yurex_read, .write = yurex_write, .open = yurex_open, .release = yurex_release, .fasync = yurex_fasync, .llseek = default_llseek, }; module_usb_driver(yurex_driver); MODULE_DESCRIPTION("USB YUREX driver support"); MODULE_LICENSE("GPL");
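/*
 * To make the character-device contract above concrete, a small user-space
 * sketch; the node path /dev/yurex0 is an assumption (it depends on udev).
 * Writes go through yurex_write() as ASCII commands such as "S<n>"
 * (CMD_SET), and reads return the cached BBU count as a decimal string
 * produced by yurex_read().
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd = open("/dev/yurex0", O_RDWR);	/* node name is an assumption */

	if (fd < 0)
		return 1;
	if (write(fd, "S100", 4) < 0)		/* CMD_SET: preset counter to 100 */
		perror("write");
	n = read(fd, buf, sizeof(buf) - 1);	/* "<bbu>\n" per yurex_read() */
	if (n > 0) {
		buf[n] = '\0';
		printf("BBU count: %s", buf);
	}
	close(fd);
	return 0;
}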
// SPDX-License-Identifier: GPL-2.0 /* * Wifi Band Exclusion Interface for WLAN * Copyright (C) 2023 Advanced Micro Devices * Copyright (C) 2025 Intel Corporation * */ #include <linux/acpi_amd_wbrf.h> #include <linux/units.h> #include <net/cfg80211.h> #include "ieee80211_i.h" void ieee80211_check_wbrf_support(struct ieee80211_local *local) { struct wiphy *wiphy = local->hw.wiphy; struct device *dev; if (!wiphy) return; dev = wiphy->dev.parent; if (!dev) return; local->wbrf_supported = acpi_amd_wbrf_supported_producer(dev); } static void get_chan_freq_boundary(u32 center_freq, u32 bandwidth, u64 *start, u64 *end) { bandwidth *= KHZ_PER_MHZ; center_freq *= KHZ_PER_MHZ; *start = center_freq - bandwidth / 2; *end = center_freq + bandwidth / 2; /* Frequency in Hz is expected */ *start = *start * HZ_PER_KHZ; *end = *end * HZ_PER_KHZ; } static void get_ranges_from_chandef(struct cfg80211_chan_def *chandef, struct wbrf_ranges_in_out *ranges_in) { u64 start_freq1, end_freq1; u64 start_freq2, end_freq2; int bandwidth; bandwidth = cfg80211_chandef_get_width(chandef); get_chan_freq_boundary(chandef->center_freq1, bandwidth, &start_freq1, &end_freq1); ranges_in->band_list[0].start = start_freq1; ranges_in->band_list[0].end = end_freq1; ranges_in->num_of_ranges = 1; if (chandef->width == NL80211_CHAN_WIDTH_80P80) { get_chan_freq_boundary(chandef->center_freq2, bandwidth, &start_freq2, &end_freq2); ranges_in->band_list[1].start = start_freq2; ranges_in->band_list[1].end = end_freq2; ranges_in->num_of_ranges++; } } void ieee80211_add_wbrf(struct ieee80211_local *local, struct cfg80211_chan_def *chandef) { struct wbrf_ranges_in_out ranges_in = {0}; struct device *dev; if (!local->wbrf_supported) return; dev = local->hw.wiphy->dev.parent; get_ranges_from_chandef(chandef, &ranges_in); acpi_amd_wbrf_add_remove(dev, WBRF_RECORD_ADD, &ranges_in); } void ieee80211_remove_wbrf(struct ieee80211_local *local, struct cfg80211_chan_def *chandef) { struct wbrf_ranges_in_out ranges_in = {0}; struct device *dev; if (!local->wbrf_supported) return; dev = local->hw.wiphy->dev.parent; get_ranges_from_chandef(chandef, &ranges_in); acpi_amd_wbrf_add_remove(dev, WBRF_RECORD_REMOVE, &ranges_in); }
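/*
 * The boundary math in get_chan_freq_boundary() is simple enough to check
 * by hand. A standalone user-space sketch (unit constants inlined with the
 * same values as linux/units.h) for an 80 MHz channel centered at 5210 MHz,
 * which spans 5170-5250 MHz:
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define KHZ_PER_MHZ 1000ULL
#define HZ_PER_KHZ 1000ULL

int main(void)
{
	uint64_t center = 5210 * KHZ_PER_MHZ;	/* center_freq1 in kHz */
	uint64_t bw = 80 * KHZ_PER_MHZ;		/* channel width in kHz */
	uint64_t start = (center - bw / 2) * HZ_PER_KHZ;
	uint64_t end = (center + bw / 2) * HZ_PER_KHZ;

	/* prints start=5170000000 Hz end=5250000000 Hz */
	printf("start=%" PRIu64 " Hz end=%" PRIu64 " Hz\n", start, end);
	return 0;
}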
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* cx231xx.h - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> Based on em28xx driver */ #ifndef _CX231XX_H #define _CX231XX_H #include <linux/videodev2.h> #include <linux/types.h> #include <linux/ioctl.h> #include <linux/i2c.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include <linux/usb.h> #include <media/drv-intf/cx2341x.h> #include <media/videobuf2-vmalloc.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-fh.h> #include <media/rc-core.h> #include <media/i2c/ir-kbd-i2c.h> #include "cx231xx-reg.h" #include "cx231xx-pcb-cfg.h" #include "cx231xx-conf-reg.h" #define DRIVER_NAME "cx231xx" #define PWR_SLEEP_INTERVAL 10 /* I2C addresses for control block in Cx231xx */ #define AFE_DEVICE_ADDRESS 0x60 #define I2S_BLK_DEVICE_ADDRESS 0x98 #define VID_BLK_I2C_ADDRESS 0x88 #define VERVE_I2C_ADDRESS 0x40 #define DIF_USE_BASEBAND 0xFFFFFFFF /* Boards supported by driver */ #define CX231XX_BOARD_UNKNOWN 0 #define CX231XX_BOARD_CNXT_CARRAERA 1 #define CX231XX_BOARD_CNXT_SHELBY 2 #define CX231XX_BOARD_CNXT_RDE_253S 3 #define CX231XX_BOARD_CNXT_RDU_253S 4 #define CX231XX_BOARD_CNXT_VIDEO_GRABBER 5 #define CX231XX_BOARD_CNXT_RDE_250 6 #define CX231XX_BOARD_CNXT_RDU_250 7 #define CX231XX_BOARD_HAUPPAUGE_EXETER 8 #define CX231XX_BOARD_HAUPPAUGE_USBLIVE2 9 #define CX231XX_BOARD_PV_PLAYTV_USB_HYBRID 10 #define CX231XX_BOARD_PV_XCAPTURE_USB 11 #define CX231XX_BOARD_KWORLD_UB430_USB_HYBRID 12 #define CX231XX_BOARD_ICONBIT_U100 13 #define CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL 14 #define CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC 15 #define CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2 16 #define CX231XX_BOARD_OTG102 17 #define CX231XX_BOARD_KWORLD_UB445_USB_HYBRID 18 #define CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx 19 #define CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx 20 #define CX231XX_BOARD_HAUPPAUGE_955Q 21 #define CX231XX_BOARD_TERRATEC_GRABBY 22 #define CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD 23 #define CX231XX_BOARD_ASTROMETA_T2HYBRID 24 #define CX231XX_BOARD_THE_IMAGING_SOURCE_DFG_USB2_PRO 25 #define CX231XX_BOARD_HAUPPAUGE_935C 26 #define CX231XX_BOARD_HAUPPAUGE_975 27 /* Limits minimum and default number of buffers */ #define CX231XX_MIN_BUF 4 #define CX231XX_DEF_BUF 12 #define CX231XX_DEF_VBI_BUF 6 #define VBI_LINE_COUNT 17 #define VBI_LINE_LENGTH 1440 /*Limits the max URB message size */ #define URB_MAX_CTRL_SIZE 80 /* Params for validated field */ #define CX231XX_BOARD_NOT_VALIDATED 1 #define CX231XX_BOARD_VALIDATED 0 /* maximum number of cx231xx boards */ #define CX231XX_MAXBOARDS 8 /* maximum number of frames that can be queued */ #define CX231XX_NUM_FRAMES 5 /* number of buffers for isoc transfers */ #define CX231XX_NUM_BUFS 8 /* number of packets for each buffer windows requests only 40 packets .. so we better do the same this is what I found out for all alternate numbers there! 
*/ #define CX231XX_NUM_PACKETS 40 /* default alternate; 0 means choose the best */ #define CX231XX_PINOUT 0 #define CX231XX_INTERLACED_DEFAULT 1 /* time to wait when stopping the isoc transfer */ #define CX231XX_URB_TIMEOUT \ msecs_to_jiffies(CX231XX_NUM_BUFS * CX231XX_NUM_PACKETS) #define CX231xx_NORMS (\ V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_JP | V4L2_STD_NTSC_443 | \ V4L2_STD_PAL_BG | V4L2_STD_PAL_DK | V4L2_STD_PAL_I | \ V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | \ V4L2_STD_PAL_60 | V4L2_STD_SECAM_L | V4L2_STD_SECAM_DK) #define SLEEP_S5H1432 30 #define CX23417_OSC_EN 8 #define CX23417_RESET 9 #define EP5_BUF_SIZE 4096 #define EP5_TIMEOUT_MS 2000 struct cx23417_fmt { u32 fourcc; /* v4l2 format id */ int depth; int flags; u32 cxformat; }; enum cx231xx_mode { CX231XX_SUSPEND, CX231XX_ANALOG_MODE, CX231XX_DIGITAL_MODE, }; enum cx231xx_std_mode { CX231XX_TV_AIR = 0, CX231XX_TV_CABLE }; enum cx231xx_stream_state { STREAM_OFF, STREAM_INTERRUPT, STREAM_ON, }; struct cx231xx; struct cx231xx_isoc_ctl { /* max packet size of isoc transaction */ int max_pkt_size; /* number of allocated urbs */ int num_bufs; /* urb for isoc transfers */ struct urb **urb; /* transfer buffers for isoc transfer */ char **transfer_buffer; /* Last buffer command and region */ u8 cmd; int pos, size, pktsize; /* Last field: ODD or EVEN? */ int field; /* Stores incomplete commands */ u32 tmp_buf; int tmp_buf_len; /* Stores already requested buffers */ struct cx231xx_buffer *buf; /* Stores the number of received fields */ int nfields; /* isoc urb callback */ int (*isoc_copy) (struct cx231xx *dev, struct urb *urb); }; struct cx231xx_bulk_ctl { /* max packet size of bulk transaction */ int max_pkt_size; /* number of allocated urbs */ int num_bufs; /* urb for bulk transfers */ struct urb **urb; /* transfer buffers for bulk transfer */ char **transfer_buffer; /* Last buffer command and region */ u8 cmd; int pos, size, pktsize; /* Last field: ODD or EVEN? 
*/ int field; /* Stores incomplete commands */ u32 tmp_buf; int tmp_buf_len; /* Stores already requested buffers */ struct cx231xx_buffer *buf; /* Stores the number of received fields */ int nfields; /* bulk urb callback */ int (*bulk_copy) (struct cx231xx *dev, struct urb *urb); }; struct cx231xx_fmt { char *name; u32 fourcc; /* v4l2 format id */ int depth; int reg; }; /* buffer for one video frame */ struct cx231xx_buffer { /* common v4l buffer stuff -- must be first */ struct vb2_v4l2_buffer vb; struct list_head list; struct list_head frame; int top_field; int receiving; }; enum ps_package_head { CX231XX_NEED_ADD_PS_PACKAGE_HEAD = 0, CX231XX_NONEED_PS_PACKAGE_HEAD }; struct cx231xx_dmaqueue { struct list_head active; wait_queue_head_t wq; /* Counters to control buffer fill */ int pos; u8 is_partial_line; u8 partial_buf[8]; u8 last_sav; int current_field; u32 bytes_left_in_line; u32 lines_completed; u8 field1_done; u32 lines_per_field; u32 sequence; /*Mpeg2 control buffer*/ u8 *p_left_data; u32 left_data_count; u8 mpeg_buffer_done; u32 mpeg_buffer_completed; enum ps_package_head add_ps_package_head; char ps_head[10]; }; /* inputs */ #define MAX_CX231XX_INPUT 4 enum cx231xx_itype { CX231XX_VMUX_COMPOSITE1 = 1, CX231XX_VMUX_SVIDEO, CX231XX_VMUX_TELEVISION, CX231XX_VMUX_CABLE, CX231XX_RADIO, CX231XX_VMUX_DVB, }; enum cx231xx_v_input { CX231XX_VIN_1_1 = 0x1, CX231XX_VIN_2_1, CX231XX_VIN_3_1, CX231XX_VIN_4_1, CX231XX_VIN_1_2 = 0x01, CX231XX_VIN_2_2, CX231XX_VIN_3_2, CX231XX_VIN_1_3 = 0x1, CX231XX_VIN_2_3, CX231XX_VIN_3_3, }; /* cx231xx has two audio inputs: tuner and line in */ enum cx231xx_amux { /* This is the only entry for cx231xx tuner input */ CX231XX_AMUX_VIDEO, /* cx231xx tuner */ CX231XX_AMUX_LINE_IN, /* Line In */ }; struct cx231xx_reg_seq { unsigned char bit; unsigned char val; int sleep; }; struct cx231xx_input { enum cx231xx_itype type; unsigned int vmux; enum cx231xx_amux amux; struct cx231xx_reg_seq *gpio; }; #define INPUT(nr) (&cx231xx_boards[dev->model].input[nr]) enum cx231xx_decoder { CX231XX_NODECODER, CX231XX_AVDECODER }; enum CX231XX_I2C_MASTER_PORT { I2C_0 = 0, /* master 0 - internal connection */ I2C_1 = 1, /* master 1 - used with mux */ I2C_2 = 2, /* master 2 */ I2C_1_MUX_1 = 3, /* master 1 - port 1 (I2C_DEMOD_EN = 0) */ I2C_1_MUX_3 = 4 /* master 1 - port 3 (I2C_DEMOD_EN = 1) */ }; struct cx231xx_board { char *name; int vchannels; int tuner_type; int tuner_addr; v4l2_std_id norm; /* tv norm */ /* demod related */ int demod_addr; int demod_addr2; u8 demod_xfer_mode; /* 0 - Serial; 1 - parallel */ /* GPIO Pins */ struct cx231xx_reg_seq *dvb_gpio; struct cx231xx_reg_seq *suspend_gpio; struct cx231xx_reg_seq *tuner_gpio; /* Negative means don't use it */ s8 tuner_sif_gpio; s8 tuner_scl_gpio; s8 tuner_sda_gpio; /* PIN ctrl */ u32 ctl_pin_status_mask; u8 agc_analog_digital_select_gpio; u32 gpio_pin_status_mask; /* i2c masters */ u8 tuner_i2c_master; u8 demod_i2c_master; u8 ir_i2c_master; /* for devices with I2C chips for IR */ char *rc_map_name; unsigned int max_range_640_480:1; unsigned int has_dvb:1; unsigned int has_417:1; unsigned int valid:1; unsigned int no_alt_vanc:1; unsigned int external_av:1; unsigned char xclk, i2c_speed; enum cx231xx_decoder decoder; int output_mode; struct cx231xx_input input[MAX_CX231XX_INPUT]; struct cx231xx_input radio; struct rc_map *ir_codes; }; /* device states */ enum cx231xx_dev_state { DEV_INITIALIZED = 0x01, DEV_DISCONNECTED = 0x02, }; enum AFE_MODE { AFE_MODE_LOW_IF, AFE_MODE_BASEBAND, AFE_MODE_EU_HI_IF, AFE_MODE_US_HI_IF, 
AFE_MODE_JAPAN_HI_IF }; enum AUDIO_INPUT { AUDIO_INPUT_MUTE, AUDIO_INPUT_LINE, AUDIO_INPUT_TUNER_TV, AUDIO_INPUT_SPDIF, AUDIO_INPUT_TUNER_FM }; #define CX231XX_AUDIO_BUFS 5 #define CX231XX_NUM_AUDIO_PACKETS 16 #define CX231XX_ISO_NUM_AUDIO_PACKETS 64 /* cx231xx extensions */ #define CX231XX_AUDIO 0x10 #define CX231XX_DVB 0x20 struct cx231xx_audio { char name[50]; char *transfer_buffer[CX231XX_AUDIO_BUFS]; struct urb *urb[CX231XX_AUDIO_BUFS]; struct usb_device *udev; unsigned int capture_transfer_done; struct snd_pcm_substream *capture_pcm_substream; unsigned int hwptr_done_capture; struct snd_card *sndcard; int users, shutdown; /* locks */ spinlock_t slock; int alt; /* alternate */ int max_pkt_size; /* max packet size of isoc transaction */ int num_alt; /* Number of alternative settings */ unsigned int *alt_max_pkt_size; /* array of wMaxPacketSize */ u16 end_point_addr; }; /*****************************************************************/ /* set/get i2c */ /* 00--1Mb/s, 01-400kb/s, 10--100kb/s, 11--5Mb/s */ #define I2C_SPEED_1M 0x0 #define I2C_SPEED_400K 0x1 #define I2C_SPEED_100K 0x2 #define I2C_SPEED_5M 0x3 /* 0-- STOP transaction */ #define I2C_STOP 0x0 /* 1-- do not transmit STOP at end of transaction */ #define I2C_NOSTOP 0x1 /* 1--allow slave to insert clock wait states */ #define I2C_SYNC 0x1 struct cx231xx_i2c { struct cx231xx *dev; int nr; /* i2c i/o */ struct i2c_adapter i2c_adap; int i2c_rc; /* different settings for each bus */ u8 i2c_period; u8 i2c_nostop; u8 i2c_reserve; }; struct cx231xx_i2c_xfer_data { u8 dev_addr; u8 direction; /* 1 - IN, 0 - OUT */ u8 saddr_len; /* sub address len */ u16 saddr_dat; /* sub addr data */ u8 buf_size; /* buffer size */ u8 *p_buffer; /* pointer to the buffer */ }; struct VENDOR_REQUEST_IN { u8 bRequest; u16 wValue; u16 wIndex; u16 wLength; u8 direction; u8 bData; u8 *pBuff; }; struct cx231xx_tvnorm { char *name; v4l2_std_id id; u32 cxiformat; u32 cxoformat; }; enum TRANSFER_TYPE { Raw_Video = 0, Audio, Vbi, /* VANC */ Sliced_cc, /* HANC */ TS1_serial_mode, TS2, TS1_parallel_mode } ; struct cx231xx_video_mode { /* Isoc control struct */ struct cx231xx_dmaqueue vidq; struct cx231xx_isoc_ctl isoc_ctl; struct cx231xx_bulk_ctl bulk_ctl; /* locks */ spinlock_t slock; /* usb transfer */ int alt; /* alternate */ int max_pkt_size; /* max packet size of isoc transaction */ int num_alt; /* Number of alternative settings */ unsigned int *alt_max_pkt_size; /* array of wMaxPacketSize */ u16 end_point_addr; }; struct cx231xx_tsport { struct cx231xx *dev; int nr; int sram_chno; /* dma queues */ u32 ts_packet_size; u32 ts_packet_count; int width; int height; /* locks */ spinlock_t slock; /* registers */ u32 reg_gpcnt; u32 reg_gpcnt_ctl; u32 reg_dma_ctl; u32 reg_lngth; u32 reg_hw_sop_ctrl; u32 reg_gen_ctrl; u32 reg_bd_pkt_status; u32 reg_sop_status; u32 reg_fifo_ovfl_stat; u32 reg_vld_misc; u32 reg_ts_clk_en; u32 reg_ts_int_msk; u32 reg_ts_int_stat; u32 reg_src_sel; /* Default register vals */ int pci_irqmask; u32 dma_ctl_val; u32 ts_int_msk_val; u32 gen_ctrl_val; u32 ts_clk_en_val; u32 src_sel_val; u32 vld_misc_val; u32 hw_sop_ctrl_val; /* Allow a single tsport to have multiple frontends */ u32 num_frontends; void *port_priv; }; /* main device struct */ struct cx231xx { /* generic device properties */ char name[30]; /* name (including minor) of the device */ int model; /* index in the device_data struct */ int devno; /* marks the number of this device */ struct device *dev; /* pointer to USB interface's dev */ struct cx231xx_board board; /* For I2C IR support 
*/ struct IR_i2c_init_data init_data; struct i2c_client *ir_i2c_client; unsigned int stream_on:1; /* Locks streams */ unsigned int vbi_stream_on:1; /* Locks streams for VBI */ unsigned int has_audio_class:1; unsigned int has_alsa_audio:1; unsigned int i2c_scan_running:1; /* true only during i2c_scan */ struct cx231xx_fmt *format; struct v4l2_device v4l2_dev; struct v4l2_subdev *sd_cx25840; struct v4l2_subdev *sd_tuner; struct v4l2_ctrl_handler ctrl_handler; struct v4l2_ctrl_handler radio_ctrl_handler; struct cx2341x_handler mpeg_ctrl_handler; struct work_struct wq_trigger; /* Trigger to start/stop audio for alsa module */ atomic_t stream_started; /* stream should be running if true */ struct list_head devlist; int tuner_type; /* type of the tuner */ int tuner_addr; /* tuner address */ /* I2C adapters: Master 1 & 2 (External) & Master 3 (Internal only) */ struct cx231xx_i2c i2c_bus[3]; struct i2c_mux_core *muxc; struct i2c_adapter *i2c_mux_adap[2]; unsigned int xc_fw_load_done:1; unsigned int port_3_switch_enabled:1; /* locks */ struct mutex gpio_i2c_lock; struct mutex i2c_lock; /* video for linux */ int users; /* user count for exclusive use */ struct video_device vdev; /* video for linux device struct */ v4l2_std_id norm; /* selected tv norm */ int ctl_freq; /* selected frequency */ unsigned int ctl_ainput; /* selected audio input */ /* frame properties */ int width; /* current frame width */ int height; /* current frame height */ int interlaced; /* 1=interlace fields, 0=just top fields */ unsigned int size; struct cx231xx_audio adev; /* states */ enum cx231xx_dev_state state; struct work_struct request_module_wk; /* locks */ struct mutex lock; struct mutex ctrl_urb_lock; /* protects urb_buf */ struct list_head inqueue, outqueue; wait_queue_head_t open, wait_frame, wait_stream; struct video_device vbi_dev; struct video_device radio_dev; #if defined(CONFIG_MEDIA_CONTROLLER) struct media_device *media_dev; struct media_pad video_pad, vbi_pad; struct media_entity input_ent[MAX_CX231XX_INPUT]; struct media_pad input_pad[MAX_CX231XX_INPUT]; #endif struct vb2_queue vidq; struct vb2_queue vbiq; unsigned char eedata[256]; struct cx231xx_video_mode video_mode; struct cx231xx_video_mode vbi_mode; struct cx231xx_video_mode sliced_cc_mode; struct cx231xx_video_mode ts1_mode; atomic_t devlist_count; struct usb_device *udev; /* the usb device */ char urb_buf[URB_MAX_CTRL_SIZE]; /* urb control msg buffer */ /* helper funcs that call usb_control_msg */ int (*cx231xx_read_ctrl_reg) (struct cx231xx *dev, u8 req, u16 reg, char *buf, int len); int (*cx231xx_write_ctrl_reg) (struct cx231xx *dev, u8 req, u16 reg, char *buf, int len); int (*cx231xx_send_usb_command) (struct cx231xx_i2c *i2c_bus, struct cx231xx_i2c_xfer_data *req_data); int (*cx231xx_gpio_i2c_read) (struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len); int (*cx231xx_gpio_i2c_write) (struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len); int (*cx231xx_set_analog_freq) (struct cx231xx *dev, u32 freq); int (*cx231xx_reset_analog_tuner) (struct cx231xx *dev); enum cx231xx_mode mode; struct cx231xx_dvb *dvb; /* Cx231xx supported PCB config's */ struct pcb_config current_pcb_config; u8 current_scenario_idx; u8 interface_count; u8 max_iad_interface_count; /* GPIO related register direction and values */ u32 gpio_dir; u32 gpio_val; /* Power Modes */ int power_mode; /* afe parameters */ enum AFE_MODE afe_mode; u32 afe_ref_count; /* video related parameters */ u32 video_input; u32 active_mode; u8 vbi_or_sliced_cc_mode; /* 0 - vbi ; 1 - sliced cc mode */ enum 
cx231xx_std_mode std_mode; /* 0 - Air; 1 - cable */ /*mode: digital=1 or analog=0*/ u8 mode_tv; u8 USE_ISO; struct cx231xx_tvnorm encodernorm; struct cx231xx_tsport ts1, ts2; struct vb2_queue mpegq; struct video_device v4l_device; atomic_t v4l_reader_count; u32 freq; unsigned int input; u32 cx23417_mailbox; u32 __iomem *lmmio; u8 __iomem *bmmio; }; extern struct list_head cx231xx_devlist; #define cx25840_call(cx231xx, o, f, args...) \ v4l2_subdev_call(cx231xx->sd_cx25840, o, f, ##args) #define tuner_call(cx231xx, o, f, args...) \ v4l2_subdev_call(cx231xx->sd_tuner, o, f, ##args) #define call_all(dev, o, f, args...) \ v4l2_device_call_until_err(&dev->v4l2_dev, 0, o, f, ##args) struct cx231xx_ops { struct list_head next; char *name; int id; int (*init) (struct cx231xx *); int (*fini) (struct cx231xx *); }; /* call back functions in dvb module */ int cx231xx_set_analog_freq(struct cx231xx *dev, u32 freq); int cx231xx_reset_analog_tuner(struct cx231xx *dev); /* Provided by cx231xx-i2c.c */ void cx231xx_do_i2c_scan(struct cx231xx *dev, int i2c_port); int cx231xx_i2c_register(struct cx231xx_i2c *bus); void cx231xx_i2c_unregister(struct cx231xx_i2c *bus); int cx231xx_i2c_mux_create(struct cx231xx *dev); int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no); void cx231xx_i2c_mux_unregister(struct cx231xx *dev); struct i2c_adapter *cx231xx_get_i2c_adap(struct cx231xx *dev, int i2c_port); /* Internal block control functions */ int cx231xx_read_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 *data, u8 data_len, int master); int cx231xx_write_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 data, u8 data_len, int master); int cx231xx_read_i2c_data(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 *data, u8 data_len); int cx231xx_write_i2c_data(struct cx231xx *dev, u8 dev_addr, u16 saddr, u8 saddr_len, u32 data, u8 data_len); int cx231xx_reg_mask_write(struct cx231xx *dev, u8 dev_addr, u8 size, u16 register_address, u8 bit_start, u8 bit_end, u32 value); int cx231xx_read_modify_write_i2c_dword(struct cx231xx *dev, u8 dev_addr, u16 saddr, u32 mask, u32 value); u32 cx231xx_set_field(u32 field_mask, u32 data); /*verve r/w*/ void initGPIO(struct cx231xx *dev); void uninitGPIO(struct cx231xx *dev); /* afe related functions */ int cx231xx_afe_init_super_block(struct cx231xx *dev, u32 ref_count); int cx231xx_afe_init_channels(struct cx231xx *dev); int cx231xx_afe_setup_AFE_for_baseband(struct cx231xx *dev); int cx231xx_afe_set_input_mux(struct cx231xx *dev, u32 input_mux); int cx231xx_afe_set_mode(struct cx231xx *dev, enum AFE_MODE mode); int cx231xx_afe_update_power_control(struct cx231xx *dev, enum AV_MODE avmode); int cx231xx_afe_adjust_ref_count(struct cx231xx *dev, u32 video_input); /* i2s block related functions */ int cx231xx_i2s_blk_initialize(struct cx231xx *dev); int cx231xx_i2s_blk_update_power_control(struct cx231xx *dev, enum AV_MODE avmode); int cx231xx_i2s_blk_set_audio_input(struct cx231xx *dev, u8 audio_input); /* DIF related functions */ int cx231xx_dif_configure_C2HH_for_low_IF(struct cx231xx *dev, u32 mode, u32 function_mode, u32 standard); void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq, u8 spectral_invert, u32 mode); u32 cx231xx_Get_Colibri_CarrierOffset(u32 mode, u32 standerd); void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq, u8 spectral_invert, u32 mode); void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev); void reset_s5h1432_demod(struct cx231xx *dev); void 
update_HH_register_after_set_DIF(struct cx231xx *dev); int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard); int cx231xx_tuner_pre_channel_change(struct cx231xx *dev); int cx231xx_tuner_post_channel_change(struct cx231xx *dev); /* video parser functions */ u8 cx231xx_find_next_SAV_EAV(u8 *p_buffer, u32 buffer_size, u32 *p_bytes_used); u8 cx231xx_find_boundary_SAV_EAV(u8 *p_buffer, u8 *partial_buf, u32 *p_bytes_used); int cx231xx_do_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, u8 *p_buffer, u32 bytes_to_copy); void cx231xx_reset_video_buffer(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q); u8 cx231xx_is_buffer_done(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q); u32 cx231xx_copy_video_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, u8 *p_line, u32 length, int field_number); u32 cx231xx_get_video_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, u8 sav_eav, u8 *p_buffer, u32 buffer_size); void cx231xx_swab(u16 *from, u16 *to, u16 len); /* Provided by cx231xx-core.c */ u32 cx231xx_request_buffers(struct cx231xx *dev, u32 count); void cx231xx_queue_unusedframes(struct cx231xx *dev); void cx231xx_release_buffers(struct cx231xx *dev); /* read from control pipe */ int cx231xx_read_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf, int len); /* write to control pipe */ int cx231xx_write_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf, int len); int cx231xx_mode_register(struct cx231xx *dev, u16 address, u32 mode); int cx231xx_send_vendor_cmd(struct cx231xx *dev, struct VENDOR_REQUEST_IN *ven_req); int cx231xx_send_usb_command(struct cx231xx_i2c *i2c_bus, struct cx231xx_i2c_xfer_data *req_data); /* Gpio related functions */ int cx231xx_send_gpio_cmd(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val, u8 len, u8 request, u8 direction); int cx231xx_set_gpio_value(struct cx231xx *dev, int pin_number, int pin_value); int cx231xx_set_gpio_direction(struct cx231xx *dev, int pin_number, int pin_value); int cx231xx_gpio_i2c_start(struct cx231xx *dev); int cx231xx_gpio_i2c_end(struct cx231xx *dev); int cx231xx_gpio_i2c_write_byte(struct cx231xx *dev, u8 data); int cx231xx_gpio_i2c_read_byte(struct cx231xx *dev, u8 *buf); int cx231xx_gpio_i2c_read_ack(struct cx231xx *dev); int cx231xx_gpio_i2c_write_ack(struct cx231xx *dev); int cx231xx_gpio_i2c_write_nak(struct cx231xx *dev); int cx231xx_gpio_i2c_read(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len); int cx231xx_gpio_i2c_write(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len); /* audio related functions */ int cx231xx_set_audio_decoder_input(struct cx231xx *dev, enum AUDIO_INPUT audio_input); int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type); int cx231xx_set_video_alternate(struct cx231xx *dev); int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt); int is_fw_load(struct cx231xx *dev); int cx231xx_check_fw(struct cx231xx *dev); int cx231xx_init_isoc(struct cx231xx *dev, int max_packets, int num_bufs, int max_pkt_size, int (*isoc_copy) (struct cx231xx *dev, struct urb *urb)); int cx231xx_init_bulk(struct cx231xx *dev, int max_packets, int num_bufs, int max_pkt_size, int (*bulk_copy) (struct cx231xx *dev, struct urb *urb)); void cx231xx_stop_TS1(struct cx231xx *dev); void cx231xx_start_TS1(struct cx231xx *dev); void cx231xx_uninit_isoc(struct cx231xx *dev); void cx231xx_uninit_bulk(struct cx231xx *dev); int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode); int cx231xx_unmute_audio(struct cx231xx *dev); int cx231xx_ep5_bulkout(struct 
cx231xx *dev, u8 *firmware, u16 size); void cx231xx_disable656(struct cx231xx *dev); void cx231xx_enable656(struct cx231xx *dev); int cx231xx_demod_reset(struct cx231xx *dev); int cx231xx_gpio_set(struct cx231xx *dev, struct cx231xx_reg_seq *gpio); /* Device list functions */ void cx231xx_release_resources(struct cx231xx *dev); void cx231xx_release_analog_resources(struct cx231xx *dev); int cx231xx_register_analog_devices(struct cx231xx *dev); void cx231xx_remove_from_devlist(struct cx231xx *dev); void cx231xx_add_into_devlist(struct cx231xx *dev); void cx231xx_init_extension(struct cx231xx *dev); void cx231xx_close_extension(struct cx231xx *dev); /* hardware init functions */ int cx231xx_dev_init(struct cx231xx *dev); void cx231xx_dev_uninit(struct cx231xx *dev); void cx231xx_config_i2c(struct cx231xx *dev); int cx231xx_config(struct cx231xx *dev); /* Stream control functions */ int cx231xx_start_stream(struct cx231xx *dev, u32 ep_mask); int cx231xx_stop_stream(struct cx231xx *dev, u32 ep_mask); int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type); /* Power control functions */ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode); /* chip specific control functions */ int cx231xx_init_ctrl_pin_status(struct cx231xx *dev); int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev, u8 analog_or_digital); int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3); /* video audio decoder related functions */ void video_mux(struct cx231xx *dev, int index); int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input); int cx231xx_set_decoder_video_input(struct cx231xx *dev, u8 pin_type, u32 input); int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev); int cx231xx_set_audio_input(struct cx231xx *dev, u8 input); /* Provided by cx231xx-video.c */ int cx231xx_register_extension(struct cx231xx_ops *dev); void cx231xx_unregister_extension(struct cx231xx_ops *dev); void cx231xx_init_extension(struct cx231xx *dev); void cx231xx_close_extension(struct cx231xx *dev); void cx231xx_v4l2_create_entities(struct cx231xx *dev); int cx231xx_querycap(struct file *file, void *priv, struct v4l2_capability *cap); int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t); int cx231xx_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t); int cx231xx_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f); int cx231xx_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *f); int cx231xx_enum_input(struct file *file, void *priv, struct v4l2_input *i); int cx231xx_g_input(struct file *file, void *priv, unsigned int *i); int cx231xx_s_input(struct file *file, void *priv, unsigned int i); int cx231xx_g_chip_info(struct file *file, void *fh, struct v4l2_dbg_chip_info *chip); int cx231xx_g_register(struct file *file, void *priv, struct v4l2_dbg_register *reg); int cx231xx_s_register(struct file *file, void *priv, const struct v4l2_dbg_register *reg); /* Provided by cx231xx-cards.c */ extern void cx231xx_pre_card_setup(struct cx231xx *dev); extern void cx231xx_card_setup(struct cx231xx *dev); extern struct cx231xx_board cx231xx_boards[]; extern struct usb_device_id cx231xx_id_table[]; int cx231xx_tuner_callback(void *ptr, int component, int command, int arg); /* cx23885-417.c */ extern int cx231xx_417_register(struct cx231xx *dev); extern void cx231xx_417_unregister(struct cx231xx *dev); /* cx23885-input.c */ #if defined(CONFIG_VIDEO_CX231XX_RC) int cx231xx_ir_init(struct cx231xx *dev); void 
cx231xx_ir_exit(struct cx231xx *dev); #else static inline int cx231xx_ir_init(struct cx231xx *dev) { return 0; } static inline void cx231xx_ir_exit(struct cx231xx *dev) {} #endif static inline unsigned int norm_maxw(struct cx231xx *dev) { if (dev->board.max_range_640_480) return 640; else return 720; } static inline unsigned int norm_maxh(struct cx231xx *dev) { if (dev->board.max_range_640_480) return 480; else return (dev->norm & V4L2_STD_625_50) ? 576 : 480; } #endif
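/*
 * As a sketch of the extension mechanism declared above
 * (cx231xx_register_extension() is how sub-modules such as the ALSA and DVB
 * parts attach to the core), a hypothetical minimal extension module. The
 * "example" names and the reuse of the CX231XX_DVB id bit are illustrative
 * assumptions, not part of the header.
 */
static int example_init(struct cx231xx *dev)
{
	dev_info(dev->dev, "example extension bound to %s\n", dev->name);
	return 0;
}

static int example_fini(struct cx231xx *dev)
{
	return 0;
}

static struct cx231xx_ops example_ops = {
	.name = "example extension",
	.id = CX231XX_DVB,		/* extension id bit, see defines above */
	.init = example_init,
	.fini = example_fini,
};

static int __init example_module_init(void)
{
	/* the core runs .init for each already-probed device, then on hotplug */
	return cx231xx_register_extension(&example_ops);
}

static void __exit example_module_exit(void)
{
	cx231xx_unregister_extension(&example_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);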
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2016 Mellanox Technologies. All rights reserved. * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> */ #include <net/genetlink.h> #define CREATE_TRACE_POINTS #include <trace/events/devlink.h> #include "devl_internal.h" EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg); EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr); EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report); DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC); static struct devlink *devlinks_xa_get(unsigned long index) { struct devlink *devlink; rcu_read_lock(); devlink = xa_find(&devlinks, &index, index, DEVLINK_REGISTERED); if (!devlink || !devlink_try_get(devlink)) devlink = NULL; rcu_read_unlock(); return devlink; } /* devlink_rels xarray contains 1:1 relationships between * devlink object and related nested devlink instance. * The xarray index is used to get the nested object from * the nested-in object code. 
*/ static DEFINE_XARRAY_FLAGS(devlink_rels, XA_FLAGS_ALLOC1); #define DEVLINK_REL_IN_USE XA_MARK_0 struct devlink_rel { u32 index; refcount_t refcount; u32 devlink_index; struct { u32 devlink_index; u32 obj_index; devlink_rel_notify_cb_t *notify_cb; devlink_rel_cleanup_cb_t *cleanup_cb; struct delayed_work notify_work; } nested_in; }; static void devlink_rel_free(struct devlink_rel *rel) { xa_erase(&devlink_rels, rel->index); kfree(rel); } static void __devlink_rel_get(struct devlink_rel *rel) { refcount_inc(&rel->refcount); } static void __devlink_rel_put(struct devlink_rel *rel) { if (refcount_dec_and_test(&rel->refcount)) devlink_rel_free(rel); } static void devlink_rel_nested_in_notify_work(struct work_struct *work) { struct devlink_rel *rel = container_of(work, struct devlink_rel, nested_in.notify_work.work); struct devlink *devlink; devlink = devlinks_xa_get(rel->nested_in.devlink_index); if (!devlink) goto rel_put; if (!devl_trylock(devlink)) { devlink_put(devlink); goto reschedule_work; } if (!devl_is_registered(devlink)) { devl_unlock(devlink); devlink_put(devlink); goto rel_put; } if (!xa_get_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE)) rel->nested_in.cleanup_cb(devlink, rel->nested_in.obj_index, rel->index); rel->nested_in.notify_cb(devlink, rel->nested_in.obj_index); devl_unlock(devlink); devlink_put(devlink); rel_put: __devlink_rel_put(rel); return; reschedule_work: schedule_delayed_work(&rel->nested_in.notify_work, 1); } static void devlink_rel_nested_in_notify_work_schedule(struct devlink_rel *rel) { __devlink_rel_get(rel); schedule_delayed_work(&rel->nested_in.notify_work, 0); } static struct devlink_rel *devlink_rel_alloc(void) { struct devlink_rel *rel; static u32 next; int err; rel = kzalloc(sizeof(*rel), GFP_KERNEL); if (!rel) return ERR_PTR(-ENOMEM); err = xa_alloc_cyclic(&devlink_rels, &rel->index, rel, xa_limit_32b, &next, GFP_KERNEL); if (err < 0) { kfree(rel); return ERR_PTR(err); } refcount_set(&rel->refcount, 1); INIT_DELAYED_WORK(&rel->nested_in.notify_work, &devlink_rel_nested_in_notify_work); return rel; } static void devlink_rel_put(struct devlink *devlink) { struct devlink_rel *rel = devlink->rel; if (!rel) return; xa_clear_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE); devlink_rel_nested_in_notify_work_schedule(rel); __devlink_rel_put(rel); devlink->rel = NULL; } void devlink_rel_nested_in_clear(u32 rel_index) { xa_clear_mark(&devlink_rels, rel_index, DEVLINK_REL_IN_USE); } int devlink_rel_nested_in_add(u32 *rel_index, u32 devlink_index, u32 obj_index, devlink_rel_notify_cb_t *notify_cb, devlink_rel_cleanup_cb_t *cleanup_cb, struct devlink *devlink) { struct devlink_rel *rel = devlink_rel_alloc(); ASSERT_DEVLINK_NOT_REGISTERED(devlink); if (IS_ERR(rel)) return PTR_ERR(rel); rel->devlink_index = devlink->index; rel->nested_in.devlink_index = devlink_index; rel->nested_in.obj_index = obj_index; rel->nested_in.notify_cb = notify_cb; rel->nested_in.cleanup_cb = cleanup_cb; *rel_index = rel->index; xa_set_mark(&devlink_rels, rel->index, DEVLINK_REL_IN_USE); devlink->rel = rel; return 0; } /** * devlink_rel_nested_in_notify - Notify the object this devlink * instance is nested in. * @devlink: devlink * * This is called upon network namespace change of devlink instance. * In case this devlink instance is nested in another devlink object, * a notification of a change of this object should be sent * over netlink. The parent devlink instance lock needs to be * taken during the notification preparation. 
* However, since the devlink lock of nested instance is held here, * we would end with wrong devlink instance lock ordering and * deadlock. Therefore the work is utilized to avoid that. */ void devlink_rel_nested_in_notify(struct devlink *devlink) { struct devlink_rel *rel = devlink->rel; if (!rel) return; devlink_rel_nested_in_notify_work_schedule(rel); } static struct devlink_rel *devlink_rel_find(unsigned long rel_index) { return xa_find(&devlink_rels, &rel_index, rel_index, DEVLINK_REL_IN_USE); } static struct devlink *devlink_rel_devlink_get(u32 rel_index) { struct devlink_rel *rel; u32 devlink_index; if (!rel_index) return NULL; xa_lock(&devlink_rels); rel = devlink_rel_find(rel_index); if (rel) devlink_index = rel->devlink_index; xa_unlock(&devlink_rels); if (!rel) return NULL; return devlinks_xa_get(devlink_index); } int devlink_rel_devlink_handle_put(struct sk_buff *msg, struct devlink *devlink, u32 rel_index, int attrtype, bool *msg_updated) { struct net *net = devlink_net(devlink); struct devlink *rel_devlink; int err; rel_devlink = devlink_rel_devlink_get(rel_index); if (!rel_devlink) return 0; err = devlink_nl_put_nested_handle(msg, net, rel_devlink, attrtype); devlink_put(rel_devlink); if (!err && msg_updated) *msg_updated = true; return err; } void *devlink_priv(struct devlink *devlink) { return &devlink->priv; } EXPORT_SYMBOL_GPL(devlink_priv); struct devlink *priv_to_devlink(void *priv) { return container_of(priv, struct devlink, priv); } EXPORT_SYMBOL_GPL(priv_to_devlink); struct device *devlink_to_dev(const struct devlink *devlink) { return devlink->dev; } EXPORT_SYMBOL_GPL(devlink_to_dev); struct net *devlink_net(const struct devlink *devlink) { return read_pnet(&devlink->_net); } EXPORT_SYMBOL_GPL(devlink_net); void devl_assert_locked(struct devlink *devlink) { lockdep_assert_held(&devlink->lock); } EXPORT_SYMBOL_GPL(devl_assert_locked); #ifdef CONFIG_LOCKDEP /* For use in conjunction with LOCKDEP only e.g. rcu_dereference_protected() */ bool devl_lock_is_held(struct devlink *devlink) { return lockdep_is_held(&devlink->lock); } EXPORT_SYMBOL_GPL(devl_lock_is_held); #endif void devl_lock(struct devlink *devlink) { mutex_lock(&devlink->lock); } EXPORT_SYMBOL_GPL(devl_lock); int devl_trylock(struct devlink *devlink) { return mutex_trylock(&devlink->lock); } EXPORT_SYMBOL_GPL(devl_trylock); void devl_unlock(struct devlink *devlink) { mutex_unlock(&devlink->lock); } EXPORT_SYMBOL_GPL(devl_unlock); /** * devlink_try_get() - try to obtain a reference on a devlink instance * @devlink: instance to reference * * Obtain a reference on a devlink instance. A reference on a devlink instance * only implies that it's safe to take the instance lock. It does not imply * that the instance is registered, use devl_is_registered() after taking * the instance lock to check registration status. 
*/ struct devlink *__must_check devlink_try_get(struct devlink *devlink) { if (refcount_inc_not_zero(&devlink->refcount)) return devlink; return NULL; } static void devlink_release(struct work_struct *work) { struct devlink *devlink; devlink = container_of(to_rcu_work(work), struct devlink, rwork); mutex_destroy(&devlink->lock); lockdep_unregister_key(&devlink->lock_key); put_device(devlink->dev); kvfree(devlink); } void devlink_put(struct devlink *devlink) { if (refcount_dec_and_test(&devlink->refcount)) queue_rcu_work(system_wq, &devlink->rwork); } struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp) { struct devlink *devlink = NULL; rcu_read_lock(); retry: devlink = xa_find(&devlinks, indexp, ULONG_MAX, DEVLINK_REGISTERED); if (!devlink) goto unlock; if (!devlink_try_get(devlink)) goto next; if (!net_eq(devlink_net(devlink), net)) { devlink_put(devlink); goto next; } unlock: rcu_read_unlock(); return devlink; next: (*indexp)++; goto retry; } /** * devl_register - Register devlink instance * @devlink: devlink */ int devl_register(struct devlink *devlink) { ASSERT_DEVLINK_NOT_REGISTERED(devlink); devl_assert_locked(devlink); xa_set_mark(&devlinks, devlink->index, DEVLINK_REGISTERED); devlink_notify_register(devlink); devlink_rel_nested_in_notify(devlink); return 0; } EXPORT_SYMBOL_GPL(devl_register); void devlink_register(struct devlink *devlink) { devl_lock(devlink); devl_register(devlink); devl_unlock(devlink); } EXPORT_SYMBOL_GPL(devlink_register); /** * devl_unregister - Unregister devlink instance * @devlink: devlink */ void devl_unregister(struct devlink *devlink) { ASSERT_DEVLINK_REGISTERED(devlink); devl_assert_locked(devlink); devlink_notify_unregister(devlink); xa_clear_mark(&devlinks, devlink->index, DEVLINK_REGISTERED); devlink_rel_put(devlink); } EXPORT_SYMBOL_GPL(devl_unregister); void devlink_unregister(struct devlink *devlink) { devl_lock(devlink); devl_unregister(devlink); devl_unlock(devlink); } EXPORT_SYMBOL_GPL(devlink_unregister); /** * devlink_alloc_ns - Allocate new devlink instance resources * in specific namespace * * @ops: ops * @priv_size: size of user private data * @net: net namespace * @dev: parent device * * Allocate new devlink instance resources, including devlink index * and name. 
*/ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops, size_t priv_size, struct net *net, struct device *dev) { struct devlink *devlink; static u32 last_id; int ret; WARN_ON(!ops || !dev); if (!devlink_reload_actions_valid(ops)) return NULL; devlink = kvzalloc(struct_size(devlink, priv, priv_size), GFP_KERNEL); if (!devlink) return NULL; ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b, &last_id, GFP_KERNEL); if (ret < 0) goto err_xa_alloc; devlink->dev = get_device(dev); devlink->ops = ops; xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC); xa_init_flags(&devlink->params, XA_FLAGS_ALLOC); xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC); xa_init_flags(&devlink->nested_rels, XA_FLAGS_ALLOC); write_pnet(&devlink->_net, net); INIT_LIST_HEAD(&devlink->rate_list); INIT_LIST_HEAD(&devlink->linecard_list); INIT_LIST_HEAD(&devlink->sb_list); INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list); INIT_LIST_HEAD(&devlink->resource_list); INIT_LIST_HEAD(&devlink->region_list); INIT_LIST_HEAD(&devlink->reporter_list); INIT_LIST_HEAD(&devlink->trap_list); INIT_LIST_HEAD(&devlink->trap_group_list); INIT_LIST_HEAD(&devlink->trap_policer_list); INIT_RCU_WORK(&devlink->rwork, devlink_release); lockdep_register_key(&devlink->lock_key); mutex_init(&devlink->lock); lockdep_set_class(&devlink->lock, &devlink->lock_key); refcount_set(&devlink->refcount, 1); return devlink; err_xa_alloc: kvfree(devlink); return NULL; } EXPORT_SYMBOL_GPL(devlink_alloc_ns); /** * devlink_free - Free devlink instance resources * * @devlink: devlink */ void devlink_free(struct devlink *devlink) { ASSERT_DEVLINK_NOT_REGISTERED(devlink); WARN_ON(!list_empty(&devlink->trap_policer_list)); WARN_ON(!list_empty(&devlink->trap_group_list)); WARN_ON(!list_empty(&devlink->trap_list)); WARN_ON(!list_empty(&devlink->reporter_list)); WARN_ON(!list_empty(&devlink->region_list)); WARN_ON(!list_empty(&devlink->resource_list)); WARN_ON(!list_empty(&devlink->dpipe_table_list)); WARN_ON(!list_empty(&devlink->sb_list)); WARN_ON(!list_empty(&devlink->rate_list)); WARN_ON(!list_empty(&devlink->linecard_list)); WARN_ON(!xa_empty(&devlink->ports)); xa_destroy(&devlink->nested_rels); xa_destroy(&devlink->snapshot_ids); xa_destroy(&devlink->params); xa_destroy(&devlink->ports); xa_erase(&devlinks, devlink->index); devlink_put(devlink); } EXPORT_SYMBOL_GPL(devlink_free); static void __net_exit devlink_pernet_pre_exit(struct net *net) { struct devlink *devlink; u32 actions_performed; unsigned long index; int err; /* In case network namespace is getting destroyed, reload * all devlink instances from this namespace into init_net. 
*/ devlinks_xa_for_each_registered_get(net, index, devlink) { devl_dev_lock(devlink, true); err = 0; if (devl_is_registered(devlink)) err = devlink_reload(devlink, &init_net, DEVLINK_RELOAD_ACTION_DRIVER_REINIT, DEVLINK_RELOAD_LIMIT_UNSPEC, &actions_performed, NULL); devl_dev_unlock(devlink, true); devlink_put(devlink); if (err && err != -EOPNOTSUPP) pr_warn("Failed to reload devlink instance into init_net\n"); } } static struct pernet_operations devlink_pernet_ops __net_initdata = { .pre_exit = devlink_pernet_pre_exit, }; static struct notifier_block devlink_port_netdevice_nb = { .notifier_call = devlink_port_netdevice_event, }; static int __init devlink_init(void) { int err; err = register_pernet_subsys(&devlink_pernet_ops); if (err) goto out; err = genl_register_family(&devlink_nl_family); if (err) goto out_unreg_pernet_subsys; err = register_netdevice_notifier(&devlink_port_netdevice_nb); if (!err) return 0; genl_unregister_family(&devlink_nl_family); out_unreg_pernet_subsys: unregister_pernet_subsys(&devlink_pernet_ops); out: WARN_ON(err); return err; } subsys_initcall(devlink_init);
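/*
 * Editor's note: the devl_trylock()/reschedule dance in
 * devlink_rel_nested_in_notify_work() above sidesteps a lock-ordering
 * deadlock by never blocking on the other instance's lock. Below is a
 * minimal userspace sketch of the same "trylock or defer" pattern using
 * pthreads; it is an illustration only, not kernel code, and the names
 * (target_lock, do_notify, retry_notify) are hypothetical.
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t target_lock = PTHREAD_MUTEX_INITIALIZER;

static void do_notify(void)
{
	/* deliver the deferred notification here */
}

/*
 * Return true once the notification was delivered. On contention the
 * caller re-queues this work instead of blocking, which is what breaks
 * the ABBA lock-order cycle.
 */
static bool retry_notify(void)
{
	if (pthread_mutex_trylock(&target_lock) != 0)
		return false;		/* contended: defer, don't deadlock */
	do_notify();
	pthread_mutex_unlock(&target_lock);
	return true;
}

int main(void)
{
	while (!retry_notify())
		sched_yield();	/* crude stand-in for schedule_delayed_work() */
	return 0;
}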
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Force feedback support for Betop based devices
 *
 * The devices are distributed under various names and the same USB device ID
 * can be used in both adapters and actual game controllers.
 *
 * 0x11c2:0x2208 "BTP2185 BFM mode Joystick"
 *  - tested with BTP2185 BFM Mode.
 *
 * 0x11C0:0x5506 "BTP2185 PC mode Joystick"
 *  - tested with BTP2185 PC Mode.
 *
 * 0x8380:0x1850 "BTP2185 V2 PC mode USB Gamepad"
 *  - tested with BTP2185 PC Mode with another version.
 *
 * 0x20bc:0x5500 "BTP2185 V2 BFM mode Joystick"
 *  - tested with BTP2171s.
 *
 * Copyright (c) 2014 Huang Bo <huangbobupt@163.com>
 */

#include <linux/input.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hid.h>

#include "hid-ids.h"

struct betopff_device {
	struct hid_report *report;
};

static int hid_betopff_play(struct input_dev *dev, void *data,
			    struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct betopff_device *betopff = data;
	__u16 left, right;

	left = effect->u.rumble.strong_magnitude;
	right = effect->u.rumble.weak_magnitude;

	betopff->report->field[2]->value[0] = left / 256;
	betopff->report->field[3]->value[0] = right / 256;

	hid_hw_request(hid, betopff->report, HID_REQ_SET_REPORT);

	return 0;
}

static int betopff_init(struct hid_device *hid)
{
	struct betopff_device *betopff;
	struct hid_report *report;
	struct hid_input *hidinput;
	struct list_head *report_list =
			&hid->report_enum[HID_OUTPUT_REPORT].report_list;
	struct input_dev *dev;
	int error;
	int i, j;

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;
	}

	hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
	dev = hidinput->input;

	if (list_empty(report_list)) {
		hid_err(hid, "no output reports found\n");
		return -ENODEV;
	}

	report = list_first_entry(report_list, struct hid_report, list);
	/*
	 * There are actually 4 fields, one byte each, as below:
	 * -------------------------------------------
	 * Byte0    Byte1    Byte2         Byte3
	 * 0x00     0x00     left_motor    right_motor
	 * -------------------------------------------
	 * Initialize them with default values.
	 */
	if (report->maxfield < 4) {
		hid_err(hid, "not enough fields in the report: %d\n",
			report->maxfield);
		return -ENODEV;
	}

	for (i = 0; i < report->maxfield; i++) {
		if (report->field[i]->report_count < 1) {
			hid_err(hid, "no values in the field\n");
			return -ENODEV;
		}

		for (j = 0; j < report->field[i]->report_count; j++) {
			report->field[i]->value[j] = 0x00;
		}
	}

	betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
	if (!betopff)
		return -ENOMEM;

	set_bit(FF_RUMBLE, dev->ffbit);

	error = input_ff_create_memless(dev, betopff, hid_betopff_play);
	if (error) {
		kfree(betopff);
		return error;
	}

	betopff->report = report;
	hid_hw_request(hid, betopff->report, HID_REQ_SET_REPORT);

	hid_info(hid, "Force feedback for betop devices by huangbo <huangbobupt@163.com>\n");

	return 0;
}

static int betop_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	if (id->driver_data)
		hdev->quirks |= HID_QUIRK_MULTI_INPUT;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		goto err;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		goto err;
	}

	betopff_init(hdev);

	return 0;
err:
	return ret;
}

static const struct hid_device_id betop_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
	{ }
};
MODULE_DEVICE_TABLE(hid, betop_devices);

static struct hid_driver betop_driver = {
	.name = "betop",
	.id_table = betop_devices,
	.probe = betop_probe,
};
module_hid_driver(betop_driver);

MODULE_DESCRIPTION("Force feedback support for Betop based devices");
MODULE_LICENSE("GPL");
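/*
 * Editor's note: hid_betopff_play() above reduces each 16-bit ff_effect
 * magnitude to the single byte its motor field can hold by dividing by
 * 256 (i.e. keeping the high byte). A standalone sketch of just that
 * scaling, for illustration; betop_scale is a hypothetical name.
 */
#include <stdio.h>
#include <stdint.h>

static uint8_t betop_scale(uint16_t magnitude)
{
	return magnitude / 256;	/* 0x0000..0xffff -> 0x00..0xff */
}

int main(void)
{
	printf("strong 0xffff -> 0x%02x\n", betop_scale(0xffff));	/* 0xff */
	printf("weak   0x8000 -> 0x%02x\n", betop_scale(0x8000));	/* 0x80 */
	return 0;
}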
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  HID driver for some petalynx "special" devices
 *
 *  Copyright (c) 1999 Andreas Gal
 *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 *  Copyright (c) 2006-2007 Jiri Kosina
 *  Copyright (c) 2008 Jiri Slaby
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

/* Petalynx Maxter Remote has maximum for consumer page set too low */
static const __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
		unsigned int *rsize)
{
	if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
			rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
			rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
		hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
		rdesc[60] = 0xfa;
		rdesc[40] = 0xfa;
	}
	return rdesc;
}

#define pl_map_key_clear(c)	hid_map_usage_clear(hi, usage, bit, max, \
					EV_KEY, (c))

static int pl_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	if ((usage->hid & HID_USAGE_PAGE) == HID_UP_LOGIVENDOR) {
		switch (usage->hid & HID_USAGE) {
		case 0x05a: pl_map_key_clear(KEY_TEXT);		break;
		case 0x05b: pl_map_key_clear(KEY_RED);		break;
		case 0x05c: pl_map_key_clear(KEY_GREEN);	break;
		case 0x05d: pl_map_key_clear(KEY_YELLOW);	break;
		case 0x05e: pl_map_key_clear(KEY_BLUE);		break;
		default:
			return 0;
		}
		return 1;
	}

	if ((usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) {
		switch (usage->hid & HID_USAGE) {
		case 0x0f6: pl_map_key_clear(KEY_NEXT);		break;
		case 0x0fa: pl_map_key_clear(KEY_BACK);		break;
		default:
			return 0;
		}
		return 1;
	}

	return 0;
}

static int pl_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	hdev->quirks |= HID_QUIRK_NOGET;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		goto err_free;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		goto err_free;
	}

	return 0;
err_free:
	return ret;
}

static const struct hid_device_id pl_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX,
			USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
	{ }
};
MODULE_DEVICE_TABLE(hid, pl_devices);

static struct hid_driver pl_driver = {
	.name = "petalynx",
	.id_table = pl_devices,
	.report_fixup = pl_report_fixup,
	.input_mapping = pl_input_mapping,
	.probe = pl_probe,
};
module_hid_driver(pl_driver);

MODULE_DESCRIPTION("HID driver for some petalynx \"special\" devices");
MODULE_LICENSE("GPL");
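/*
 * Editor's note: pl_report_fixup() above patches two bytes of the report
 * descriptor in place, but only after matching the surrounding bytes so
 * that no other descriptor can be corrupted. The standalone sketch below
 * applies the same guarded patch to a plain buffer; the descriptor bytes
 * in main() are fabricated purely for the test.
 */
#include <stdio.h>

static void maxter_fixup(unsigned char *rdesc, unsigned int rsize)
{
	/* same guard as the driver: patch only the exact known descriptor */
	if (rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
	    rdesc[41] == 0x00 && rdesc[59] == 0x26 && rdesc[60] == 0xf9 &&
	    rdesc[61] == 0x00) {
		rdesc[40] = 0xfa;	/* raise usage maximum 0x00f5 -> 0x00fa */
		rdesc[60] = 0xfa;	/* raise logical maximum 0x00f9 -> 0x00fa */
	}
}

int main(void)
{
	unsigned char rdesc[62] = { 0 };

	rdesc[39] = 0x2a; rdesc[40] = 0xf5; rdesc[41] = 0x00;
	rdesc[59] = 0x26; rdesc[60] = 0xf9; rdesc[61] = 0x00;

	maxter_fixup(rdesc, sizeof(rdesc));
	printf("rdesc[40]=0x%02x rdesc[60]=0x%02x\n", rdesc[40], rdesc[60]);
	return 0;
}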
// SPDX-License-Identifier: GPL-2.0-only /* * Line 6 Linux USB driver * * Copyright (C) 2004-2010 Markus Grabner (line6@grabner-graz.at) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/usb.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/hwdep.h> #include "capture.h" #include "driver.h" #include "midi.h" #include "playback.h" #define DRIVER_AUTHOR "Markus Grabner <line6@grabner-graz.at>" #define DRIVER_DESC "Line 6 USB Driver" /* This is Line 6's MIDI manufacturer ID. */ const unsigned char line6_midi_id[3] = { 0x00, 0x01, 0x0c }; EXPORT_SYMBOL_GPL(line6_midi_id); /* Code to request version of POD, Variax interface (and maybe other devices). */ static const char line6_request_version[] = { 0xf0, 0x7e, 0x7f, 0x06, 0x01, 0xf7 }; /* Class for asynchronous messages. */ struct message { struct usb_line6 *line6; const char *buffer; int size; int done; }; /* Forward declarations. */ static void line6_data_received(struct urb *urb); static int line6_send_raw_message_async_part(struct message *msg, struct urb *urb); /* Start to listen on endpoint. */ static int line6_start_listen(struct usb_line6 *line6) { int err; if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) { usb_fill_int_urb(line6->urb_listen, line6->usbdev, usb_rcvintpipe(line6->usbdev, line6->properties->ep_ctrl_r), line6->buffer_listen, LINE6_BUFSIZE_LISTEN, line6_data_received, line6, line6->interval); } else { usb_fill_bulk_urb(line6->urb_listen, line6->usbdev, usb_rcvbulkpipe(line6->usbdev, line6->properties->ep_ctrl_r), line6->buffer_listen, LINE6_BUFSIZE_LISTEN, line6_data_received, line6); } /* sanity checks of EP before actually submitting */ if (usb_urb_ep_type_check(line6->urb_listen)) { dev_err(line6->ifcdev, "invalid control EP\n"); return -EINVAL; } line6->urb_listen->actual_length = 0; err = usb_submit_urb(line6->urb_listen, GFP_ATOMIC); return err; } /* Stop listening on endpoint. */ static void line6_stop_listen(struct usb_line6 *line6) { usb_kill_urb(line6->urb_listen); } /* Send raw message in pieces of wMaxPacketSize bytes. */ int line6_send_raw_message(struct usb_line6 *line6, const char *buffer, int size) { int i, done = 0; const struct line6_properties *properties = line6->properties; for (i = 0; i < size; i += line6->max_packet_size) { int partial; const char *frag_buf = buffer + i; int frag_size = min(line6->max_packet_size, size - i); int retval; if (properties->capabilities & LINE6_CAP_CONTROL_MIDI) { retval = usb_interrupt_msg(line6->usbdev, usb_sndintpipe(line6->usbdev, properties->ep_ctrl_w), (char *)frag_buf, frag_size, &partial, LINE6_TIMEOUT); } else { retval = usb_bulk_msg(line6->usbdev, usb_sndbulkpipe(line6->usbdev, properties->ep_ctrl_w), (char *)frag_buf, frag_size, &partial, LINE6_TIMEOUT); } if (retval) { dev_err(line6->ifcdev, "usb_bulk_msg failed (%d)\n", retval); break; } done += frag_size; } return done; } EXPORT_SYMBOL_GPL(line6_send_raw_message); /* Notification of completion of asynchronous request transmission. */ static void line6_async_request_sent(struct urb *urb) { struct message *msg = (struct message *)urb->context; if (msg->done >= msg->size) { usb_free_urb(urb); kfree(msg); } else line6_send_raw_message_async_part(msg, urb); } /* Asynchronously send part of a raw message. 
*/ static int line6_send_raw_message_async_part(struct message *msg, struct urb *urb) { int retval; struct usb_line6 *line6 = msg->line6; int done = msg->done; int bytes = min(msg->size - done, line6->max_packet_size); if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) { usb_fill_int_urb(urb, line6->usbdev, usb_sndintpipe(line6->usbdev, line6->properties->ep_ctrl_w), (char *)msg->buffer + done, bytes, line6_async_request_sent, msg, line6->interval); } else { usb_fill_bulk_urb(urb, line6->usbdev, usb_sndbulkpipe(line6->usbdev, line6->properties->ep_ctrl_w), (char *)msg->buffer + done, bytes, line6_async_request_sent, msg); } msg->done += bytes; /* sanity checks of EP before actually submitting */ retval = usb_urb_ep_type_check(urb); if (retval < 0) goto error; retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval < 0) goto error; return 0; error: dev_err(line6->ifcdev, "%s: usb_submit_urb failed (%d)\n", __func__, retval); usb_free_urb(urb); kfree(msg); return retval; } /* Asynchronously send raw message. */ int line6_send_raw_message_async(struct usb_line6 *line6, const char *buffer, int size) { struct message *msg; struct urb *urb; /* create message: */ msg = kzalloc(sizeof(struct message), GFP_ATOMIC); if (msg == NULL) return -ENOMEM; /* create URB: */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (urb == NULL) { kfree(msg); return -ENOMEM; } /* set message data: */ msg->line6 = line6; msg->buffer = buffer; msg->size = size; msg->done = 0; /* start sending: */ return line6_send_raw_message_async_part(msg, urb); } EXPORT_SYMBOL_GPL(line6_send_raw_message_async); /* Send asynchronous device version request. */ int line6_version_request_async(struct usb_line6 *line6) { char *buffer; int retval; buffer = kmemdup(line6_request_version, sizeof(line6_request_version), GFP_ATOMIC); if (buffer == NULL) return -ENOMEM; retval = line6_send_raw_message_async(line6, buffer, sizeof(line6_request_version)); kfree(buffer); return retval; } EXPORT_SYMBOL_GPL(line6_version_request_async); /* Send sysex message in pieces of wMaxPacketSize bytes. */ int line6_send_sysex_message(struct usb_line6 *line6, const char *buffer, int size) { return line6_send_raw_message(line6, buffer, size + SYSEX_EXTRA_SIZE) - SYSEX_EXTRA_SIZE; } EXPORT_SYMBOL_GPL(line6_send_sysex_message); /* Allocate buffer for sysex message and prepare header. @param code sysex message code @param size number of bytes between code and sysex end */ char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1, int code2, int size) { char *buffer = kmalloc(size + SYSEX_EXTRA_SIZE, GFP_ATOMIC); if (!buffer) return NULL; buffer[0] = LINE6_SYSEX_BEGIN; memcpy(buffer + 1, line6_midi_id, sizeof(line6_midi_id)); buffer[sizeof(line6_midi_id) + 1] = code1; buffer[sizeof(line6_midi_id) + 2] = code2; buffer[sizeof(line6_midi_id) + 3 + size] = LINE6_SYSEX_END; return buffer; } EXPORT_SYMBOL_GPL(line6_alloc_sysex_buffer); /* Notification of data received from the Line 6 device. 
*/ static void line6_data_received(struct urb *urb) { struct usb_line6 *line6 = (struct usb_line6 *)urb->context; struct midi_buffer *mb = &line6->line6midi->midibuf_in; unsigned long flags; int done; if (urb->status == -ESHUTDOWN) return; if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) { spin_lock_irqsave(&line6->line6midi->lock, flags); done = line6_midibuf_write(mb, urb->transfer_buffer, urb->actual_length); if (done < urb->actual_length) { line6_midibuf_ignore(mb, done); dev_dbg(line6->ifcdev, "%d %d buffer overflow - message skipped\n", done, urb->actual_length); } spin_unlock_irqrestore(&line6->line6midi->lock, flags); for (;;) { spin_lock_irqsave(&line6->line6midi->lock, flags); done = line6_midibuf_read(mb, line6->buffer_message, LINE6_MIDI_MESSAGE_MAXLEN, LINE6_MIDIBUF_READ_RX); spin_unlock_irqrestore(&line6->line6midi->lock, flags); if (done <= 0) break; line6->message_length = done; line6_midi_receive(line6, line6->buffer_message, done); if (line6->process_message) line6->process_message(line6); } } else { line6->buffer_message = urb->transfer_buffer; line6->message_length = urb->actual_length; if (line6->process_message) line6->process_message(line6); line6->buffer_message = NULL; } line6_start_listen(line6); } #define LINE6_READ_WRITE_STATUS_DELAY 2 /* milliseconds */ #define LINE6_READ_WRITE_MAX_RETRIES 50 /* Read data from device. */ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data, unsigned datalen) { struct usb_device *usbdev = line6->usbdev; int ret; u8 len; unsigned count; if (address > 0xffff || datalen > 0xff) return -EINVAL; /* query the serial number: */ ret = usb_control_msg_send(usbdev, 0, 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, (datalen << 8) | 0x21, address, NULL, 0, LINE6_TIMEOUT, GFP_KERNEL); if (ret) { dev_err(line6->ifcdev, "read request failed (error %d)\n", ret); goto exit; } /* Wait for data length. We'll get 0xff until length arrives. */ for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) { mdelay(LINE6_READ_WRITE_STATUS_DELAY); ret = usb_control_msg_recv(usbdev, 0, 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x0012, 0x0000, &len, 1, LINE6_TIMEOUT, GFP_KERNEL); if (ret) { dev_err(line6->ifcdev, "receive length failed (error %d)\n", ret); goto exit; } if (len != 0xff) break; } ret = -EIO; if (len == 0xff) { dev_err(line6->ifcdev, "read failed after %d retries\n", count); goto exit; } else if (len != datalen) { /* should be equal or something went wrong */ dev_err(line6->ifcdev, "length mismatch (expected %d, got %d)\n", (int)datalen, len); goto exit; } /* receive the result: */ ret = usb_control_msg_recv(usbdev, 0, 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x0013, 0x0000, data, datalen, LINE6_TIMEOUT, GFP_KERNEL); if (ret) dev_err(line6->ifcdev, "read failed (error %d)\n", ret); exit: return ret; } EXPORT_SYMBOL_GPL(line6_read_data); /* Write data to device. 
*/ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data, unsigned datalen) { struct usb_device *usbdev = line6->usbdev; int ret; unsigned char *status; int count; if (address > 0xffff || datalen > 0xffff) return -EINVAL; status = kmalloc(1, GFP_KERNEL); if (!status) return -ENOMEM; ret = usb_control_msg_send(usbdev, 0, 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x0022, address, data, datalen, LINE6_TIMEOUT, GFP_KERNEL); if (ret) { dev_err(line6->ifcdev, "write request failed (error %d)\n", ret); goto exit; } for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) { mdelay(LINE6_READ_WRITE_STATUS_DELAY); ret = usb_control_msg_recv(usbdev, 0, 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x0012, 0x0000, status, 1, LINE6_TIMEOUT, GFP_KERNEL); if (ret) { dev_err(line6->ifcdev, "receiving status failed (error %d)\n", ret); goto exit; } if (*status != 0xff) break; } if (*status == 0xff) { dev_err(line6->ifcdev, "write failed after %d retries\n", count); ret = -EIO; } else if (*status != 0) { dev_err(line6->ifcdev, "write failed (error %d)\n", ret); ret = -EIO; } exit: kfree(status); return ret; } EXPORT_SYMBOL_GPL(line6_write_data); /* Read Line 6 device serial number. (POD, TonePort, GuitarPort) */ int line6_read_serial_number(struct usb_line6 *line6, u32 *serial_number) { return line6_read_data(line6, 0x80d0, serial_number, sizeof(*serial_number)); } EXPORT_SYMBOL_GPL(line6_read_serial_number); /* Card destructor. */ static void line6_destruct(struct snd_card *card) { struct usb_line6 *line6 = card->private_data; struct usb_device *usbdev = line6->usbdev; /* Free buffer memory first. We cannot depend on the existence of private * data from the (podhd) module, it may be gone already during this call */ kfree(line6->buffer_message); kfree(line6->buffer_listen); /* then free URBs: */ usb_free_urb(line6->urb_listen); line6->urb_listen = NULL; /* decrement reference counters: */ usb_put_dev(usbdev); } static void line6_get_usb_properties(struct usb_line6 *line6) { struct usb_device *usbdev = line6->usbdev; const struct line6_properties *properties = line6->properties; int pipe; struct usb_host_endpoint *ep = NULL; if (properties->capabilities & LINE6_CAP_CONTROL) { if (properties->capabilities & LINE6_CAP_CONTROL_MIDI) { pipe = usb_rcvintpipe(line6->usbdev, line6->properties->ep_ctrl_r); } else { pipe = usb_rcvbulkpipe(line6->usbdev, line6->properties->ep_ctrl_r); } ep = usbdev->ep_in[usb_pipeendpoint(pipe)]; } /* Control data transfer properties */ if (ep) { line6->interval = ep->desc.bInterval; line6->max_packet_size = le16_to_cpu(ep->desc.wMaxPacketSize); } else { if (properties->capabilities & LINE6_CAP_CONTROL) { dev_err(line6->ifcdev, "endpoint not available, using fallback values"); } line6->interval = LINE6_FALLBACK_INTERVAL; line6->max_packet_size = LINE6_FALLBACK_MAXPACKETSIZE; } /* Isochronous transfer properties */ if (usbdev->speed == USB_SPEED_LOW) { line6->intervals_per_second = USB_LOW_INTERVALS_PER_SECOND; line6->iso_buffers = USB_LOW_ISO_BUFFERS; } else { line6->intervals_per_second = USB_HIGH_INTERVALS_PER_SECOND; line6->iso_buffers = USB_HIGH_ISO_BUFFERS; } } /* Enable buffering of incoming messages, flush the buffer */ static int line6_hwdep_open(struct snd_hwdep *hw, struct file *file) { struct usb_line6 *line6 = hw->private_data; /* NOTE: hwdep layer provides atomicity here */ line6->messages.active = 1; line6->messages.nonblock = file->f_flags & O_NONBLOCK ? 
1 : 0; return 0; } /* Stop buffering */ static int line6_hwdep_release(struct snd_hwdep *hw, struct file *file) { struct usb_line6 *line6 = hw->private_data; line6->messages.active = 0; return 0; } /* Read from circular buffer, return to user */ static long line6_hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count, loff_t *offset) { struct usb_line6 *line6 = hwdep->private_data; long rv = 0; unsigned int out_count; if (mutex_lock_interruptible(&line6->messages.read_lock)) return -ERESTARTSYS; while (kfifo_len(&line6->messages.fifo) == 0) { mutex_unlock(&line6->messages.read_lock); if (line6->messages.nonblock) return -EAGAIN; rv = wait_event_interruptible( line6->messages.wait_queue, kfifo_len(&line6->messages.fifo) != 0); if (rv < 0) return rv; if (mutex_lock_interruptible(&line6->messages.read_lock)) return -ERESTARTSYS; } if (kfifo_peek_len(&line6->messages.fifo) > count) { /* Buffer too small; allow re-read of the current item... */ rv = -EINVAL; } else { rv = kfifo_to_user(&line6->messages.fifo, buf, count, &out_count); if (rv == 0) rv = out_count; } mutex_unlock(&line6->messages.read_lock); return rv; } /* Write directly (no buffering) to device by user*/ static long line6_hwdep_write(struct snd_hwdep *hwdep, const char __user *data, long count, loff_t *offset) { struct usb_line6 *line6 = hwdep->private_data; int rv; char *data_copy; if (count > line6->max_packet_size * LINE6_RAW_MESSAGES_MAXCOUNT) { /* This is an arbitrary limit - still better than nothing... */ return -EINVAL; } data_copy = memdup_user(data, count); if (IS_ERR(data_copy)) return PTR_ERR(data_copy); rv = line6_send_raw_message(line6, data_copy, count); kfree(data_copy); return rv; } static __poll_t line6_hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait) { __poll_t rv; struct usb_line6 *line6 = hwdep->private_data; poll_wait(file, &line6->messages.wait_queue, wait); mutex_lock(&line6->messages.read_lock); rv = kfifo_len(&line6->messages.fifo) == 0 ? 
0 : EPOLLIN | EPOLLRDNORM; mutex_unlock(&line6->messages.read_lock); return rv; } static const struct snd_hwdep_ops hwdep_ops = { .open = line6_hwdep_open, .release = line6_hwdep_release, .read = line6_hwdep_read, .write = line6_hwdep_write, .poll = line6_hwdep_poll, }; /* Insert into circular buffer */ static void line6_hwdep_push_message(struct usb_line6 *line6) { if (!line6->messages.active) return; if (kfifo_avail(&line6->messages.fifo) >= line6->message_length) { /* No race condition here, there's only one writer */ kfifo_in(&line6->messages.fifo, line6->buffer_message, line6->message_length); } /* else TODO: signal overflow */ wake_up_interruptible(&line6->messages.wait_queue); } static int line6_hwdep_init(struct usb_line6 *line6) { int err; struct snd_hwdep *hwdep; /* TODO: usb_driver_claim_interface(); */ line6->process_message = line6_hwdep_push_message; line6->messages.active = 0; init_waitqueue_head(&line6->messages.wait_queue); mutex_init(&line6->messages.read_lock); INIT_KFIFO(line6->messages.fifo); err = snd_hwdep_new(line6->card, "config", 0, &hwdep); if (err < 0) goto end; strcpy(hwdep->name, "config"); hwdep->iface = SNDRV_HWDEP_IFACE_LINE6; hwdep->ops = hwdep_ops; hwdep->private_data = line6; hwdep->exclusive = true; end: return err; } static int line6_init_cap_control(struct usb_line6 *line6) { int ret; /* initialize USB buffers: */ line6->buffer_listen = kzalloc(LINE6_BUFSIZE_LISTEN, GFP_KERNEL); if (!line6->buffer_listen) return -ENOMEM; line6->urb_listen = usb_alloc_urb(0, GFP_KERNEL); if (!line6->urb_listen) return -ENOMEM; if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) { line6->buffer_message = kzalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL); if (!line6->buffer_message) return -ENOMEM; ret = line6_init_midi(line6); if (ret < 0) return ret; } else { ret = line6_hwdep_init(line6); if (ret < 0) return ret; } ret = line6_start_listen(line6); if (ret < 0) { dev_err(line6->ifcdev, "cannot start listening: %d\n", ret); return ret; } return 0; } static void line6_startup_work(struct work_struct *work) { struct usb_line6 *line6 = container_of(work, struct usb_line6, startup_work.work); if (line6->startup) line6->startup(line6); } /* Probe USB device. 
*/ int line6_probe(struct usb_interface *interface, const struct usb_device_id *id, const char *driver_name, const struct line6_properties *properties, int (*private_init)(struct usb_line6 *, const struct usb_device_id *id), size_t data_size) { struct usb_device *usbdev = interface_to_usbdev(interface); struct snd_card *card; struct usb_line6 *line6; int interface_number; int ret; if (WARN_ON(data_size < sizeof(*line6))) return -EINVAL; /* we don't handle multiple configurations */ if (usbdev->descriptor.bNumConfigurations != 1) return -ENODEV; ret = snd_card_new(&interface->dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, THIS_MODULE, data_size, &card); if (ret < 0) return ret; /* store basic data: */ line6 = card->private_data; line6->card = card; line6->properties = properties; line6->usbdev = usbdev; line6->ifcdev = &interface->dev; INIT_DELAYED_WORK(&line6->startup_work, line6_startup_work); strcpy(card->id, properties->id); strcpy(card->driver, driver_name); strcpy(card->shortname, properties->name); sprintf(card->longname, "Line 6 %s at USB %s", properties->name, dev_name(line6->ifcdev)); card->private_free = line6_destruct; usb_set_intfdata(interface, line6); /* increment reference counters: */ usb_get_dev(usbdev); /* initialize device info: */ dev_info(&interface->dev, "Line 6 %s found\n", properties->name); /* query interface number */ interface_number = interface->cur_altsetting->desc.bInterfaceNumber; /* TODO reserves the bus bandwidth even without actual transfer */ ret = usb_set_interface(usbdev, interface_number, properties->altsetting); if (ret < 0) { dev_err(&interface->dev, "set_interface failed\n"); goto error; } line6_get_usb_properties(line6); if (properties->capabilities & LINE6_CAP_CONTROL) { ret = line6_init_cap_control(line6); if (ret < 0) goto error; } /* initialize device data based on device: */ ret = private_init(line6, id); if (ret < 0) goto error; /* creation of additional special files should go here */ dev_info(&interface->dev, "Line 6 %s now attached\n", properties->name); return 0; error: /* we can call disconnect callback here because no close-sync is * needed yet at this point */ line6_disconnect(interface); return ret; } EXPORT_SYMBOL_GPL(line6_probe); /* Line 6 device disconnected. */ void line6_disconnect(struct usb_interface *interface) { struct usb_line6 *line6 = usb_get_intfdata(interface); struct usb_device *usbdev = interface_to_usbdev(interface); if (!line6) return; if (WARN_ON(usbdev != line6->usbdev)) return; cancel_delayed_work_sync(&line6->startup_work); if (line6->urb_listen != NULL) line6_stop_listen(line6); snd_card_disconnect(line6->card); if (line6->line6pcm) line6_pcm_disconnect(line6->line6pcm); if (line6->disconnect) line6->disconnect(line6); dev_info(&interface->dev, "Line 6 %s now disconnected\n", line6->properties->name); /* make sure the device isn't destructed twice: */ usb_set_intfdata(interface, NULL); snd_card_free_when_closed(line6->card); } EXPORT_SYMBOL_GPL(line6_disconnect); #ifdef CONFIG_PM /* Suspend Line 6 device. */ int line6_suspend(struct usb_interface *interface, pm_message_t message) { struct usb_line6 *line6 = usb_get_intfdata(interface); struct snd_line6_pcm *line6pcm = line6->line6pcm; snd_power_change_state(line6->card, SNDRV_CTL_POWER_D3hot); if (line6->properties->capabilities & LINE6_CAP_CONTROL) line6_stop_listen(line6); if (line6pcm != NULL) line6pcm->flags = 0; return 0; } EXPORT_SYMBOL_GPL(line6_suspend); /* Resume Line 6 device. 
*/ int line6_resume(struct usb_interface *interface) { struct usb_line6 *line6 = usb_get_intfdata(interface); if (line6->properties->capabilities & LINE6_CAP_CONTROL) line6_start_listen(line6); snd_power_change_state(line6->card, SNDRV_CTL_POWER_D0); return 0; } EXPORT_SYMBOL_GPL(line6_resume); #endif /* CONFIG_PM */ MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
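/*
 * Editor's note: line6_send_raw_message() above splits an arbitrarily
 * long message into wMaxPacketSize-sized fragments and stops at the
 * first failed transfer, returning the number of bytes actually sent.
 * Below is a minimal userspace sketch of that fragmentation arithmetic;
 * the packet size is an assumed value and send_fragment() is a
 * hypothetical stand-in for the USB transfer.
 */
#include <stdio.h>

#define MAX_PACKET_SIZE 8	/* assumed, for illustration only */

static int send_fragment(const char *buf, int len)
{
	printf("fragment of %d bytes\n", len);
	return len;		/* pretend the full fragment went through */
}

static int send_raw_message(const char *buffer, int size)
{
	int i, done = 0;

	for (i = 0; i < size; i += MAX_PACKET_SIZE) {
		int frag_size = size - i < MAX_PACKET_SIZE ?
				size - i : MAX_PACKET_SIZE;

		if (send_fragment(buffer + i, frag_size) != frag_size)
			break;	/* short transfer: report what was done */
		done += frag_size;
	}
	return done;		/* a 20-byte message -> fragments of 8, 8, 4 */
}

int main(void)
{
	char msg[20] = { 0 };

	return send_raw_message(msg, sizeof(msg)) == sizeof(msg) ? 0 : 1;
}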
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_BITOPS_H #define _ASM_X86_BITOPS_H /* * Copyright 1992, Linus Torvalds. * * Note: inlines with more than a single statement should be marked * __always_inline to avoid problems with older gcc's inlining heuristics. */ #ifndef _LINUX_BITOPS_H #error only <linux/bitops.h> can be included directly #endif #include <linux/compiler.h> #include <asm/alternative.h> #include <asm/rmwcc.h> #include <asm/barrier.h> #if BITS_PER_LONG == 32 # define _BITOPS_LONG_SHIFT 5 #elif BITS_PER_LONG == 64 # define _BITOPS_LONG_SHIFT 6 #else # error "Unexpected BITS_PER_LONG" #endif #define BIT_64(n) (U64_C(1) << (n)) /* * These have to be done with inline assembly: that way the bit-setting * is guaranteed to be atomic. All bit operations return 0 if the bit * was cleared before the operation and != 0 if it was not. * * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). */ #define RLONG_ADDR(x) "m" (*(volatile long *) (x)) #define WBYTE_ADDR(x) "+m" (*(volatile char *) (x)) #define ADDR RLONG_ADDR(addr) /* * We do the locked ops that don't return the old value as * a mask operation on a byte. 
*/ #define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3)) #define CONST_MASK(nr) (1 << ((nr) & 7)) static __always_inline void arch_set_bit(long nr, volatile unsigned long *addr) { if (__builtin_constant_p(nr)) { asm_inline volatile(LOCK_PREFIX "orb %b1,%0" : CONST_MASK_ADDR(nr, addr) : "iq" (CONST_MASK(nr)) : "memory"); } else { asm_inline volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); } } static __always_inline void arch___set_bit(unsigned long nr, volatile unsigned long *addr) { asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); } static __always_inline void arch_clear_bit(long nr, volatile unsigned long *addr) { if (__builtin_constant_p(nr)) { asm_inline volatile(LOCK_PREFIX "andb %b1,%0" : CONST_MASK_ADDR(nr, addr) : "iq" (~CONST_MASK(nr))); } else { asm_inline volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); } } static __always_inline void arch_clear_bit_unlock(long nr, volatile unsigned long *addr) { barrier(); arch_clear_bit(nr, addr); } static __always_inline void arch___clear_bit(unsigned long nr, volatile unsigned long *addr) { asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); } static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask, volatile unsigned long *addr) { bool negative; asm_inline volatile(LOCK_PREFIX "xorb %2,%1" CC_SET(s) : CC_OUT(s) (negative), WBYTE_ADDR(addr) : "iq" ((char)mask) : "memory"); return negative; } #define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte static __always_inline void arch___clear_bit_unlock(long nr, volatile unsigned long *addr) { arch___clear_bit(nr, addr); } static __always_inline void arch___change_bit(unsigned long nr, volatile unsigned long *addr) { asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); } static __always_inline void arch_change_bit(long nr, volatile unsigned long *addr) { if (__builtin_constant_p(nr)) { asm_inline volatile(LOCK_PREFIX "xorb %b1,%0" : CONST_MASK_ADDR(nr, addr) : "iq" (CONST_MASK(nr))); } else { asm_inline volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" : : RLONG_ADDR(addr), "Ir" (nr) : "memory"); } } static __always_inline bool arch_test_and_set_bit(long nr, volatile unsigned long *addr) { return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr); } static __always_inline bool arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr) { return arch_test_and_set_bit(nr, addr); } static __always_inline bool arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { bool oldbit; asm(__ASM_SIZE(bts) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit) : ADDR, "Ir" (nr) : "memory"); return oldbit; } static __always_inline bool arch_test_and_clear_bit(long nr, volatile unsigned long *addr) { return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr); } /* * Note: the operation is performed atomically with respect to * the local CPU, but not other CPUs. Portable code should not * rely on this behaviour. 
* KVM relies on this behaviour on x86 for modifying memory that is also * accessed from a hypervisor on the same CPU if running in a VM: don't change * this without also updating arch/x86/kernel/kvm.c */ static __always_inline bool arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { bool oldbit; asm volatile(__ASM_SIZE(btr) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit) : ADDR, "Ir" (nr) : "memory"); return oldbit; } static __always_inline bool arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { bool oldbit; asm volatile(__ASM_SIZE(btc) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit) : ADDR, "Ir" (nr) : "memory"); return oldbit; } static __always_inline bool arch_test_and_change_bit(long nr, volatile unsigned long *addr) { return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr); } static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) { return ((1UL << (nr & (BITS_PER_LONG-1))) & (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; } static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr) { bool oldbit; asm volatile("testb %2,%1" CC_SET(nz) : CC_OUT(nz) (oldbit) : "m" (((unsigned char *)addr)[nr >> 3]), "i" (1 << (nr & 7)) :"memory"); return oldbit; } static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr) { bool oldbit; asm volatile(__ASM_SIZE(bt) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit) : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory"); return oldbit; } static __always_inline bool arch_test_bit(unsigned long nr, const volatile unsigned long *addr) { return __builtin_constant_p(nr) ? constant_test_bit(nr, addr) : variable_test_bit(nr, addr); } static __always_inline bool arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr) { return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) : variable_test_bit(nr, addr); } static __always_inline unsigned long variable__ffs(unsigned long word) { asm("rep; bsf %1,%0" : "=r" (word) : ASM_INPUT_RM (word)); return word; } /** * __ffs - find first set bit in word * @word: The word to search * * Undefined if no bit exists, so code should check against 0 first. */ #define __ffs(word) \ (__builtin_constant_p(word) ? \ (unsigned long)__builtin_ctzl(word) : \ variable__ffs(word)) static __always_inline unsigned long variable_ffz(unsigned long word) { asm("rep; bsf %1,%0" : "=r" (word) : "r" (~word)); return word; } /** * ffz - find first zero bit in word * @word: The word to search * * Undefined if no zero exists, so code should check against ~0UL first. */ #define ffz(word) \ (__builtin_constant_p(word) ? \ (unsigned long)__builtin_ctzl(~word) : \ variable_ffz(word)) /* * __fls: find last set bit in word * @word: The word to search * * Undefined if no set bit exists, so code should check against 0 first. */ static __always_inline unsigned long __fls(unsigned long word) { if (__builtin_constant_p(word)) return BITS_PER_LONG - 1 - __builtin_clzl(word); asm("bsr %1,%0" : "=r" (word) : ASM_INPUT_RM (word)); return word; } #undef ADDR #ifdef __KERNEL__ static __always_inline int variable_ffs(int x) { int r; #ifdef CONFIG_X86_64 /* * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the * dest reg is undefined if x==0, but their CPU architect says its * value is written to set it to the same as before, except that the * top 32 bits will be cleared. * * We cannot do this on 32 bits because at the very least some * 486 CPUs did not behave this way. 
*/ asm("bsfl %1,%0" : "=r" (r) : ASM_INPUT_RM (x), "0" (-1)); #elif defined(CONFIG_X86_CMOV) asm("bsfl %1,%0\n\t" "cmovzl %2,%0" : "=&r" (r) : "rm" (x), "r" (-1)); #else asm("bsfl %1,%0\n\t" "jnz 1f\n\t" "movl $-1,%0\n" "1:" : "=r" (r) : "rm" (x)); #endif return r + 1; } /** * ffs - find first set bit in word * @x: the word to search * * This is defined the same way as the libc and compiler builtin ffs * routines, therefore differs in spirit from the other bitops. * * ffs(value) returns 0 if value is 0 or the position of the first * set bit if value is nonzero. The first (least significant) bit * is at position 1. */ #define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x)) /** * fls - find last set bit in word * @x: the word to search * * This is defined in a similar way as the libc and compiler builtin * ffs, but returns the position of the most significant set bit. * * fls(value) returns 0 if value is 0 or the position of the last * set bit if value is nonzero. The last (most significant) bit is * at position 32. */ static __always_inline int fls(unsigned int x) { int r; if (__builtin_constant_p(x)) return x ? 32 - __builtin_clz(x) : 0; #ifdef CONFIG_X86_64 /* * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the * dest reg is undefined if x==0, but their CPU architect says its * value is written to set it to the same as before, except that the * top 32 bits will be cleared. * * We cannot do this on 32 bits because at the very least some * 486 CPUs did not behave this way. */ asm("bsrl %1,%0" : "=r" (r) : ASM_INPUT_RM (x), "0" (-1)); #elif defined(CONFIG_X86_CMOV) asm("bsrl %1,%0\n\t" "cmovzl %2,%0" : "=&r" (r) : "rm" (x), "rm" (-1)); #else asm("bsrl %1,%0\n\t" "jnz 1f\n\t" "movl $-1,%0\n" "1:" : "=r" (r) : "rm" (x)); #endif return r + 1; } /** * fls64 - find last set bit in a 64-bit word * @x: the word to search * * This is defined in a similar way as the libc and compiler builtin * ffsll, but returns the position of the most significant set bit. * * fls64(value) returns 0 if value is 0 or the position of the last * set bit if value is nonzero. The last (most significant) bit is * at position 64. */ #ifdef CONFIG_X86_64 static __always_inline int fls64(__u64 x) { int bitpos = -1; if (__builtin_constant_p(x)) return x ? 64 - __builtin_clzll(x) : 0; /* * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the * dest reg is undefined if x==0, but their CPU architect says its * value is written to set it to the same as before. */ asm("bsrq %1,%q0" : "+r" (bitpos) : ASM_INPUT_RM (x)); return bitpos + 1; } #else #include <asm-generic/bitops/fls64.h> #endif #include <asm-generic/bitops/sched.h> #include <asm/arch_hweight.h> #include <asm-generic/bitops/const_hweight.h> #include <asm-generic/bitops/instrumented-atomic.h> #include <asm-generic/bitops/instrumented-non-atomic.h> #include <asm-generic/bitops/instrumented-lock.h> #include <asm-generic/bitops/le.h> #include <asm-generic/bitops/ext2-atomic-setbit.h> #endif /* __KERNEL__ */ #endif /* _ASM_X86_BITOPS_H */
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM jbd2 #if !defined(_TRACE_JBD2_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_JBD2_H #include <linux/jbd2.h> #include <linux/tracepoint.h> struct transaction_chp_stats_s; struct transaction_run_stats_s; TRACE_EVENT(jbd2_checkpoint, TP_PROTO(journal_t *journal, int result), TP_ARGS(journal, result), TP_STRUCT__entry( __field( dev_t, dev ) __field( int, result ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->result = result; ), TP_printk("dev %d,%d result %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->result) ); DECLARE_EVENT_CLASS(jbd2_commit, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction), TP_STRUCT__entry( __field( dev_t, dev ) __field( char, sync_commit ) __field( tid_t, transaction ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->sync_commit = commit_transaction->t_synchronous_commit; __entry->transaction = commit_transaction->t_tid; ), TP_printk("dev %d,%d transaction %u sync %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->transaction, __entry->sync_commit) ); DEFINE_EVENT(jbd2_commit, jbd2_start_commit, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction) ); DEFINE_EVENT(jbd2_commit, jbd2_commit_locking, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction) ); DEFINE_EVENT(jbd2_commit, jbd2_commit_flushing, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction) ); DEFINE_EVENT(jbd2_commit, 
jbd2_commit_logging, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction) ); DEFINE_EVENT(jbd2_commit, jbd2_drop_transaction, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction) ); TRACE_EVENT(jbd2_end_commit, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), TP_ARGS(journal, commit_transaction), TP_STRUCT__entry( __field( dev_t, dev ) __field( char, sync_commit ) __field( tid_t, transaction ) __field( tid_t, head ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->sync_commit = commit_transaction->t_synchronous_commit; __entry->transaction = commit_transaction->t_tid; __entry->head = journal->j_tail_sequence; ), TP_printk("dev %d,%d transaction %u sync %d head %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->transaction, __entry->sync_commit, __entry->head) ); TRACE_EVENT(jbd2_submit_inode_data, TP_PROTO(struct inode *inode), TP_ARGS(inode), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; ), TP_printk("dev %d,%d ino %lu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino) ); DECLARE_EVENT_CLASS(jbd2_handle_start_class, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( unsigned int, type ) __field( unsigned int, line_no ) __field( int, requested_blocks) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->type = type; __entry->line_no = line_no; __entry->requested_blocks = requested_blocks; ), TP_printk("dev %d,%d tid %u type %u line_no %u " "requested_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->requested_blocks) ); DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_start, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks) ); DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_restart, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int requested_blocks), TP_ARGS(dev, tid, type, line_no, requested_blocks) ); TRACE_EVENT(jbd2_handle_extend, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int buffer_credits, int requested_blocks), TP_ARGS(dev, tid, type, line_no, buffer_credits, requested_blocks), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( unsigned int, type ) __field( unsigned int, line_no ) __field( int, buffer_credits ) __field( int, requested_blocks) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->type = type; __entry->line_no = line_no; __entry->buffer_credits = buffer_credits; __entry->requested_blocks = requested_blocks; ), TP_printk("dev %d,%d tid %u type %u line_no %u " "buffer_credits %d requested_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->buffer_credits, __entry->requested_blocks) ); TRACE_EVENT(jbd2_handle_stats, TP_PROTO(dev_t dev, tid_t tid, unsigned int type, unsigned int line_no, int interval, int sync, int requested_blocks, int dirtied_blocks), TP_ARGS(dev, tid, type, line_no, interval, sync, requested_blocks, dirtied_blocks), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( unsigned int, type ) __field( 
unsigned int, line_no ) __field( int, interval ) __field( int, sync ) __field( int, requested_blocks) __field( int, dirtied_blocks ) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->type = type; __entry->line_no = line_no; __entry->interval = interval; __entry->sync = sync; __entry->requested_blocks = requested_blocks; __entry->dirtied_blocks = dirtied_blocks; ), TP_printk("dev %d,%d tid %u type %u line_no %u interval %d " "sync %d requested_blocks %d dirtied_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->type, __entry->line_no, __entry->interval, __entry->sync, __entry->requested_blocks, __entry->dirtied_blocks) ); TRACE_EVENT(jbd2_run_stats, TP_PROTO(dev_t dev, tid_t tid, struct transaction_run_stats_s *stats), TP_ARGS(dev, tid, stats), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( unsigned long, wait ) __field( unsigned long, request_delay ) __field( unsigned long, running ) __field( unsigned long, locked ) __field( unsigned long, flushing ) __field( unsigned long, logging ) __field( __u32, handle_count ) __field( __u32, blocks ) __field( __u32, blocks_logged ) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->wait = stats->rs_wait; __entry->request_delay = stats->rs_request_delay; __entry->running = stats->rs_running; __entry->locked = stats->rs_locked; __entry->flushing = stats->rs_flushing; __entry->logging = stats->rs_logging; __entry->handle_count = stats->rs_handle_count; __entry->blocks = stats->rs_blocks; __entry->blocks_logged = stats->rs_blocks_logged; ), TP_printk("dev %d,%d tid %u wait %u request_delay %u running %u " "locked %u flushing %u logging %u handle_count %u " "blocks %u blocks_logged %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, jiffies_to_msecs(__entry->wait), jiffies_to_msecs(__entry->request_delay), jiffies_to_msecs(__entry->running), jiffies_to_msecs(__entry->locked), jiffies_to_msecs(__entry->flushing), jiffies_to_msecs(__entry->logging), __entry->handle_count, __entry->blocks, __entry->blocks_logged) ); TRACE_EVENT(jbd2_checkpoint_stats, TP_PROTO(dev_t dev, tid_t tid, struct transaction_chp_stats_s *stats), TP_ARGS(dev, tid, stats), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tid ) __field( unsigned long, chp_time ) __field( __u32, forced_to_close ) __field( __u32, written ) __field( __u32, dropped ) ), TP_fast_assign( __entry->dev = dev; __entry->tid = tid; __entry->chp_time = stats->cs_chp_time; __entry->forced_to_close= stats->cs_forced_to_close; __entry->written = stats->cs_written; __entry->dropped = stats->cs_dropped; ), TP_printk("dev %d,%d tid %u chp_time %u forced_to_close %u " "written %u dropped %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, jiffies_to_msecs(__entry->chp_time), __entry->forced_to_close, __entry->written, __entry->dropped) ); TRACE_EVENT(jbd2_update_log_tail, TP_PROTO(journal_t *journal, tid_t first_tid, unsigned long block_nr, unsigned long freed), TP_ARGS(journal, first_tid, block_nr, freed), TP_STRUCT__entry( __field( dev_t, dev ) __field( tid_t, tail_sequence ) __field( tid_t, first_tid ) __field(unsigned long, block_nr ) __field(unsigned long, freed ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->tail_sequence = journal->j_tail_sequence; __entry->first_tid = first_tid; __entry->block_nr = block_nr; __entry->freed = freed; ), TP_printk("dev %d,%d from %u to %u offset %lu freed %lu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tail_sequence, __entry->first_tid, 
__entry->block_nr, __entry->freed) ); TRACE_EVENT(jbd2_write_superblock, TP_PROTO(journal_t *journal, blk_opf_t write_flags), TP_ARGS(journal, write_flags), TP_STRUCT__entry( __field( dev_t, dev ) __field( blk_opf_t, write_flags ) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->write_flags = write_flags; ), TP_printk("dev %d,%d write_flags %x", MAJOR(__entry->dev), MINOR(__entry->dev), (__force u32)__entry->write_flags) ); TRACE_EVENT(jbd2_lock_buffer_stall, TP_PROTO(dev_t dev, unsigned long stall_ms), TP_ARGS(dev, stall_ms), TP_STRUCT__entry( __field( dev_t, dev ) __field(unsigned long, stall_ms ) ), TP_fast_assign( __entry->dev = dev; __entry->stall_ms = stall_ms; ), TP_printk("dev %d,%d stall_ms %lu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->stall_ms) ); DECLARE_EVENT_CLASS(jbd2_journal_shrink, TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count), TP_ARGS(journal, nr_to_scan, count), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, nr_to_scan) __field(unsigned long, count) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->nr_to_scan = nr_to_scan; __entry->count = count; ), TP_printk("dev %d,%d nr_to_scan %lu count %lu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_to_scan, __entry->count) ); DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_count, TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count), TP_ARGS(journal, nr_to_scan, count) ); DEFINE_EVENT(jbd2_journal_shrink, jbd2_shrink_scan_enter, TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long count), TP_ARGS(journal, nr_to_scan, count) ); TRACE_EVENT(jbd2_shrink_scan_exit, TP_PROTO(journal_t *journal, unsigned long nr_to_scan, unsigned long nr_shrunk, unsigned long count), TP_ARGS(journal, nr_to_scan, nr_shrunk, count), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, nr_to_scan) __field(unsigned long, nr_shrunk) __field(unsigned long, count) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->nr_to_scan = nr_to_scan; __entry->nr_shrunk = nr_shrunk; __entry->count = count; ), TP_printk("dev %d,%d nr_to_scan %lu nr_shrunk %lu count %lu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_to_scan, __entry->nr_shrunk, __entry->count) ); TRACE_EVENT(jbd2_shrink_checkpoint_list, TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid, unsigned long nr_freed, tid_t next_tid), TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, next_tid), TP_STRUCT__entry( __field(dev_t, dev) __field(tid_t, first_tid) __field(tid_t, tid) __field(tid_t, last_tid) __field(unsigned long, nr_freed) __field(tid_t, next_tid) ), TP_fast_assign( __entry->dev = journal->j_fs_dev->bd_dev; __entry->first_tid = first_tid; __entry->tid = tid; __entry->last_tid = last_tid; __entry->nr_freed = nr_freed; __entry->next_tid = next_tid; ), TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu " "next transaction %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->first_tid, __entry->tid, __entry->last_tid, __entry->nr_freed, __entry->next_tid) ); #endif /* _TRACE_JBD2_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
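Each TRACE_EVENT()/DEFINE_EVENT() above expands into a trace_<name>() inline that kernel code calls with the TP_PROTO arguments. As a minimal, hypothetical call-site sketch (report_lock_stall and its jiffies bookkeeping are illustrative, not the actual jbd2 code), emitting jbd2_lock_buffer_stall could look like:

#include <linux/jiffies.h>
#include <trace/events/jbd2.h>

/* Illustrative only: report how long a buffer lock stalled, in
 * milliseconds, via the jbd2_lock_buffer_stall tracepoint above. */
static void report_lock_stall(dev_t dev, unsigned long start_jiffies)
{
	unsigned long stall_ms = jiffies_to_msecs(jiffies - start_jiffies);

	if (stall_ms)
		trace_jbd2_lock_buffer_stall(dev, stall_ms);
}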
/* SPDX-License-Identifier: GPL-2.0 */ /* * generic net pointers */ #ifndef __NET_GENERIC_H__ #define __NET_GENERIC_H__ #include <linux/bug.h> #include <linux/rcupdate.h> #include <net/net_namespace.h> /* * Generic net pointers are to be used by modules to put some private * stuff on the struct net without explicit struct net modification * * The rules are simple: * 1. set pernet_operations->id. After register_pernet_device you * will have the id of your private pointer. * 2. set pernet_operations->size to have the code allocate and free * a private structure pointed to from struct net. * 3. do not change this pointer while the net is alive; * 4. do not try to have any private reference on the net_generic object. * * After accomplishing all of the above, the private pointer can be * accessed with the net_generic() call. */ struct net_generic { union { struct { unsigned int len; struct rcu_head rcu; } s; DECLARE_FLEX_ARRAY(void *, ptr); }; }; static inline void *net_generic(const struct net *net, unsigned int id) { struct net_generic *ng; void *ptr; rcu_read_lock(); ng = rcu_dereference(net->gen); ptr = ng->ptr[id]; rcu_read_unlock(); return ptr; } #endif
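The rules above in practice, as a minimal sketch with hypothetical foo_* names: register pernet_operations with .id and .size set, then fetch the per-namespace data with net_generic():

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static unsigned int foo_net_id;	/* filled in by registration (rule 1) */

struct foo_net {		/* hypothetical private per-netns state */
	int counter;
};

static struct pernet_operations foo_net_ops = {
	.id   = &foo_net_id,
	.size = sizeof(struct foo_net),	/* allocated/freed by the core (rule 2) */
};

/* Look up this module's private pointer for @net (rules 3 and 4 still
 * apply: never change it, never take a private reference on it). */
static inline struct foo_net *foo_pernet(struct net *net)
{
	return net_generic(net, foo_net_id);
}

static int __init foo_init(void)
{
	return register_pernet_subsys(&foo_net_ops);
}

static void __exit foo_exit(void)
{
	unregister_pernet_subsys(&foo_net_ops);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");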
// SPDX-License-Identifier: GPL-2.0-or-later /* * Mirics MSi001 silicon tuner driver * * Copyright (C) 2013 Antti Palosaari <crope@iki.fi> * Copyright (C) 2014 Antti Palosaari <crope@iki.fi> */ #include <linux/module.h> #include <linux/gcd.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> static const struct v4l2_frequency_band bands[] = { { .type = V4L2_TUNER_RF, .index = 0, .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = 49000000, .rangehigh = 263000000, }, { .type = V4L2_TUNER_RF, .index = 1, .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = 390000000, .rangehigh = 960000000, }, }; struct msi001_dev { struct spi_device *spi; struct v4l2_subdev sd; /* Controls */ struct v4l2_ctrl_handler hdl; struct v4l2_ctrl *bandwidth_auto; struct v4l2_ctrl *bandwidth; struct v4l2_ctrl *lna_gain; struct v4l2_ctrl *mixer_gain; struct v4l2_ctrl *if_gain; unsigned int f_tuner; }; static inline struct msi001_dev *sd_to_msi001_dev(struct v4l2_subdev *sd) { return container_of(sd, struct msi001_dev, sd); } static int msi001_wreg(struct msi001_dev *dev, u32 data) { /* Register format: 4 bits addr + 20 bits value */ return spi_write(dev->spi, &data, 3); }; static int msi001_set_gain(struct msi001_dev *dev, int lna_gain, int mixer_gain, int if_gain) { struct spi_device *spi = dev->spi; int ret; u32 reg; dev_dbg(&spi->dev, "lna=%d mixer=%d if=%d\n", lna_gain, mixer_gain, if_gain); reg = 1 << 0; reg |= (59 - if_gain) << 4; reg |= 0 << 10; reg |= (1 - mixer_gain) << 12; reg |= (1 - lna_gain) << 13; reg |= 4 << 14; reg |= 0 << 17; ret =
msi001_wreg(dev, reg); if (ret) goto err; return 0; err: dev_dbg(&spi->dev, "failed %d\n", ret); return ret; }; static int msi001_set_tuner(struct msi001_dev *dev) { struct spi_device *spi = dev->spi; int ret, i; unsigned int uitmp, div_n, k, k_thresh, k_frac, div_lo, f_if1; u32 reg; u64 f_vco; u8 mode, filter_mode; static const struct { u32 rf; u8 mode; u8 div_lo; } band_lut[] = { { 50000000, 0xe1, 16}, /* AM_MODE2, antenna 2 */ {108000000, 0x42, 32}, /* VHF_MODE */ {330000000, 0x44, 16}, /* B3_MODE */ {960000000, 0x48, 4}, /* B45_MODE */ { ~0U, 0x50, 2}, /* BL_MODE */ }; static const struct { u32 freq; u8 filter_mode; } if_freq_lut[] = { { 0, 0x03}, /* Zero IF */ { 450000, 0x02}, /* 450 kHz IF */ {1620000, 0x01}, /* 1.62 MHz IF */ {2048000, 0x00}, /* 2.048 MHz IF */ }; static const struct { u32 freq; u8 val; } bandwidth_lut[] = { { 200000, 0x00}, /* 200 kHz */ { 300000, 0x01}, /* 300 kHz */ { 600000, 0x02}, /* 600 kHz */ {1536000, 0x03}, /* 1.536 MHz */ {5000000, 0x04}, /* 5 MHz */ {6000000, 0x05}, /* 6 MHz */ {7000000, 0x06}, /* 7 MHz */ {8000000, 0x07}, /* 8 MHz */ }; unsigned int f_rf = dev->f_tuner; /* * bandwidth (Hz) * 200000, 300000, 600000, 1536000, 5000000, 6000000, 7000000, 8000000 */ unsigned int bandwidth; /* * intermediate frequency (Hz) * 0, 450000, 1620000, 2048000 */ unsigned int f_if = 0; #define F_REF 24000000 #define DIV_PRE_N 4 #define F_VCO_STEP div_lo dev_dbg(&spi->dev, "f_rf=%d f_if=%d\n", f_rf, f_if); for (i = 0; i < ARRAY_SIZE(band_lut); i++) { if (f_rf <= band_lut[i].rf) { mode = band_lut[i].mode; div_lo = band_lut[i].div_lo; break; } } if (i == ARRAY_SIZE(band_lut)) { ret = -EINVAL; goto err; } /* AM_MODE is upconverted */ if ((mode >> 0) & 0x1) f_if1 = 5 * F_REF; else f_if1 = 0; for (i = 0; i < ARRAY_SIZE(if_freq_lut); i++) { if (f_if == if_freq_lut[i].freq) { filter_mode = if_freq_lut[i].filter_mode; break; } } if (i == ARRAY_SIZE(if_freq_lut)) { ret = -EINVAL; goto err; } /* filters */ bandwidth = dev->bandwidth->val; bandwidth = clamp(bandwidth, 200000U, 8000000U); for (i = 0; i < ARRAY_SIZE(bandwidth_lut); i++) { if (bandwidth <= bandwidth_lut[i].freq) { bandwidth = bandwidth_lut[i].val; break; } } if (i == ARRAY_SIZE(bandwidth_lut)) { ret = -EINVAL; goto err; } dev->bandwidth->val = bandwidth_lut[i].freq; dev_dbg(&spi->dev, "bandwidth selected=%d\n", bandwidth_lut[i].freq); /* * Fractional-N synthesizer * * +---------------------------------------+ * v | * Fref +----+ +-------+ +----+ +------+ +---+ * ------> | PD | --> | VCO | ------> | /4 | --> | /N.F | <-- | K | * +----+ +-------+ +----+ +------+ +---+ * | * | * v * +-------+ Fout * | /Rout | ------> * +-------+ */ /* Calculate PLL integer and fractional control word. */ f_vco = (u64) (f_rf + f_if + f_if1) * div_lo; div_n = div_u64_rem(f_vco, DIV_PRE_N * F_REF, &k); k_thresh = (DIV_PRE_N * F_REF) / F_VCO_STEP; k_frac = div_u64((u64) k * k_thresh, (DIV_PRE_N * F_REF)); /* Find out greatest common divisor and divide to smaller. */ uitmp = gcd(k_thresh, k_frac); k_thresh /= uitmp; k_frac /= uitmp; /* Force divide to reg max. Resolution will be reduced. */ uitmp = DIV_ROUND_UP(k_thresh, 4095); k_thresh = DIV_ROUND_CLOSEST(k_thresh, uitmp); k_frac = DIV_ROUND_CLOSEST(k_frac, uitmp); /* Calculate real RF set. 
*/ uitmp = (unsigned int) F_REF * DIV_PRE_N * div_n; uitmp += (unsigned int) F_REF * DIV_PRE_N * k_frac / k_thresh; uitmp /= div_lo; dev_dbg(&spi->dev, "f_rf=%u:%u f_vco=%llu div_n=%u k_thresh=%u k_frac=%u div_lo=%u\n", f_rf, uitmp, f_vco, div_n, k_thresh, k_frac, div_lo); ret = msi001_wreg(dev, 0x00000e); if (ret) goto err; ret = msi001_wreg(dev, 0x000003); if (ret) goto err; reg = 0 << 0; reg |= mode << 4; reg |= filter_mode << 12; reg |= bandwidth << 14; reg |= 0x02 << 17; reg |= 0x00 << 20; ret = msi001_wreg(dev, reg); if (ret) goto err; reg = 5 << 0; reg |= k_thresh << 4; reg |= 1 << 19; reg |= 1 << 21; ret = msi001_wreg(dev, reg); if (ret) goto err; reg = 2 << 0; reg |= k_frac << 4; reg |= div_n << 16; ret = msi001_wreg(dev, reg); if (ret) goto err; ret = msi001_set_gain(dev, dev->lna_gain->cur.val, dev->mixer_gain->cur.val, dev->if_gain->cur.val); if (ret) goto err; reg = 6 << 0; reg |= 63 << 4; reg |= 4095 << 10; ret = msi001_wreg(dev, reg); if (ret) goto err; return 0; err: dev_dbg(&spi->dev, "failed %d\n", ret); return ret; } static int msi001_standby(struct v4l2_subdev *sd) { struct msi001_dev *dev = sd_to_msi001_dev(sd); return msi001_wreg(dev, 0x000000); } static int msi001_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *v) { struct msi001_dev *dev = sd_to_msi001_dev(sd); struct spi_device *spi = dev->spi; dev_dbg(&spi->dev, "index=%d\n", v->index); strscpy(v->name, "Mirics MSi001", sizeof(v->name)); v->type = V4L2_TUNER_RF; v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS; v->rangelow = 49000000; v->rangehigh = 960000000; return 0; } static int msi001_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *v) { struct msi001_dev *dev = sd_to_msi001_dev(sd); struct spi_device *spi = dev->spi; dev_dbg(&spi->dev, "index=%d\n", v->index); return 0; } static int msi001_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f) { struct msi001_dev *dev = sd_to_msi001_dev(sd); struct spi_device *spi = dev->spi; dev_dbg(&spi->dev, "tuner=%d\n", f->tuner); f->frequency = dev->f_tuner; return 0; } static int msi001_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequency *f) { struct msi001_dev *dev = sd_to_msi001_dev(sd); struct spi_device *spi = dev->spi; unsigned int band; dev_dbg(&spi->dev, "tuner=%d type=%d frequency=%u\n", f->tuner, f->type, f->frequency); if (f->frequency < ((bands[0].rangehigh + bands[1].rangelow) / 2)) band = 0; else band = 1; dev->f_tuner = clamp_t(unsigned int, f->frequency, bands[band].rangelow, bands[band].rangehigh); return msi001_set_tuner(dev); } static int msi001_enum_freq_bands(struct v4l2_subdev *sd, struct v4l2_frequency_band *band) { struct msi001_dev *dev = sd_to_msi001_dev(sd); struct spi_device *spi = dev->spi; dev_dbg(&spi->dev, "tuner=%d type=%d index=%d\n", band->tuner, band->type, band->index); if (band->index >= ARRAY_SIZE(bands)) return -EINVAL; band->capability = bands[band->index].capability; band->rangelow = bands[band->index].rangelow; band->rangehigh = bands[band->index].rangehigh; return 0; } static const struct v4l2_subdev_tuner_ops msi001_tuner_ops = { .standby = msi001_standby, .g_tuner = msi001_g_tuner, .s_tuner = msi001_s_tuner, .g_frequency = msi001_g_frequency, .s_frequency = msi001_s_frequency, .enum_freq_bands = msi001_enum_freq_bands, }; static const struct v4l2_subdev_ops msi001_ops = { .tuner = &msi001_tuner_ops, }; static int msi001_s_ctrl(struct v4l2_ctrl *ctrl) { struct msi001_dev *dev = container_of(ctrl->handler, struct msi001_dev, hdl); struct spi_device *spi = dev->spi; int ret; 
dev_dbg(&spi->dev, "id=%d name=%s val=%d min=%lld max=%lld step=%lld\n", ctrl->id, ctrl->name, ctrl->val, ctrl->minimum, ctrl->maximum, ctrl->step); switch (ctrl->id) { case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: case V4L2_CID_RF_TUNER_BANDWIDTH: ret = msi001_set_tuner(dev); break; case V4L2_CID_RF_TUNER_LNA_GAIN: ret = msi001_set_gain(dev, dev->lna_gain->val, dev->mixer_gain->cur.val, dev->if_gain->cur.val); break; case V4L2_CID_RF_TUNER_MIXER_GAIN: ret = msi001_set_gain(dev, dev->lna_gain->cur.val, dev->mixer_gain->val, dev->if_gain->cur.val); break; case V4L2_CID_RF_TUNER_IF_GAIN: ret = msi001_set_gain(dev, dev->lna_gain->cur.val, dev->mixer_gain->cur.val, dev->if_gain->val); break; default: dev_dbg(&spi->dev, "unknown control %d\n", ctrl->id); ret = -EINVAL; } return ret; } static const struct v4l2_ctrl_ops msi001_ctrl_ops = { .s_ctrl = msi001_s_ctrl, }; static int msi001_probe(struct spi_device *spi) { struct msi001_dev *dev; int ret; dev_dbg(&spi->dev, "\n"); dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto err; } dev->spi = spi; dev->f_tuner = bands[0].rangelow; v4l2_spi_subdev_init(&dev->sd, spi, &msi001_ops); /* Register controls */ v4l2_ctrl_handler_init(&dev->hdl, 5); dev->bandwidth_auto = v4l2_ctrl_new_std(&dev->hdl, &msi001_ctrl_ops, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 1, 1); dev->bandwidth = v4l2_ctrl_new_std(&dev->hdl, &msi001_ctrl_ops, V4L2_CID_RF_TUNER_BANDWIDTH, 200000, 8000000, 1, 200000); if (dev->hdl.error) { ret = dev->hdl.error; dev_err(&spi->dev, "Could not initialize controls\n"); /* control init failed, free handler */ goto err_ctrl_handler_free; } v4l2_ctrl_auto_cluster(2, &dev->bandwidth_auto, 0, false); dev->lna_gain = v4l2_ctrl_new_std(&dev->hdl, &msi001_ctrl_ops, V4L2_CID_RF_TUNER_LNA_GAIN, 0, 1, 1, 1); dev->mixer_gain = v4l2_ctrl_new_std(&dev->hdl, &msi001_ctrl_ops, V4L2_CID_RF_TUNER_MIXER_GAIN, 0, 1, 1, 1); dev->if_gain = v4l2_ctrl_new_std(&dev->hdl, &msi001_ctrl_ops, V4L2_CID_RF_TUNER_IF_GAIN, 0, 59, 1, 0); if (dev->hdl.error) { ret = dev->hdl.error; dev_err(&spi->dev, "Could not initialize controls\n"); /* control init failed, free handler */ goto err_ctrl_handler_free; } dev->sd.ctrl_handler = &dev->hdl; return 0; err_ctrl_handler_free: v4l2_ctrl_handler_free(&dev->hdl); kfree(dev); err: return ret; } static void msi001_remove(struct spi_device *spi) { struct v4l2_subdev *sd = spi_get_drvdata(spi); struct msi001_dev *dev = sd_to_msi001_dev(sd); dev_dbg(&spi->dev, "\n"); /* * Registered by v4l2_spi_new_subdev() from master driver, but we must * unregister it from here. Weird. */ v4l2_device_unregister_subdev(&dev->sd); v4l2_ctrl_handler_free(&dev->hdl); kfree(dev); } static const struct spi_device_id msi001_id_table[] = { {"msi001", 0}, {} }; MODULE_DEVICE_TABLE(spi, msi001_id_table); static struct spi_driver msi001_driver = { .driver = { .name = "msi001", .suppress_bind_attrs = true, }, .probe = msi001_probe, .remove = msi001_remove, .id_table = msi001_id_table, }; module_spi_driver(msi001_driver); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("Mirics MSi001"); MODULE_LICENSE("GPL");
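The fractional-N arithmetic in msi001_set_tuner() is easy to check in isolation. A userspace sketch of the same math, assuming F_REF = 24 MHz, DIV_PRE_N = 4 and the VHF band entry (div_lo = 32, f_if = f_if1 = 0); the driver's extra cap of k_thresh at 4095 is noted but not exercised at this frequency:

#include <stdio.h>
#include <stdint.h>

/* Same contract as the kernel's gcd(): gcd_u(a, 0) == a. */
static unsigned int gcd_u(unsigned int a, unsigned int b)
{
	while (b) {
		unsigned int t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	const uint64_t f_ref = 24000000;	/* F_REF */
	const uint64_t div_pre_n = 4;		/* DIV_PRE_N */
	const uint64_t div_lo = 32;		/* VHF band (<= 108 MHz entry) */
	const uint64_t f_rf = 100000000;	/* tune 100 MHz, f_if = f_if1 = 0 */
	uint64_t f_vco = f_rf * div_lo;		/* VCO runs at div_lo times the RF */
	uint64_t pll_in = div_pre_n * f_ref;	/* input to the /N.F divider */
	unsigned int div_n = f_vco / pll_in;	/* integer part N */
	unsigned int k = f_vco % pll_in;	/* fractional remainder */
	unsigned int k_thresh = pll_in / div_lo;	/* F_VCO_STEP == div_lo */
	unsigned int k_frac = (uint64_t)k * k_thresh / pll_in;
	unsigned int g = gcd_u(k_thresh, k_frac);

	k_thresh /= g;	/* reduce the fraction, as the driver does */
	k_frac /= g;
	/* The driver additionally divides both down so k_thresh fits the
	 * 12-bit register field (<= 4095); unnecessary for these values. */
	printf("div_n=%u k_thresh=%u k_frac=%u\n", div_n, k_thresh, k_frac);
	/* Prints div_n=33 k_thresh=3 k_frac=1, i.e. N.F = 33 + 1/3, so
	 * f_out = 4 * 24 MHz * (33 + 1/3) / 32 = 100 MHz. */
	return 0;
}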
// SPDX-License-Identifier: GPL-2.0+ /* * comedi/drivers/ni_usb6501.c * Comedi driver for National Instruments USB-6501 * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 2014 Luca Ellero <luca.ellero@brickedbrain.com> */ /* * Driver: ni_usb6501 * Description: National Instruments USB-6501 module * Devices: [National Instruments] USB-6501 (ni_usb6501) * Author: Luca Ellero <luca.ellero@brickedbrain.com> * Updated: 8 Sep 2014 * Status: works * * * Configuration Options: * none */ /* * NI-6501 - USB PROTOCOL DESCRIPTION * * Every command is composed of two USB packets: * - request (out) * - response (in) * * Every packet is at least 12 bytes long, here is the meaning of * every field (all values are hex): * * byte 0 is always 00 * byte 1 is always 01 * byte 2 is always 00 * byte 3 is the total packet length * * byte 4 is always 00 * byte 5 is the total packet length - 4 * byte 6 is always 01 * byte 7 is the command * * byte 8 is 02 (request) or 00 (response) * byte 9 is 00 (response) or 10 (port request) or 20 (counter request) * byte 10 is always 00 * byte 11 is 00 (request) or 02 (response) * * PORT PACKETS * * CMD: 0xE READ_PORT *
REQ: 00 01 00 10 00 0C 01 0E 02 10 00 00 00 03 <PORT> 00 * RES: 00 01 00 10 00 0C 01 00 00 00 00 02 00 03 <BMAP> 00 * * CMD: 0xF WRITE_PORT * REQ: 00 01 00 14 00 10 01 0F 02 10 00 00 00 03 <PORT> 00 03 <BMAP> 00 00 * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * CMD: 0x12 SET_PORT_DIR (0 = input, 1 = output) * REQ: 00 01 00 18 00 14 01 12 02 10 00 00 * 00 05 <PORT 0> <PORT 1> <PORT 2> 00 05 00 00 00 00 00 * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * COUNTER PACKETS * * CMD 0x9: START_COUNTER * REQ: 00 01 00 0C 00 08 01 09 02 20 00 00 * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * CMD 0xC: STOP_COUNTER * REQ: 00 01 00 0C 00 08 01 0C 02 20 00 00 * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * CMD 0xE: READ_COUNTER * REQ: 00 01 00 0C 00 08 01 0E 02 20 00 00 * RES: 00 01 00 10 00 0C 01 00 00 00 00 02 <u32 counter value, Big Endian> * * CMD 0xF: WRITE_COUNTER * REQ: 00 01 00 10 00 0C 01 0F 02 20 00 00 <u32 counter value, Big Endian> * RES: 00 01 00 0C 00 08 01 00 00 00 00 02 * * * Please visit https://www.brickedbrain.com if you need * additional information or have any questions. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/comedi/comedi_usb.h> #define NI6501_TIMEOUT 1000 /* Port request packets */ static const u8 READ_PORT_REQUEST[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x0E, 0x02, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00}; static const u8 WRITE_PORT_REQUEST[] = {0x00, 0x01, 0x00, 0x14, 0x00, 0x10, 0x01, 0x0F, 0x02, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00}; static const u8 SET_PORT_DIR_REQUEST[] = {0x00, 0x01, 0x00, 0x18, 0x00, 0x14, 0x01, 0x12, 0x02, 0x10, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00}; /* Counter request packets */ static const u8 START_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x0C, 0x00, 0x08, 0x01, 0x09, 0x02, 0x20, 0x00, 0x00}; static const u8 STOP_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x0C, 0x00, 0x08, 0x01, 0x0C, 0x02, 0x20, 0x00, 0x00}; static const u8 READ_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x0C, 0x00, 0x08, 0x01, 0x0E, 0x02, 0x20, 0x00, 0x00}; static const u8 WRITE_COUNTER_REQUEST[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x0F, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; /* Response packets */ static const u8 GENERIC_RESPONSE[] = {0x00, 0x01, 0x00, 0x0C, 0x00, 0x08, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02}; static const u8 READ_PORT_RESPONSE[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x03, 0x00, 0x00}; static const u8 READ_COUNTER_RESPONSE[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x0C, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00}; /* Largest supported packets */ static const size_t TX_MAX_SIZE = sizeof(SET_PORT_DIR_REQUEST); static const size_t RX_MAX_SIZE = sizeof(READ_PORT_RESPONSE); enum commands { READ_PORT, WRITE_PORT, SET_PORT_DIR, START_COUNTER, STOP_COUNTER, READ_COUNTER, WRITE_COUNTER }; struct ni6501_private { struct usb_endpoint_descriptor *ep_rx; struct usb_endpoint_descriptor *ep_tx; struct mutex mut; u8 *usb_rx_buf; u8 *usb_tx_buf; }; static int ni6501_port_command(struct comedi_device *dev, int command, unsigned int val, u8 *bitmap) { struct usb_device *usb = comedi_to_usb_dev(dev); struct ni6501_private *devpriv = dev->private; int request_size, response_size; u8 *tx = devpriv->usb_tx_buf; int ret; if (command != SET_PORT_DIR && !bitmap) return -EINVAL; mutex_lock(&devpriv->mut); switch (command) { case READ_PORT: request_size = sizeof(READ_PORT_REQUEST); response_size = 
sizeof(READ_PORT_RESPONSE); memcpy(tx, READ_PORT_REQUEST, request_size); tx[14] = val & 0xff; break; case WRITE_PORT: request_size = sizeof(WRITE_PORT_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, WRITE_PORT_REQUEST, request_size); tx[14] = val & 0xff; tx[17] = *bitmap; break; case SET_PORT_DIR: request_size = sizeof(SET_PORT_DIR_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, SET_PORT_DIR_REQUEST, request_size); tx[14] = val & 0xff; tx[15] = (val >> 8) & 0xff; tx[16] = (val >> 16) & 0xff; break; default: ret = -EINVAL; goto end; } ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->ep_tx->bEndpointAddress), devpriv->usb_tx_buf, request_size, NULL, NI6501_TIMEOUT); if (ret) goto end; ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->ep_rx->bEndpointAddress), devpriv->usb_rx_buf, response_size, NULL, NI6501_TIMEOUT); if (ret) goto end; /* Check if results are valid */ if (command == READ_PORT) { *bitmap = devpriv->usb_rx_buf[14]; /* mask bitmap for comparing */ devpriv->usb_rx_buf[14] = 0x00; if (memcmp(devpriv->usb_rx_buf, READ_PORT_RESPONSE, sizeof(READ_PORT_RESPONSE))) { ret = -EINVAL; } } else if (memcmp(devpriv->usb_rx_buf, GENERIC_RESPONSE, sizeof(GENERIC_RESPONSE))) { ret = -EINVAL; } end: mutex_unlock(&devpriv->mut); return ret; } static int ni6501_counter_command(struct comedi_device *dev, int command, u32 *val) { struct usb_device *usb = comedi_to_usb_dev(dev); struct ni6501_private *devpriv = dev->private; int request_size, response_size; u8 *tx = devpriv->usb_tx_buf; int ret; if ((command == READ_COUNTER || command == WRITE_COUNTER) && !val) return -EINVAL; mutex_lock(&devpriv->mut); switch (command) { case START_COUNTER: request_size = sizeof(START_COUNTER_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, START_COUNTER_REQUEST, request_size); break; case STOP_COUNTER: request_size = sizeof(STOP_COUNTER_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, STOP_COUNTER_REQUEST, request_size); break; case READ_COUNTER: request_size = sizeof(READ_COUNTER_REQUEST); response_size = sizeof(READ_COUNTER_RESPONSE); memcpy(tx, READ_COUNTER_REQUEST, request_size); break; case WRITE_COUNTER: request_size = sizeof(WRITE_COUNTER_REQUEST); response_size = sizeof(GENERIC_RESPONSE); memcpy(tx, WRITE_COUNTER_REQUEST, request_size); /* Setup tx packet: bytes 12,13,14,15 hold the */ /* u32 counter value (Big Endian) */ *((__be32 *)&tx[12]) = cpu_to_be32(*val); break; default: ret = -EINVAL; goto end; } ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->ep_tx->bEndpointAddress), devpriv->usb_tx_buf, request_size, NULL, NI6501_TIMEOUT); if (ret) goto end; ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->ep_rx->bEndpointAddress), devpriv->usb_rx_buf, response_size, NULL, NI6501_TIMEOUT); if (ret) goto end; /* Check if results are valid */ if (command == READ_COUNTER) { int i; /* Read counter value: bytes 12,13,14,15 of rx packet */ /* hold the u32 counter value (Big Endian) */ *val = be32_to_cpu(*((__be32 *)&devpriv->usb_rx_buf[12])); /* mask counter value for comparing */ for (i = 12; i < sizeof(READ_COUNTER_RESPONSE); ++i) devpriv->usb_rx_buf[i] = 0x00; if (memcmp(devpriv->usb_rx_buf, READ_COUNTER_RESPONSE, sizeof(READ_COUNTER_RESPONSE))) { ret = -EINVAL; } } else if (memcmp(devpriv->usb_rx_buf, GENERIC_RESPONSE, sizeof(GENERIC_RESPONSE))) { ret = -EINVAL; } end: mutex_unlock(&devpriv->mut); return ret; } static int ni6501_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int 
*data) { int ret; ret = comedi_dio_insn_config(dev, s, insn, data, 0); if (ret) return ret; ret = ni6501_port_command(dev, SET_PORT_DIR, s->io_bits, NULL); if (ret) return ret; return insn->n; } static int ni6501_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int mask; int ret; u8 port; u8 bitmap; mask = comedi_dio_update_state(s, data); for (port = 0; port < 3; port++) { if (mask & (0xFF << port * 8)) { bitmap = (s->state >> port * 8) & 0xFF; ret = ni6501_port_command(dev, WRITE_PORT, port, &bitmap); if (ret) return ret; } } data[1] = 0; for (port = 0; port < 3; port++) { ret = ni6501_port_command(dev, READ_PORT, port, &bitmap); if (ret) return ret; data[1] |= bitmap << port * 8; } return insn->n; } static int ni6501_cnt_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int ret; u32 val = 0; switch (data[0]) { case INSN_CONFIG_ARM: ret = ni6501_counter_command(dev, START_COUNTER, NULL); break; case INSN_CONFIG_DISARM: ret = ni6501_counter_command(dev, STOP_COUNTER, NULL); break; case INSN_CONFIG_RESET: ret = ni6501_counter_command(dev, STOP_COUNTER, NULL); if (ret) break; ret = ni6501_counter_command(dev, WRITE_COUNTER, &val); break; default: return -EINVAL; } return ret ? ret : insn->n; } static int ni6501_cnt_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int ret; u32 val; unsigned int i; for (i = 0; i < insn->n; i++) { ret = ni6501_counter_command(dev, READ_COUNTER, &val); if (ret) return ret; data[i] = val; } return insn->n; } static int ni6501_cnt_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int ret; if (insn->n) { u32 val = data[insn->n - 1]; ret = ni6501_counter_command(dev, WRITE_COUNTER, &val); if (ret) return ret; } return insn->n; } static int ni6501_alloc_usb_buffers(struct comedi_device *dev) { struct ni6501_private *devpriv = dev->private; size_t size; size = usb_endpoint_maxp(devpriv->ep_rx); devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_rx_buf) return -ENOMEM; size = usb_endpoint_maxp(devpriv->ep_tx); devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_tx_buf) return -ENOMEM; return 0; } static int ni6501_find_endpoints(struct comedi_device *dev) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct ni6501_private *devpriv = dev->private; struct usb_host_interface *iface_desc = intf->cur_altsetting; struct usb_endpoint_descriptor *ep_desc; int i; if (iface_desc->desc.bNumEndpoints != 2) { dev_err(dev->class_dev, "Wrong number of endpoints\n"); return -ENODEV; } for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { ep_desc = &iface_desc->endpoint[i].desc; if (usb_endpoint_is_bulk_in(ep_desc)) { if (!devpriv->ep_rx) devpriv->ep_rx = ep_desc; continue; } if (usb_endpoint_is_bulk_out(ep_desc)) { if (!devpriv->ep_tx) devpriv->ep_tx = ep_desc; continue; } } if (!devpriv->ep_rx || !devpriv->ep_tx) return -ENODEV; if (usb_endpoint_maxp(devpriv->ep_rx) < RX_MAX_SIZE) return -ENODEV; if (usb_endpoint_maxp(devpriv->ep_tx) < TX_MAX_SIZE) return -ENODEV; return 0; } static int ni6501_auto_attach(struct comedi_device *dev, unsigned long context) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct ni6501_private *devpriv; struct comedi_subdevice *s; int ret; devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); if (!devpriv) return -ENOMEM; 
mutex_init(&devpriv->mut); usb_set_intfdata(intf, devpriv); ret = ni6501_find_endpoints(dev); if (ret) return ret; ret = ni6501_alloc_usb_buffers(dev); if (ret) return ret; ret = comedi_alloc_subdevices(dev, 2); if (ret) return ret; /* Digital Input/Output subdevice */ s = &dev->subdevices[0]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 24; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = ni6501_dio_insn_bits; s->insn_config = ni6501_dio_insn_config; /* Counter subdevice */ s = &dev->subdevices[1]; s->type = COMEDI_SUBD_COUNTER; s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL; s->n_chan = 1; s->maxdata = 0xffffffff; s->insn_read = ni6501_cnt_insn_read; s->insn_write = ni6501_cnt_insn_write; s->insn_config = ni6501_cnt_insn_config; return 0; } static void ni6501_detach(struct comedi_device *dev) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct ni6501_private *devpriv = dev->private; if (!devpriv) return; mutex_destroy(&devpriv->mut); usb_set_intfdata(intf, NULL); kfree(devpriv->usb_rx_buf); kfree(devpriv->usb_tx_buf); } static struct comedi_driver ni6501_driver = { .module = THIS_MODULE, .driver_name = "ni6501", .auto_attach = ni6501_auto_attach, .detach = ni6501_detach, }; static int ni6501_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { return comedi_usb_auto_config(intf, &ni6501_driver, id->driver_info); } static const struct usb_device_id ni6501_usb_table[] = { { USB_DEVICE(0x3923, 0x718a) }, { } }; MODULE_DEVICE_TABLE(usb, ni6501_usb_table); static struct usb_driver ni6501_usb_driver = { .name = "ni6501", .id_table = ni6501_usb_table, .probe = ni6501_usb_probe, .disconnect = comedi_usb_auto_unconfig, }; module_comedi_usb_driver(ni6501_driver, ni6501_usb_driver); MODULE_AUTHOR("Luca Ellero"); MODULE_DESCRIPTION("Comedi driver for National Instruments USB-6501"); MODULE_LICENSE("GPL");
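The request framing documented at the top of this driver is simple to reproduce. A userspace sketch of the READ_PORT request (cmd 0x0E) under the documented layout; the driver does the equivalent by copying READ_PORT_REQUEST and patching tx[14]:

#include <stdint.h>
#include <string.h>

#define NI6501_READ_PORT_REQ_LEN 16

/* Build a READ_PORT request for @port (0..2); byte 14 carries the port. */
static void ni6501_build_read_port_req(uint8_t buf[NI6501_READ_PORT_REQ_LEN],
				       uint8_t port)
{
	static const uint8_t tmpl[NI6501_READ_PORT_REQ_LEN] = {
		0x00, 0x01, 0x00, 0x10,	/* byte 3: total packet length */
		0x00, 0x0C, 0x01, 0x0E,	/* byte 5: length - 4; byte 7: cmd */
		0x02, 0x10, 0x00, 0x00,	/* byte 8: request; byte 9: port op */
		0x00, 0x03, 0x00, 0x00,	/* byte 14: port, patched below */
	};

	memcpy(buf, tmpl, sizeof(tmpl));
	buf[14] = port;
}

int main(void)
{
	uint8_t req[NI6501_READ_PORT_REQ_LEN];

	ni6501_build_read_port_req(req, 1);	/* request the port 1 bitmap */
	return req[14] == 1 ? 0 : 1;
}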
/* SPDX-License-Identifier: GPL-2.0 */ /* * workqueue.h --- work queue handling for Linux.
*/ #ifndef _LINUX_WORKQUEUE_H #define _LINUX_WORKQUEUE_H #include <linux/timer.h> #include <linux/linkage.h> #include <linux/bitops.h> #include <linux/lockdep.h> #include <linux/threads.h> #include <linux/atomic.h> #include <linux/cpumask_types.h> #include <linux/rcupdate.h> #include <linux/workqueue_types.h> /* * The first word is the work queue pointer and the flags rolled into * one */ #define work_data_bits(work) ((unsigned long *)(&(work)->data)) enum work_bits { WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ WORK_STRUCT_INACTIVE_BIT, /* work item is inactive */ WORK_STRUCT_PWQ_BIT, /* data points to pwq */ WORK_STRUCT_LINKED_BIT, /* next work is linked to this one */ #ifdef CONFIG_DEBUG_OBJECTS_WORK WORK_STRUCT_STATIC_BIT, /* static initializer (debugobjects) */ #endif WORK_STRUCT_FLAG_BITS, /* color for workqueue flushing */ WORK_STRUCT_COLOR_SHIFT = WORK_STRUCT_FLAG_BITS, WORK_STRUCT_COLOR_BITS = 4, /* * When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/ * debugobjects turned off. This makes pwqs aligned to 256 bytes (512 * bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors. * * MSB * [ pwq pointer ] [ flush color ] [ STRUCT flags ] * 4 bits 4 or 5 bits */ WORK_STRUCT_PWQ_SHIFT = WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS, /* * data contains off-queue information when !WORK_STRUCT_PWQ. * * MSB * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ] * 16 bits 1 bit 4 or 5 bits */ WORK_OFFQ_FLAG_SHIFT = WORK_STRUCT_FLAG_BITS, WORK_OFFQ_BH_BIT = WORK_OFFQ_FLAG_SHIFT, WORK_OFFQ_FLAG_END, WORK_OFFQ_FLAG_BITS = WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT, WORK_OFFQ_DISABLE_SHIFT = WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS, WORK_OFFQ_DISABLE_BITS = 16, /* * When a work item is off queue, the high bits encode off-queue flags * and the last pool it was on. Cap pool ID to 31 bits and use the * highest number to indicate that no pool is associated. */ WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_DISABLE_SHIFT + WORK_OFFQ_DISABLE_BITS, WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT, WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31, }; enum work_flags { WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT, WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, #ifdef CONFIG_DEBUG_OBJECTS_WORK WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, #else WORK_STRUCT_STATIC = 0, #endif }; enum wq_misc_consts { WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS), /* not bound to any CPU, prefer the local CPU */ WORK_CPU_UNBOUND = NR_CPUS, /* bit mask for work_busy() return values */ WORK_BUSY_PENDING = 1 << 0, WORK_BUSY_RUNNING = 1 << 1, /* maximum string length for set_worker_desc() */ WORKER_DESC_LEN = 32, }; /* Convenience constants - of type 'unsigned long', not 'enum'! 
*/ #define WORK_OFFQ_BH (1ul << WORK_OFFQ_BH_BIT) #define WORK_OFFQ_FLAG_MASK (((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT) #define WORK_OFFQ_DISABLE_MASK (((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT) #define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1) #define WORK_STRUCT_NO_POOL (WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT) #define WORK_STRUCT_PWQ_MASK (~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1)) #define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) #define WORK_DATA_STATIC_INIT() \ ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)) struct delayed_work { struct work_struct work; struct timer_list timer; /* target workqueue and CPU ->timer uses to queue ->work */ struct workqueue_struct *wq; int cpu; }; struct rcu_work { struct work_struct work; struct rcu_head rcu; /* target workqueue ->rcu uses to queue ->work */ struct workqueue_struct *wq; }; enum wq_affn_scope { WQ_AFFN_DFL, /* use system default */ WQ_AFFN_CPU, /* one pod per CPU */ WQ_AFFN_SMT, /* one pod per SMT */ WQ_AFFN_CACHE, /* one pod per LLC */ WQ_AFFN_NUMA, /* one pod per NUMA node */ WQ_AFFN_SYSTEM, /* one pod across the whole system */ WQ_AFFN_NR_TYPES, }; /** * struct workqueue_attrs - A struct for workqueue attributes. * * This can be used to change attributes of an unbound workqueue. */ struct workqueue_attrs { /** * @nice: nice level */ int nice; /** * @cpumask: allowed CPUs * * Work items in this workqueue are affine to these CPUs and not allowed * to execute on other CPUs. A pool serving a workqueue must have the * same @cpumask. */ cpumask_var_t cpumask; /** * @__pod_cpumask: internal attribute used to create per-pod pools * * Internal use only. * * Per-pod unbound worker pools are used to improve locality. Always a * subset of ->cpumask. A workqueue can be associated with multiple * worker pools with disjoint @__pod_cpumask's. Whether the enforcement * of a pool's @__pod_cpumask is strict depends on @affn_strict. */ cpumask_var_t __pod_cpumask; /** * @affn_strict: affinity scope is strict * * If clear, workqueue will make a best-effort attempt at starting the * worker inside @__pod_cpumask but the scheduler is free to migrate it * outside. * * If set, workers are only allowed to run inside @__pod_cpumask. */ bool affn_strict; /* * Below fields aren't properties of a worker_pool. They only modify how * :c:func:`apply_workqueue_attrs` selects pools and thus don't * participate in pool hash calculations or equality comparisons. * * If @affn_strict is set, @cpumask isn't a property of a worker_pool * either. */ /** * @affn_scope: unbound CPU affinity scope * * CPU pods are used to improve execution locality of unbound work * items. There are multiple pod types, one for each wq_affn_scope, and * every CPU in the system belongs to one pod in every pod type. CPUs * that belong to the same pod share the worker pool. For example, * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker * pool for each NUMA node.
*/ enum wq_affn_scope affn_scope; /** * @ordered: work items must be executed one by one in queueing order */ bool ordered; }; static inline struct delayed_work *to_delayed_work(struct work_struct *work) { return container_of(work, struct delayed_work, work); } static inline struct rcu_work *to_rcu_work(struct work_struct *work) { return container_of(work, struct rcu_work, work); } struct execute_work { struct work_struct work; }; #ifdef CONFIG_LOCKDEP /* * NB: because we have to copy the lockdep_map, setting _key * here is required, otherwise it could get initialised to the * copy of the lockdep_map! */ #define __WORK_INIT_LOCKDEP_MAP(n, k) \ .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k), #else #define __WORK_INIT_LOCKDEP_MAP(n, k) #endif #define __WORK_INITIALIZER(n, f) { \ .data = WORK_DATA_STATIC_INIT(), \ .entry = { &(n).entry, &(n).entry }, \ .func = (f), \ __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ } #define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \ .work = __WORK_INITIALIZER((n).work, (f)), \ .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\ (tflags) | TIMER_IRQSAFE), \ } #define DECLARE_WORK(n, f) \ struct work_struct n = __WORK_INITIALIZER(n, f) #define DECLARE_DELAYED_WORK(n, f) \ struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0) #define DECLARE_DEFERRABLE_WORK(n, f) \ struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE) #ifdef CONFIG_DEBUG_OBJECTS_WORK extern void __init_work(struct work_struct *work, int onstack); extern void destroy_work_on_stack(struct work_struct *work); extern void destroy_delayed_work_on_stack(struct delayed_work *work); static inline unsigned int work_static(struct work_struct *work) { return *work_data_bits(work) & WORK_STRUCT_STATIC; } #else static inline void __init_work(struct work_struct *work, int onstack) { } static inline void destroy_work_on_stack(struct work_struct *work) { } static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { } static inline unsigned int work_static(struct work_struct *work) { return 0; } #endif /* * initialize all of a work item in one go * * NOTE! No point in using "atomic_long_set()": using a direct * assignment of the work data initializer allows the compiler * to generate better code. 
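 *
 * A minimal usage sketch (foo_work and foo_func are hypothetical, not
 * part of this header):
 *
 *	static void foo_func(struct work_struct *work) { ... }
 *	static struct work_struct foo_work;
 *	...
 *	INIT_WORK(&foo_work, foo_func);
 *	schedule_work(&foo_work);
 *
 * For objects with static storage duration, DECLARE_WORK(foo_work,
 * foo_func) performs the equivalent initialization at compile time.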
*/ #ifdef CONFIG_LOCKDEP #define __INIT_WORK_KEY(_work, _func, _onstack, _key) \ do { \ __init_work((_work), _onstack); \ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \ INIT_LIST_HEAD(&(_work)->entry); \ (_work)->func = (_func); \ } while (0) #else #define __INIT_WORK_KEY(_work, _func, _onstack, _key) \ do { \ __init_work((_work), _onstack); \ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ INIT_LIST_HEAD(&(_work)->entry); \ (_work)->func = (_func); \ } while (0) #endif #define __INIT_WORK(_work, _func, _onstack) \ do { \ static __maybe_unused struct lock_class_key __key; \ \ __INIT_WORK_KEY(_work, _func, _onstack, &__key); \ } while (0) #define INIT_WORK(_work, _func) \ __INIT_WORK((_work), (_func), 0) #define INIT_WORK_ONSTACK(_work, _func) \ __INIT_WORK((_work), (_func), 1) #define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \ __INIT_WORK_KEY((_work), (_func), 1, _key) #define __INIT_DELAYED_WORK(_work, _func, _tflags) \ do { \ INIT_WORK(&(_work)->work, (_func)); \ __init_timer(&(_work)->timer, \ delayed_work_timer_fn, \ (_tflags) | TIMER_IRQSAFE); \ } while (0) #define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \ do { \ INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ __init_timer_on_stack(&(_work)->timer, \ delayed_work_timer_fn, \ (_tflags) | TIMER_IRQSAFE); \ } while (0) #define INIT_DELAYED_WORK(_work, _func) \ __INIT_DELAYED_WORK(_work, _func, 0) #define INIT_DELAYED_WORK_ONSTACK(_work, _func) \ __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0) #define INIT_DEFERRABLE_WORK(_work, _func) \ __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE) #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \ __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE) #define INIT_RCU_WORK(_work, _func) \ INIT_WORK(&(_work)->work, (_func)) #define INIT_RCU_WORK_ONSTACK(_work, _func) \ INIT_WORK_ONSTACK(&(_work)->work, (_func)) /** * work_pending - Find out whether a work item is currently pending * @work: The work item in question */ #define work_pending(work) \ test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) /** * delayed_work_pending - Find out whether a delayable work item is currently * pending * @w: The work item in question */ #define delayed_work_pending(w) \ work_pending(&(w)->work) /* * Workqueue flags and constants. For details, please refer to * Documentation/core-api/workqueue.rst. */ enum wq_flags { WQ_BH = 1 << 0, /* execute in bottom half (softirq) context */ WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ WQ_HIGHPRI = 1 << 4, /* high priority */ WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ WQ_SYSFS = 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */ /* * Per-cpu workqueues are generally preferred because they tend to * show better performance thanks to cache locality. Per-cpu * workqueues exclude the scheduler from choosing the CPU to * execute the worker threads, which has an unfortunate side effect * of increasing power consumption. * * The scheduler considers a CPU idle if it doesn't have any task * to execute and tries to keep idle cores idle to conserve power; * however, for example, a per-cpu work item scheduled from an * interrupt handler on an idle CPU will force the scheduler to * execute the work item on that CPU breaking the idleness, which in * turn may lead to more scheduling choices which are sub-optimal * in terms of power consumption. 
* * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default * but become unbound if workqueue.power_efficient kernel param is * specified. Per-cpu workqueues identified as contributing * significantly to power consumption are marked with this flag, and * enabling the power_efficient mode leads to noticeable power saving * at the cost of a small performance disadvantage. * * http://thread.gmane.org/gmane.linux.kernel/1480396 */ WQ_POWER_EFFICIENT = 1 << 7, __WQ_DESTROYING = 1 << 15, /* internal: workqueue is destroying */ __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */ /* BH wq only allows the following flags */ __WQ_BH_ALLOWS = WQ_BH | WQ_HIGHPRI, }; enum wq_consts { WQ_MAX_ACTIVE = 2048, /* I like 2048, better ideas? */ WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE, WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, /* * Per-node default cap on min_active. Unless explicitly set, min_active * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see * workqueue_struct->min_active definition. */ WQ_DFL_MIN_ACTIVE = 8, }; /* * System-wide workqueues which are always present. * * system_wq is the one used by schedule[_delayed]_work[_on](). * Multi-CPU multi-threaded. There are users which expect relatively * short queue flush time. Don't queue works which can run for too * long. * * system_highpri_wq is similar to system_wq but for work items which * require WQ_HIGHPRI. * * system_long_wq is similar to system_wq but may host long running * works. Queue flushing might take relatively long. * * system_unbound_wq is an unbound workqueue. Workers are not bound to * any specific CPU, not concurrency managed, and all queued works are * executed immediately as long as max_active limit is not reached and * resources are available. * * system_freezable_wq is equivalent to system_wq except that it's * freezable. * * *_power_efficient_wq are inclined towards saving power and converted * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise, * they are the same as their non-power-efficient counterparts - e.g. * system_power_efficient_wq is identical to system_wq if * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info. * * system_bh[_highpri]_wq are convenience interfaces to softirq. BH work items * are executed in the queueing CPU's BH context in the queueing order. */ extern struct workqueue_struct *system_wq; extern struct workqueue_struct *system_highpri_wq; extern struct workqueue_struct *system_long_wq; extern struct workqueue_struct *system_unbound_wq; extern struct workqueue_struct *system_freezable_wq; extern struct workqueue_struct *system_power_efficient_wq; extern struct workqueue_struct *system_freezable_power_efficient_wq; extern struct workqueue_struct *system_bh_wq; extern struct workqueue_struct *system_bh_highpri_wq; void workqueue_softirq_action(bool highpri); void workqueue_softirq_dead(unsigned int cpu); /** * alloc_workqueue - allocate a workqueue * @fmt: printf format for the name of the workqueue * @flags: WQ_* flags * @max_active: max in-flight work items, 0 for default * @...: args for @fmt * * For a per-cpu workqueue, @max_active limits the number of in-flight work * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be * executing at most one work item for the workqueue. * * For unbound workqueues, @max_active limits the number of in-flight work items * for the whole system. e.g.
@max_active of 16 indicates that there can be * at most 16 work items executing for the workqueue in the whole system. * * As sharing the same active counter for an unbound workqueue across multiple * NUMA nodes can be expensive, @max_active is distributed to each NUMA node * according to the proportion of the number of online CPUs and enforced * independently. * * Depending on online CPU distribution, a node may end up with per-node * max_active which is significantly lower than @max_active, which can lead to * deadlocks if the per-node concurrency limit is lower than the maximum number * of interdependent work items for the workqueue. * * To guarantee forward progress regardless of online CPU distribution, the * concurrency limit on every node is guaranteed to be equal to or greater than * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means * that the sum of per-node max_active's may be larger than @max_active. * * For detailed information on %WQ_* flags, please refer to * Documentation/core-api/workqueue.rst. * * RETURNS: * Pointer to the allocated workqueue on success, %NULL on failure. */ __printf(1, 4) struct workqueue_struct * alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...); #ifdef CONFIG_LOCKDEP /** * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map * @fmt: printf format for the name of the workqueue * @flags: WQ_* flags * @max_active: max in-flight work items, 0 for default * @lockdep_map: user-defined lockdep_map * @...: args for @fmt * * Same as alloc_workqueue but with a user-defined lockdep_map. Useful for * workqueues created with the same purpose and to avoid leaking a lockdep_map * on each workqueue creation. * * RETURNS: * Pointer to the allocated workqueue on success, %NULL on failure. */ __printf(1, 5) struct workqueue_struct * alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active, struct lockdep_map *lockdep_map, ...); /** * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with * user-defined lockdep_map * * @fmt: printf format for the name of the workqueue * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) * @lockdep_map: user-defined lockdep_map * @args: args for @fmt * * Same as alloc_ordered_workqueue but with a user-defined lockdep_map. * Useful for workqueues created with the same purpose and to avoid leaking a * lockdep_map on each workqueue creation. * * RETURNS: * Pointer to the allocated workqueue on success, %NULL on failure. */ #define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...) \ alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), \ 1, lockdep_map, ##args) #endif /** * alloc_ordered_workqueue - allocate an ordered workqueue * @fmt: printf format for the name of the workqueue * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) * @args: args for @fmt * * Allocate an ordered workqueue. An ordered workqueue executes at * most one work item at any given time in the queued order. They are * implemented as unbound workqueues with @max_active of one. * * RETURNS: * Pointer to the allocated workqueue on success, %NULL on failure. */ #define alloc_ordered_workqueue(fmt, flags, args...)
\ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) #define create_workqueue(name) \ alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name)) #define create_freezable_workqueue(name) \ alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \ WQ_MEM_RECLAIM, 1, (name)) #define create_singlethread_workqueue(name) \ alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name) #define from_work(var, callback_work, work_fieldname) \ container_of(callback_work, typeof(*var), work_fieldname) extern void destroy_workqueue(struct workqueue_struct *wq); struct workqueue_attrs *alloc_workqueue_attrs(void); void free_workqueue_attrs(struct workqueue_attrs *attrs); int apply_workqueue_attrs(struct workqueue_struct *wq, const struct workqueue_attrs *attrs); extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask); extern bool queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work); extern bool queue_work_node(int node, struct workqueue_struct *wq, struct work_struct *work); extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay); extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay); extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork); extern void __flush_workqueue(struct workqueue_struct *wq); extern void drain_workqueue(struct workqueue_struct *wq); extern int schedule_on_each_cpu(work_func_t func); int execute_in_process_context(work_func_t fn, struct execute_work *); extern bool flush_work(struct work_struct *work); extern bool cancel_work(struct work_struct *work); extern bool cancel_work_sync(struct work_struct *work); extern bool flush_delayed_work(struct delayed_work *dwork); extern bool cancel_delayed_work(struct delayed_work *dwork); extern bool cancel_delayed_work_sync(struct delayed_work *dwork); extern bool disable_work(struct work_struct *work); extern bool disable_work_sync(struct work_struct *work); extern bool enable_work(struct work_struct *work); extern bool disable_delayed_work(struct delayed_work *dwork); extern bool disable_delayed_work_sync(struct delayed_work *dwork); extern bool enable_delayed_work(struct delayed_work *dwork); extern bool flush_rcu_work(struct rcu_work *rwork); extern void workqueue_set_max_active(struct workqueue_struct *wq, int max_active); extern void workqueue_set_min_active(struct workqueue_struct *wq, int min_active); extern struct work_struct *current_work(void); extern bool current_is_workqueue_rescuer(void); extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); extern unsigned int work_busy(struct work_struct *work); extern __printf(1, 2) void set_worker_desc(const char *fmt, ...); extern void print_worker_info(const char *log_lvl, struct task_struct *task); extern void show_all_workqueues(void); extern void show_freezable_workqueues(void); extern void show_one_workqueue(struct workqueue_struct *wq); extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task); /** * queue_work - queue work on a workqueue * @wq: workqueue to use * @work: work to queue * * Returns %false if @work was already on a queue, %true otherwise. * * We queue the work to the CPU on which it was submitted, but if the CPU dies * it can be processed by another CPU. 
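 *
 * A minimal usage sketch in comment form (my_fn, my_work and my_wq are
 * illustrative names, not part of this header; my_wq would come from
 * alloc_workqueue()):
 *
 *	static void my_fn(struct work_struct *work) { ... }
 *	static DECLARE_WORK(my_work, my_fn);
 *
 *	queue_work(my_wq, &my_work);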
* * Memory-ordering properties: If it returns %true, guarantees that all stores * preceding the call to queue_work() in the program order will be visible from * the CPU which will execute @work by the time such work executes, e.g., * * { x is initially 0 } * * CPU0 CPU1 * * WRITE_ONCE(x, 1); [ @work is being executed ] * r0 = queue_work(wq, work); r1 = READ_ONCE(x); * * Forbids: r0 == true && r1 == 0 */ static inline bool queue_work(struct workqueue_struct *wq, struct work_struct *work) { return queue_work_on(WORK_CPU_UNBOUND, wq, work); } /** * queue_delayed_work - queue work on a workqueue after delay * @wq: workqueue to use * @dwork: delayable work to queue * @delay: number of jiffies to wait before queueing * * Equivalent to queue_delayed_work_on() but tries to use the local CPU. */ static inline bool queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) { return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); } /** * mod_delayed_work - modify delay of or queue a delayed work * @wq: workqueue to use * @dwork: work to queue * @delay: number of jiffies to wait before queueing * * mod_delayed_work_on() on local CPU. */ static inline bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) { return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); } /** * schedule_work_on - put work task on a specific cpu * @cpu: cpu to put the work task on * @work: job to be done * * This puts a job on a specific cpu */ static inline bool schedule_work_on(int cpu, struct work_struct *work) { return queue_work_on(cpu, system_wq, work); } /** * schedule_work - put work task in global workqueue * @work: job to be done * * Returns %false if @work was already on the kernel-global workqueue and * %true otherwise. * * This puts a job in the kernel-global workqueue if it was not already * queued and leaves it in the same position on the kernel-global * workqueue otherwise. * * Shares the same memory-ordering properties of queue_work(), cf. the * DocBook header of queue_work(). */ static inline bool schedule_work(struct work_struct *work) { return queue_work(system_wq, work); } /** * enable_and_queue_work - Enable and queue a work item on a specific workqueue * @wq: The target workqueue * @work: The work item to be enabled and queued * * This function combines the operations of enable_work() and queue_work(), * providing a convenient way to enable and queue a work item in a single call. * It invokes enable_work() on @work and then queues it if the disable depth * reached 0. Returns %true if the disable depth reached 0 and @work is queued, * and %false otherwise. * * Note that @work is always queued when disable depth reaches zero. If the * desired behavior is queueing only if certain events took place while @work is * disabled, the user should implement the necessary state tracking and perform * explicit conditional queueing after enable_work(). */ static inline bool enable_and_queue_work(struct workqueue_struct *wq, struct work_struct *work) { if (enable_work(work)) { queue_work(wq, work); return true; } return false; } /* * Detect attempt to flush system-wide workqueues at compile time when possible. * Warn attempt to flush system-wide workqueues at runtime. * * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp * for reasons and steps for converting system-wide workqueues into local workqueues. 
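 *
 * An illustrative conversion sketch (my_work and my_wq are hypothetical):
 * instead of queueing on the system workqueue and flushing everyone's work,
 *
 *	schedule_work(&my_work);
 *	flush_scheduled_work();
 *
 * allocate a driver-local workqueue so the flush waits only for this
 * driver's own work items:
 *
 *	my_wq = alloc_workqueue("my_wq", 0, 0);
 *	queue_work(my_wq, &my_work);
 *	flush_workqueue(my_wq);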
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work()						\
({									\
	__warn_flushing_systemwide_wq();				\
	__flush_workqueue(system_wq);					\
})

#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_wq) &&			\
	     _wq == system_wq) ||					\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
	     _wq == system_unbound_wq) ||				\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
									\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu_key(int cpu, long (*fn)(void *), void *arg,
		     struct lock_class_key *key);

/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu(_cpu, _fn, _arg)			\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
})

long work_on_cpu_safe_key(int cpu, long (*fn)(void *), void *arg,
			  struct lock_class_key *key);

/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu_safe(_cpu, _fn, _arg)		\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_safe_key(_cpu, _fn, _arg, &__key);	\
})
#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{
	return 0;
}
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else /* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif /* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);
void __init workqueue_init_topology(void);
#endif
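/*
 * Illustrative example (not part of the header above): how the allocation
 * and queueing primitives are typically combined in a driver.  The names
 * example_wq, example_fn, example_setup and example_teardown are
 * hypothetical.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
	/* runs in process context on a worker thread */
}

static DECLARE_WORK(example_work, example_fn);

static int example_setup(void)
{
	/* @max_active of 0 selects the default (WQ_DFL_ACTIVE) */
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;

	/* returns %false if @example_work was already queued */
	queue_work(example_wq, &example_work);
	return 0;
}

static void example_teardown(void)
{
	/* drains remaining work items, then frees the workqueue */
	destroy_workqueue(example_wq);
}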
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Zarlink DVB-T ZL10353 demodulator
 *
 * Copyright (C) 2006, 2007 Christopher Pascoe <c.pascoe@itee.uq.edu.au>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/div64.h>

#include <media/dvb_frontend.h>
#include "zl10353_priv.h"
#include "zl10353.h"

struct zl10353_state {
	struct i2c_adapter *i2c;
	struct dvb_frontend frontend;

	struct zl10353_config config;

	u32 bandwidth;
	u32 ucblocks;
	u32 frequency;
};

static int debug;
#define dprintk(args...)
\ do { \ if (debug) printk(KERN_DEBUG "zl10353: " args); \ } while (0) static int debug_regs; static int zl10353_single_write(struct dvb_frontend *fe, u8 reg, u8 val) { struct zl10353_state *state = fe->demodulator_priv; u8 buf[2] = { reg, val }; struct i2c_msg msg = { .addr = state->config.demod_address, .flags = 0, .buf = buf, .len = 2 }; int err = i2c_transfer(state->i2c, &msg, 1); if (err != 1) { printk("zl10353: write to reg %x failed (err = %d)!\n", reg, err); return err; } return 0; } static int zl10353_write(struct dvb_frontend *fe, const u8 ibuf[], int ilen) { int err, i; for (i = 0; i < ilen - 1; i++) if ((err = zl10353_single_write(fe, ibuf[0] + i, ibuf[i + 1]))) return err; return 0; } static int zl10353_read_register(struct zl10353_state *state, u8 reg) { int ret; u8 b0[1] = { reg }; u8 b1[1] = { 0 }; struct i2c_msg msg[2] = { { .addr = state->config.demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config.demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) { printk("%s: readreg error (reg=%d, ret==%i)\n", __func__, reg, ret); return ret; } return b1[0]; } static void zl10353_dump_regs(struct dvb_frontend *fe) { struct zl10353_state *state = fe->demodulator_priv; int ret; u8 reg; /* Dump all registers. */ for (reg = 0; ; reg++) { if (reg % 16 == 0) { if (reg) printk(KERN_CONT "\n"); printk(KERN_DEBUG "%02x:", reg); } ret = zl10353_read_register(state, reg); if (ret >= 0) printk(KERN_CONT " %02x", (u8)ret); else printk(KERN_CONT " --"); if (reg == 0xff) break; } printk(KERN_CONT "\n"); } static void zl10353_calc_nominal_rate(struct dvb_frontend *fe, u32 bandwidth, u16 *nominal_rate) { struct zl10353_state *state = fe->demodulator_priv; u32 adc_clock = 450560; /* 45.056 MHz */ u64 value; u8 bw = bandwidth / 1000000; if (state->config.adc_clock) adc_clock = state->config.adc_clock; value = (u64)10 * (1 << 23) / 7 * 125; value = (bw * value) + adc_clock / 2; *nominal_rate = div_u64(value, adc_clock); dprintk("%s: bw %d, adc_clock %d => 0x%x\n", __func__, bw, adc_clock, *nominal_rate); } static void zl10353_calc_input_freq(struct dvb_frontend *fe, u16 *input_freq) { struct zl10353_state *state = fe->demodulator_priv; u32 adc_clock = 450560; /* 45.056 MHz */ int if2 = 361667; /* 36.1667 MHz */ int ife; u64 value; if (state->config.adc_clock) adc_clock = state->config.adc_clock; if (state->config.if2) if2 = state->config.if2; if (adc_clock >= if2 * 2) ife = if2; else { ife = adc_clock - (if2 % adc_clock); if (ife > adc_clock / 2) ife = adc_clock - ife; } value = div_u64((u64)65536 * ife + adc_clock / 2, adc_clock); *input_freq = -value; dprintk("%s: if2 %d, ife %d, adc_clock %d => %d / 0x%x\n", __func__, if2, ife, adc_clock, -(int)value, *input_freq); } static int zl10353_sleep(struct dvb_frontend *fe) { static u8 zl10353_softdown[] = { 0x50, 0x0C, 0x44 }; zl10353_write(fe, zl10353_softdown, sizeof(zl10353_softdown)); return 0; } static int zl10353_set_parameters(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct zl10353_state *state = fe->demodulator_priv; u16 nominal_rate, input_freq; u8 pllbuf[6] = { 0x67 }, acq_ctl = 0; u16 tps = 0; state->frequency = c->frequency; zl10353_single_write(fe, RESET, 0x80); udelay(200); zl10353_single_write(fe, 0xEA, 0x01); udelay(200); zl10353_single_write(fe, 0xEA, 0x00); zl10353_single_write(fe, AGC_TARGET, 0x28); if (c->transmission_mode != TRANSMISSION_MODE_AUTO) acq_ctl |= (1 << 0); if (c->guard_interval != 
GUARD_INTERVAL_AUTO) acq_ctl |= (1 << 1); zl10353_single_write(fe, ACQ_CTL, acq_ctl); switch (c->bandwidth_hz) { case 6000000: /* These are extrapolated from the 7 and 8MHz values */ zl10353_single_write(fe, MCLK_RATIO, 0x97); zl10353_single_write(fe, 0x64, 0x34); zl10353_single_write(fe, 0xcc, 0xdd); break; case 7000000: zl10353_single_write(fe, MCLK_RATIO, 0x86); zl10353_single_write(fe, 0x64, 0x35); zl10353_single_write(fe, 0xcc, 0x73); break; default: c->bandwidth_hz = 8000000; fallthrough; case 8000000: zl10353_single_write(fe, MCLK_RATIO, 0x75); zl10353_single_write(fe, 0x64, 0x36); zl10353_single_write(fe, 0xcc, 0x73); } zl10353_calc_nominal_rate(fe, c->bandwidth_hz, &nominal_rate); zl10353_single_write(fe, TRL_NOMINAL_RATE_1, msb(nominal_rate)); zl10353_single_write(fe, TRL_NOMINAL_RATE_0, lsb(nominal_rate)); state->bandwidth = c->bandwidth_hz; zl10353_calc_input_freq(fe, &input_freq); zl10353_single_write(fe, INPUT_FREQ_1, msb(input_freq)); zl10353_single_write(fe, INPUT_FREQ_0, lsb(input_freq)); /* Hint at TPS settings */ switch (c->code_rate_HP) { case FEC_2_3: tps |= (1 << 7); break; case FEC_3_4: tps |= (2 << 7); break; case FEC_5_6: tps |= (3 << 7); break; case FEC_7_8: tps |= (4 << 7); break; case FEC_1_2: case FEC_AUTO: break; default: return -EINVAL; } switch (c->code_rate_LP) { case FEC_2_3: tps |= (1 << 4); break; case FEC_3_4: tps |= (2 << 4); break; case FEC_5_6: tps |= (3 << 4); break; case FEC_7_8: tps |= (4 << 4); break; case FEC_1_2: case FEC_AUTO: break; case FEC_NONE: if (c->hierarchy == HIERARCHY_AUTO || c->hierarchy == HIERARCHY_NONE) break; fallthrough; default: return -EINVAL; } switch (c->modulation) { case QPSK: break; case QAM_AUTO: case QAM_16: tps |= (1 << 13); break; case QAM_64: tps |= (2 << 13); break; default: return -EINVAL; } switch (c->transmission_mode) { case TRANSMISSION_MODE_2K: case TRANSMISSION_MODE_AUTO: break; case TRANSMISSION_MODE_8K: tps |= (1 << 0); break; default: return -EINVAL; } switch (c->guard_interval) { case GUARD_INTERVAL_1_32: case GUARD_INTERVAL_AUTO: break; case GUARD_INTERVAL_1_16: tps |= (1 << 2); break; case GUARD_INTERVAL_1_8: tps |= (2 << 2); break; case GUARD_INTERVAL_1_4: tps |= (3 << 2); break; default: return -EINVAL; } switch (c->hierarchy) { case HIERARCHY_AUTO: case HIERARCHY_NONE: break; case HIERARCHY_1: tps |= (1 << 10); break; case HIERARCHY_2: tps |= (2 << 10); break; case HIERARCHY_4: tps |= (3 << 10); break; default: return -EINVAL; } zl10353_single_write(fe, TPS_GIVEN_1, msb(tps)); zl10353_single_write(fe, TPS_GIVEN_0, lsb(tps)); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* * If there is no tuner attached to the secondary I2C bus, we call * set_params to program a potential tuner attached somewhere else. * Otherwise, we update the PLL registers via calc_regs. */ if (state->config.no_tuner) { if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } } else if (fe->ops.tuner_ops.calc_regs) { fe->ops.tuner_ops.calc_regs(fe, pllbuf + 1, 5); pllbuf[1] <<= 1; zl10353_write(fe, pllbuf, sizeof(pllbuf)); } zl10353_single_write(fe, 0x5F, 0x13); /* If no attached tuner or invalid PLL registers, just start the FSM. 
*/ if (state->config.no_tuner || fe->ops.tuner_ops.calc_regs == NULL) zl10353_single_write(fe, FSM_GO, 0x01); else zl10353_single_write(fe, TUNER_GO, 0x01); return 0; } static int zl10353_get_parameters(struct dvb_frontend *fe, struct dtv_frontend_properties *c) { struct zl10353_state *state = fe->demodulator_priv; int s6, s9; u16 tps; static const u8 tps_fec_to_api[8] = { FEC_1_2, FEC_2_3, FEC_3_4, FEC_5_6, FEC_7_8, FEC_AUTO, FEC_AUTO, FEC_AUTO }; s6 = zl10353_read_register(state, STATUS_6); s9 = zl10353_read_register(state, STATUS_9); if (s6 < 0 || s9 < 0) return -EREMOTEIO; if ((s6 & (1 << 5)) == 0 || (s9 & (1 << 4)) == 0) return -EINVAL; /* no FE or TPS lock */ tps = zl10353_read_register(state, TPS_RECEIVED_1) << 8 | zl10353_read_register(state, TPS_RECEIVED_0); c->code_rate_HP = tps_fec_to_api[(tps >> 7) & 7]; c->code_rate_LP = tps_fec_to_api[(tps >> 4) & 7]; switch ((tps >> 13) & 3) { case 0: c->modulation = QPSK; break; case 1: c->modulation = QAM_16; break; case 2: c->modulation = QAM_64; break; default: c->modulation = QAM_AUTO; break; } c->transmission_mode = (tps & 0x01) ? TRANSMISSION_MODE_8K : TRANSMISSION_MODE_2K; switch ((tps >> 2) & 3) { case 0: c->guard_interval = GUARD_INTERVAL_1_32; break; case 1: c->guard_interval = GUARD_INTERVAL_1_16; break; case 2: c->guard_interval = GUARD_INTERVAL_1_8; break; case 3: c->guard_interval = GUARD_INTERVAL_1_4; break; default: c->guard_interval = GUARD_INTERVAL_AUTO; break; } switch ((tps >> 10) & 7) { case 0: c->hierarchy = HIERARCHY_NONE; break; case 1: c->hierarchy = HIERARCHY_1; break; case 2: c->hierarchy = HIERARCHY_2; break; case 3: c->hierarchy = HIERARCHY_4; break; default: c->hierarchy = HIERARCHY_AUTO; break; } c->frequency = state->frequency; c->bandwidth_hz = state->bandwidth; c->inversion = INVERSION_AUTO; return 0; } static int zl10353_read_status(struct dvb_frontend *fe, enum fe_status *status) { struct zl10353_state *state = fe->demodulator_priv; int s6, s7, s8; if ((s6 = zl10353_read_register(state, STATUS_6)) < 0) return -EREMOTEIO; if ((s7 = zl10353_read_register(state, STATUS_7)) < 0) return -EREMOTEIO; if ((s8 = zl10353_read_register(state, STATUS_8)) < 0) return -EREMOTEIO; *status = 0; if (s6 & (1 << 2)) *status |= FE_HAS_CARRIER; if (s6 & (1 << 1)) *status |= FE_HAS_VITERBI; if (s6 & (1 << 5)) *status |= FE_HAS_LOCK; if (s7 & (1 << 4)) *status |= FE_HAS_SYNC; if (s8 & (1 << 6)) *status |= FE_HAS_SIGNAL; if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) != (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) *status &= ~FE_HAS_LOCK; return 0; } static int zl10353_read_ber(struct dvb_frontend *fe, u32 *ber) { struct zl10353_state *state = fe->demodulator_priv; *ber = zl10353_read_register(state, RS_ERR_CNT_2) << 16 | zl10353_read_register(state, RS_ERR_CNT_1) << 8 | zl10353_read_register(state, RS_ERR_CNT_0); return 0; } static int zl10353_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct zl10353_state *state = fe->demodulator_priv; u16 signal = zl10353_read_register(state, AGC_GAIN_1) << 10 | zl10353_read_register(state, AGC_GAIN_0) << 2 | 3; *strength = ~signal; return 0; } static int zl10353_read_snr(struct dvb_frontend *fe, u16 *snr) { struct zl10353_state *state = fe->demodulator_priv; u8 _snr; if (debug_regs) zl10353_dump_regs(fe); _snr = zl10353_read_register(state, SNR); *snr = 10 * _snr / 8; return 0; } static int zl10353_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct zl10353_state *state = fe->demodulator_priv; u32 ubl = 0; ubl = zl10353_read_register(state, 
RS_UBC_1) << 8 |
	      zl10353_read_register(state, RS_UBC_0);

	state->ucblocks += ubl;
	*ucblocks = state->ucblocks;

	return 0;
}

static int zl10353_get_tune_settings(struct dvb_frontend *fe,
				     struct dvb_frontend_tune_settings
					 *fe_tune_settings)
{
	fe_tune_settings->min_delay_ms = 1000;
	fe_tune_settings->step_size = 0;
	fe_tune_settings->max_drift = 0;

	return 0;
}

static int zl10353_init(struct dvb_frontend *fe)
{
	struct zl10353_state *state = fe->demodulator_priv;
	u8 zl10353_reset_attach[6] = { 0x50, 0x03, 0x64, 0x46, 0x15, 0x0F };

	if (debug_regs)
		zl10353_dump_regs(fe);
	if (state->config.parallel_ts)
		zl10353_reset_attach[2] &= ~0x20;
	if (state->config.clock_ctl_1)
		zl10353_reset_attach[3] = state->config.clock_ctl_1;
	if (state->config.pll_0)
		zl10353_reset_attach[4] = state->config.pll_0;

	/* Do a "hard" reset if not already done */
	if (zl10353_read_register(state, 0x50) != zl10353_reset_attach[1] ||
	    zl10353_read_register(state, 0x51) != zl10353_reset_attach[2]) {
		zl10353_write(fe, zl10353_reset_attach,
			      sizeof(zl10353_reset_attach));
		if (debug_regs)
			zl10353_dump_regs(fe);
	}

	return 0;
}

static int zl10353_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
	struct zl10353_state *state = fe->demodulator_priv;
	u8 val = 0x0a;

	if (state->config.disable_i2c_gate_ctrl) {
		/*
		 * No tuner is attached to the demod's internal I2C bus;
		 * enabling the I2C gate would hard-hang the main I2C bus.
		 */
		return 0;
	}

	if (enable)
		val |= 0x10;

	return zl10353_single_write(fe, 0x62, val);
}

static void zl10353_release(struct dvb_frontend *fe)
{
	struct zl10353_state *state = fe->demodulator_priv;

	kfree(state);
}

static const struct dvb_frontend_ops zl10353_ops;

struct dvb_frontend *zl10353_attach(const struct zl10353_config *config,
				    struct i2c_adapter *i2c)
{
	struct zl10353_state *state = NULL;
	int id;

	/* allocate memory for the internal state */
	state = kzalloc(sizeof(struct zl10353_state), GFP_KERNEL);
	if (state == NULL)
		goto error;

	/* setup the state */
	state->i2c = i2c;
	memcpy(&state->config, config, sizeof(struct zl10353_config));

	/* check if the demod is there */
	id = zl10353_read_register(state, CHIP_ID);
	if ((id != ID_ZL10353) && (id != ID_CE6230) && (id != ID_CE6231))
		goto error;

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &zl10353_ops,
	       sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;

	return &state->frontend;
error:
	kfree(state);
	return NULL;
}

static const struct dvb_frontend_ops zl10353_ops = {
	.delsys = { SYS_DVBT },
	.info = {
		.name			= "Zarlink ZL10353 DVB-T",
		.frequency_min_hz	= 174 * MHz,
		.frequency_max_hz	= 862 * MHz,
		.frequency_stepsize_hz	= 166667,
		.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
			FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
			FE_CAN_QAM_AUTO |
			FE_CAN_TRANSMISSION_MODE_AUTO |
			FE_CAN_GUARD_INTERVAL_AUTO |
			FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER |
			FE_CAN_MUTE_TS
	},

	.release = zl10353_release,

	.init = zl10353_init,
	.sleep = zl10353_sleep,
	.i2c_gate_ctrl = zl10353_i2c_gate_ctrl,
	.write = zl10353_write,

	.set_frontend = zl10353_set_parameters,
	.get_frontend = zl10353_get_parameters,
	.get_tune_settings = zl10353_get_tune_settings,

	.read_status = zl10353_read_status,
	.read_ber = zl10353_read_ber,
	.read_signal_strength = zl10353_read_signal_strength,
	.read_snr = zl10353_read_snr,
	.read_ucblocks = zl10353_read_ucblocks,
};

module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");

module_param(debug_regs, int, 0644);
MODULE_PARM_DESC(debug_regs, "Turn on/off frontend
register dumps (default:off)."); MODULE_DESCRIPTION("Zarlink ZL10353 DVB-T demodulator driver"); MODULE_AUTHOR("Chris Pascoe"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL_GPL(zl10353_attach);
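/*
 * Illustrative sketch (not part of this driver): how a DVB bridge driver
 * would typically attach this demod.  The config values below are
 * hypothetical and board-specific; dvb_attach() (from media/dvb_frontend.h)
 * also takes a module reference on the demod driver.
 */
static const struct zl10353_config example_zl10353_config = {
	.demod_address = 0x0f,	/* hypothetical I2C address */
	.no_tuner = 1,		/* tuner is not behind the demod's I2C gate */
	.parallel_ts = 1,	/* parallel transport stream output */
};

static int example_frontend_attach(struct i2c_adapter *adap,
				   struct dvb_frontend **fe)
{
	*fe = dvb_attach(zl10353_attach, &example_zl10353_config, adap);
	if (!*fe)
		return -ENODEV;	/* CHIP_ID probe or allocation failed */
	return 0;
}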
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ov534-ov9xxx gspca driver
 *
 * Copyright (C) 2009-2011 Jean-Francois Moine http://moinejf.free.fr
 * Copyright (C) 2008 Antonio Ospite <ospite@studenti.unina.it>
 * Copyright (C) 2008 Jim Paris <jim@jtan.com>
 *
 * Based on a prototype written by Mark Ferrell <majortrips@gmail.com>
 * USB protocol reverse engineered by Jim Paris <jim@jtan.com>
 * https://jim.sh/svn/jim/devl/playstation/ps3/eye/test/
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define MODULE_NAME "ov534_9"

#include "gspca.h"

#define OV534_REG_ADDRESS	0xf1	/* sensor address */
#define OV534_REG_SUBADDR	0xf2
#define OV534_REG_WRITE		0xf3
#define OV534_REG_READ		0xf4
#define OV534_REG_OPERATION	0xf5
#define OV534_REG_STATUS	0xf6

#define OV534_OP_WRITE_3	0x37
#define OV534_OP_WRITE_2	0x33
#define OV534_OP_READ_2		0xf9

#define CTRL_TIMEOUT 500

MODULE_AUTHOR("Jean-Francois Moine <moinejf@free.fr>");
MODULE_DESCRIPTION("GSPCA/OV534_9 USB Camera Driver");
MODULE_LICENSE("GPL");

/* specific webcam descriptor */
struct sd {
	struct gspca_dev gspca_dev;	/* !!
must be the first item */ __u32 last_pts; u8 last_fid; u8 sensor; }; enum sensors { SENSOR_OV965x, /* ov9657 */ SENSOR_OV971x, /* ov9712 */ SENSOR_OV562x, /* ov5621 */ SENSOR_OV361x, /* ov3610 */ NSENSORS }; static const struct v4l2_pix_format ov965x_mode[] = { #define QVGA_MODE 0 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG}, #define VGA_MODE 1 {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG}, #define SVGA_MODE 2 {800, 600, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 800, .sizeimage = 800 * 600 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG}, #define XGA_MODE 3 {1024, 768, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 1024, .sizeimage = 1024 * 768 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG}, #define SXGA_MODE 4 {1280, 1024, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 1280, .sizeimage = 1280 * 1024 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG}, }; static const struct v4l2_pix_format ov971x_mode[] = { {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB } }; static const struct v4l2_pix_format ov562x_mode[] = { {2592, 1680, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 2592, .sizeimage = 2592 * 1680, .colorspace = V4L2_COLORSPACE_SRGB } }; enum ov361x { ov361x_2048 = 0, ov361x_1600, ov361x_1024, ov361x_640, ov361x_320, ov361x_160, ov361x_last }; static const struct v4l2_pix_format ov361x_mode[] = { {0x800, 0x600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 0x800, .sizeimage = 0x800 * 0x600, .colorspace = V4L2_COLORSPACE_SRGB}, {1600, 1200, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 1600, .sizeimage = 1600 * 1200, .colorspace = V4L2_COLORSPACE_SRGB}, {1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 768, .sizeimage = 1024 * 768, .colorspace = V4L2_COLORSPACE_SRGB}, {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB}, {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB}, {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120, .colorspace = V4L2_COLORSPACE_SRGB} }; static const u8 ov361x_start_2048[][2] = { {0x12, 0x80}, {0x13, 0xcf}, {0x14, 0x40}, {0x15, 0x00}, {0x01, 0x80}, {0x02, 0x80}, {0x04, 0x70}, {0x0d, 0x40}, {0x0f, 0x47}, {0x11, 0x81}, {0x32, 0x36}, {0x33, 0x0c}, {0x34, 0x00}, {0x35, 0x90}, {0x12, 0x00}, {0x17, 0x10}, {0x18, 0x90}, {0x19, 0x00}, {0x1a, 0xc0}, }; static const u8 ov361x_bridge_start_2048[][2] = { {0xf1, 0x60}, {0x88, 0x00}, {0x89, 0x08}, {0x8a, 0x00}, {0x8b, 0x06}, {0x8c, 0x01}, {0x8d, 0x10}, {0x1c, 0x00}, {0x1d, 0x48}, {0x1d, 0x00}, {0x1d, 0xff}, {0x1c, 0x0a}, {0x1d, 0x2e}, {0x1d, 0x1e}, }; static const u8 ov361x_start_1600[][2] = { {0x12, 0x80}, {0x13, 0xcf}, {0x14, 0x40}, {0x15, 0x00}, {0x01, 0x80}, {0x02, 0x80}, {0x04, 0x70}, {0x0d, 0x40}, {0x0f, 0x47}, {0x11, 0x81}, {0x32, 0x36}, {0x33, 0x0C}, {0x34, 0x00}, {0x35, 0x90}, {0x12, 0x00}, {0x17, 0x10}, {0x18, 0x90}, {0x19, 0x00}, {0x1a, 0xc0}, }; static const u8 ov361x_bridge_start_1600[][2] = { {0xf1, 0x60}, /* Hsize[7:0] */ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ {0x89, 0x08}, /* Vsize[7:0] */ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ {0x8b, 0x06}, /* for Iso 
*/ {0x8c, 0x01}, /* RAW input */ {0x8d, 0x10}, {0x1c, 0x00}, /* RAW output, Iso transfer */ {0x1d, 0x48}, {0x1d, 0x00}, {0x1d, 0xff}, {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ {0x1d, 0x2e}, /* for Iso */ {0x1d, 0x1e}, }; static const u8 ov361x_start_1024[][2] = { {0x12, 0x80}, {0x13, 0xcf}, {0x14, 0x40}, {0x15, 0x00}, {0x01, 0x80}, {0x02, 0x80}, {0x04, 0x70}, {0x0d, 0x40}, {0x0f, 0x47}, {0x11, 0x81}, {0x32, 0x36}, {0x33, 0x0C}, {0x34, 0x00}, {0x35, 0x90}, {0x12, 0x40}, {0x17, 0x1f}, {0x18, 0x5f}, {0x19, 0x00}, {0x1a, 0x68}, }; static const u8 ov361x_bridge_start_1024[][2] = { {0xf1, 0x60}, /* Hsize[7:0] */ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ {0x89, 0x04}, /* Vsize[7:0] */ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ {0x8b, 0x03}, /* for Iso */ {0x8c, 0x01}, /* RAW input */ {0x8d, 0x10}, {0x1c, 0x00}, /* RAW output, Iso transfer */ {0x1d, 0x48}, {0x1d, 0x00}, {0x1d, 0xff}, {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ {0x1d, 0x2e}, /* for Iso */ {0x1d, 0x1e}, }; static const u8 ov361x_start_640[][2] = { {0x12, 0x80}, {0x13, 0xcf}, {0x14, 0x40}, {0x15, 0x00}, {0x01, 0x80}, {0x02, 0x80}, {0x04, 0x70}, {0x0d, 0x40}, {0x0f, 0x47}, {0x11, 0x81}, {0x32, 0x36}, {0x33, 0x0C}, {0x34, 0x00}, {0x35, 0x90}, {0x12, 0x40}, {0x17, 0x1f}, {0x18, 0x5f}, {0x19, 0x00}, {0x1a, 0x68}, }; static const u8 ov361x_bridge_start_640[][2] = { {0xf1, 0x60}, /* Hsize[7:0]*/ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ {0x89, 0x04}, /* Vsize[7:0] */ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ {0x8b, 0x03}, /* for Iso */ {0x8c, 0x01}, /* RAW input */ {0x8d, 0x10}, {0x1c, 0x00}, /* RAW output, Iso transfer */ {0x1d, 0x48}, {0x1d, 0x00}, {0x1d, 0xff}, {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ {0x1d, 0x2e}, /* for Iso */ {0x1d, 0x1e}, }; static const u8 ov361x_start_320[][2] = { {0x12, 0x80}, {0x13, 0xcf}, {0x14, 0x40}, {0x15, 0x00}, {0x01, 0x80}, {0x02, 0x80}, {0x04, 0x70}, {0x0d, 0x40}, {0x0f, 0x47}, {0x11, 0x81}, {0x32, 0x36}, {0x33, 0x0C}, {0x34, 0x00}, {0x35, 0x90}, {0x12, 0x40}, {0x17, 0x1f}, {0x18, 0x5f}, {0x19, 0x00}, {0x1a, 0x68}, }; static const u8 ov361x_bridge_start_320[][2] = { {0xf1, 0x60}, /* Hsize[7:0] */ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ {0x89, 0x04}, /* Vsize[7:0] */ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ {0x8b, 0x03}, /* for Iso */ {0x8c, 0x01}, /* RAW input */ {0x8d, 0x10}, {0x1c, 0x00}, /* RAW output, Iso transfer; */ {0x1d, 0x48}, {0x1d, 0x00}, {0x1d, 0xff}, {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ {0x1d, 0x2e}, /* for Iso */ {0x1d, 0x1e}, }; static const u8 ov361x_start_160[][2] = { {0x12, 0x80}, {0x13, 0xcf}, {0x14, 0x40}, {0x15, 0x00}, {0x01, 0x80}, {0x02, 0x80}, {0x04, 0x70}, {0x0d, 0x40}, {0x0f, 0x47}, {0x11, 0x81}, {0x32, 0x36}, {0x33, 0x0C}, {0x34, 0x00}, {0x35, 0x90}, {0x12, 0x40}, {0x17, 0x1f}, {0x18, 0x5f}, {0x19, 0x00}, {0x1a, 0x68}, }; static const u8 ov361x_bridge_start_160[][2] = { {0xf1, 0x60}, /* Hsize[7:0] */ {0x88, 0x00}, /* Hsize[15:8] Write Only, can't read */ {0x89, 0x04}, /* Vsize[7:0] */ {0x8a, 0x00}, /* Vsize[15:8] Write Only, can't read */ {0x8b, 0x03}, /* for Iso */ {0x8c, 0x01}, /* RAW input */ {0x8d, 0x10}, {0x1c, 0x00}, /* RAW output, Iso transfer */ {0x1d, 0x48}, {0x1d, 0x00}, {0x1d, 0xff}, {0x1c, 0x0a}, /* turn off JPEG, Iso mode */ {0x1d, 0x2e}, /* for Iso */ {0x1d, 0x1e}, }; static const u8 bridge_init[][2] = { {0x88, 0xf8}, {0x89, 0xff}, {0x76, 0x03}, {0x92, 0x03}, {0x95, 0x10}, {0xe2, 0x00}, {0xe7, 0x3e}, {0x8d, 0x1c}, {0x8e, 0x00}, {0x8f, 0x00}, {0x1f, 0x00}, {0xc3, 0xf9}, 
{0x89, 0xff}, {0x88, 0xf8}, {0x76, 0x03}, {0x92, 0x01}, {0x93, 0x18}, {0x1c, 0x0a}, {0x1d, 0x48}, {0xc0, 0x50}, {0xc1, 0x3c}, {0x34, 0x05}, {0xc2, 0x0c}, {0xc3, 0xf9}, {0x34, 0x05}, {0xe7, 0x2e}, {0x31, 0xf9}, {0x35, 0x02}, {0xd9, 0x10}, {0x25, 0x42}, {0x94, 0x11}, }; static const u8 ov965x_init[][2] = { {0x12, 0x80}, /* com7 - SSCB reset */ {0x00, 0x00}, /* gain */ {0x01, 0x80}, /* blue */ {0x02, 0x80}, /* red */ {0x03, 0x1b}, /* vref */ {0x04, 0x03}, /* com1 - exposure low bits */ {0x0b, 0x57}, /* ver */ {0x0e, 0x61}, /* com5 */ {0x0f, 0x42}, /* com6 */ {0x11, 0x00}, /* clkrc */ {0x12, 0x02}, /* com7 - 15fps VGA YUYV */ {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ {0x14, 0x28}, /* com9 */ {0x16, 0x24}, /* reg16 */ {0x17, 0x1d}, /* hstart*/ {0x18, 0xbd}, /* hstop */ {0x19, 0x01}, /* vstrt */ {0x1a, 0x81}, /* vstop*/ {0x1e, 0x04}, /* mvfp */ {0x24, 0x3c}, /* aew */ {0x25, 0x36}, /* aeb */ {0x26, 0x71}, /* vpt */ {0x27, 0x08}, /* bbias */ {0x28, 0x08}, /* gbbias */ {0x29, 0x15}, /* gr com */ {0x2a, 0x00}, /* exhch */ {0x2b, 0x00}, /* exhcl */ {0x2c, 0x08}, /* rbias */ {0x32, 0xff}, /* href */ {0x33, 0x00}, /* chlf */ {0x34, 0x3f}, /* aref1 */ {0x35, 0x00}, /* aref2 */ {0x36, 0xf8}, /* aref3 */ {0x38, 0x72}, /* adc2 */ {0x39, 0x57}, /* aref4 */ {0x3a, 0x80}, /* tslb - yuyv */ {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */ {0x3d, 0x99}, /* com13 */ {0x3f, 0xc1}, /* edge */ {0x40, 0xc0}, /* com15 */ {0x41, 0x40}, /* com16 */ {0x42, 0xc0}, /* com17 */ {0x43, 0x0a}, /* rsvd */ {0x44, 0xf0}, {0x45, 0x46}, {0x46, 0x62}, {0x47, 0x2a}, {0x48, 0x3c}, {0x4a, 0xfc}, {0x4b, 0xfc}, {0x4c, 0x7f}, {0x4d, 0x7f}, {0x4e, 0x7f}, {0x4f, 0x98}, /* matrix */ {0x50, 0x98}, {0x51, 0x00}, {0x52, 0x28}, {0x53, 0x70}, {0x54, 0x98}, {0x58, 0x1a}, /* matrix coef sign */ {0x59, 0x85}, /* AWB control */ {0x5a, 0xa9}, {0x5b, 0x64}, {0x5c, 0x84}, {0x5d, 0x53}, {0x5e, 0x0e}, {0x5f, 0xf0}, /* AWB blue limit */ {0x60, 0xf0}, /* AWB red limit */ {0x61, 0xf0}, /* AWB green limit */ {0x62, 0x00}, /* lcc1 */ {0x63, 0x00}, /* lcc2 */ {0x64, 0x02}, /* lcc3 */ {0x65, 0x16}, /* lcc4 */ {0x66, 0x01}, /* lcc5 */ {0x69, 0x02}, /* hv */ {0x6b, 0x5a}, /* dbvl */ {0x6c, 0x04}, {0x6d, 0x55}, {0x6e, 0x00}, {0x6f, 0x9d}, {0x70, 0x21}, /* dnsth */ {0x71, 0x78}, {0x72, 0x00}, /* poidx */ {0x73, 0x01}, /* pckdv */ {0x74, 0x3a}, /* xindx */ {0x75, 0x35}, /* yindx */ {0x76, 0x01}, {0x77, 0x02}, {0x7a, 0x12}, /* gamma curve */ {0x7b, 0x08}, {0x7c, 0x16}, {0x7d, 0x30}, {0x7e, 0x5e}, {0x7f, 0x72}, {0x80, 0x82}, {0x81, 0x8e}, {0x82, 0x9a}, {0x83, 0xa4}, {0x84, 0xac}, {0x85, 0xb8}, {0x86, 0xc3}, {0x87, 0xd6}, {0x88, 0xe6}, {0x89, 0xf2}, {0x8a, 0x03}, {0x8c, 0x89}, /* com19 */ {0x14, 0x28}, /* com9 */ {0x90, 0x7d}, {0x91, 0x7b}, {0x9d, 0x03}, /* lcc6 */ {0x9e, 0x04}, /* lcc7 */ {0x9f, 0x7a}, {0xa0, 0x79}, {0xa1, 0x40}, /* aechm */ {0xa4, 0x50}, /* com21 */ {0xa5, 0x68}, /* com26 */ {0xa6, 0x4a}, /* AWB green */ {0xa8, 0xc1}, /* refa8 */ {0xa9, 0xef}, /* refa9 */ {0xaa, 0x92}, {0xab, 0x04}, {0xac, 0x80}, /* black level control */ {0xad, 0x80}, {0xae, 0x80}, {0xaf, 0x80}, {0xb2, 0xf2}, {0xb3, 0x20}, {0xb4, 0x20}, /* ctrlb4 */ {0xb5, 0x00}, {0xb6, 0xaf}, {0xbb, 0xae}, {0xbc, 0x7f}, /* ADC channel offsets */ {0xdb, 0x7f}, {0xbe, 0x7f}, {0xbf, 0x7f}, {0xc0, 0xe2}, {0xc1, 0xc0}, {0xc2, 0x01}, {0xc3, 0x4e}, {0xc6, 0x85}, {0xc7, 0x80}, /* com24 */ {0xc9, 0xe0}, {0xca, 0xe8}, {0xcb, 0xf0}, {0xcc, 0xd8}, {0xcd, 0xf1}, {0x4f, 0x98}, /* matrix */ {0x50, 0x98}, {0x51, 0x00}, {0x52, 0x28}, {0x53, 0x70}, {0x54, 0x98}, {0x58, 0x1a}, {0xff, 0x41}, /* 
read 41, write ff 00 */ {0x41, 0x40}, /* com16 */ {0xc5, 0x03}, /* 60 Hz banding filter */ {0x6a, 0x02}, /* 50 Hz banding filter */ {0x12, 0x62}, /* com7 - 30fps VGA YUV */ {0x36, 0xfa}, /* aref3 */ {0x69, 0x0a}, /* hv */ {0x8c, 0x89}, /* com22 */ {0x14, 0x28}, /* com9 */ {0x3e, 0x0c}, {0x41, 0x40}, /* com16 */ {0x72, 0x00}, {0x73, 0x00}, {0x74, 0x3a}, {0x75, 0x35}, {0x76, 0x01}, {0xc7, 0x80}, {0x03, 0x12}, /* vref */ {0x17, 0x16}, /* hstart */ {0x18, 0x02}, /* hstop */ {0x19, 0x01}, /* vstrt */ {0x1a, 0x3d}, /* vstop */ {0x32, 0xff}, /* href */ {0xc0, 0xaa}, }; static const u8 bridge_init_2[][2] = { {0x94, 0xaa}, {0xf1, 0x60}, {0xe5, 0x04}, {0xc0, 0x50}, {0xc1, 0x3c}, {0x8c, 0x00}, {0x8d, 0x1c}, {0x34, 0x05}, {0xc2, 0x0c}, {0xc3, 0xf9}, {0xda, 0x01}, {0x50, 0x00}, {0x51, 0xa0}, {0x52, 0x3c}, {0x53, 0x00}, {0x54, 0x00}, {0x55, 0x00}, {0x57, 0x00}, {0x5c, 0x00}, {0x5a, 0xa0}, {0x5b, 0x78}, {0x35, 0x02}, {0xd9, 0x10}, {0x94, 0x11}, }; static const u8 ov965x_init_2[][2] = { {0x3b, 0xc4}, {0x1e, 0x04}, /* mvfp */ {0x13, 0xe0}, /* com8 */ {0x00, 0x00}, /* gain */ {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ {0x11, 0x03}, /* clkrc */ {0x6b, 0x5a}, /* dblv */ {0x6a, 0x05}, {0xc5, 0x07}, {0xa2, 0x4b}, {0xa3, 0x3e}, {0x2d, 0x00}, {0xff, 0x42}, /* read 42, write ff 00 */ {0x42, 0xc0}, /* com17 */ {0x2d, 0x00}, {0xff, 0x42}, /* read 42, write ff 00 */ {0x42, 0xc1}, /* com17 */ /* sharpness */ {0x3f, 0x01}, {0xff, 0x42}, /* read 42, write ff 00 */ {0x42, 0xc1}, /* com17 */ /* saturation */ {0x4f, 0x98}, /* matrix */ {0x50, 0x98}, {0x51, 0x00}, {0x52, 0x28}, {0x53, 0x70}, {0x54, 0x98}, {0x58, 0x1a}, {0xff, 0x41}, /* read 41, write ff 00 */ {0x41, 0x40}, /* com16 */ /* contrast */ {0x56, 0x40}, /* brightness */ {0x55, 0x8f}, /* expo */ {0x10, 0x25}, /* aech - exposure high bits */ {0xff, 0x13}, /* read 13, write ff 00 */ {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ }; static const u8 ov971x_init[][2] = { {0x12, 0x80}, {0x09, 0x10}, {0x1e, 0x07}, {0x5f, 0x18}, {0x69, 0x04}, {0x65, 0x2a}, {0x68, 0x0a}, {0x39, 0x28}, {0x4d, 0x90}, {0xc1, 0x80}, {0x0c, 0x30}, {0x6d, 0x02}, {0x96, 0xf1}, {0xbc, 0x68}, {0x12, 0x00}, {0x3b, 0x00}, {0x97, 0x80}, {0x17, 0x25}, {0x18, 0xa2}, {0x19, 0x01}, {0x1a, 0xca}, {0x03, 0x0a}, {0x32, 0x07}, {0x98, 0x40}, /*{0x98, 0x00},*/ {0x99, 0xA0}, /*{0x99, 0x00},*/ {0x9a, 0x01}, /*{0x9a, 0x00},*/ {0x57, 0x00}, {0x58, 0x78}, /*{0x58, 0xc8},*/ {0x59, 0x50}, /*{0x59, 0xa0},*/ {0x4c, 0x13}, {0x4b, 0x36}, {0x3d, 0x3c}, {0x3e, 0x03}, {0xbd, 0x50}, /*{0xbd, 0xa0},*/ {0xbe, 0x78}, /*{0xbe, 0xc8},*/ {0x4e, 0x55}, {0x4f, 0x55}, {0x50, 0x55}, {0x51, 0x55}, {0x24, 0x55}, {0x25, 0x40}, {0x26, 0xa1}, {0x5c, 0x59}, {0x5d, 0x00}, {0x11, 0x00}, {0x2a, 0x98}, {0x2b, 0x06}, {0x2d, 0x00}, {0x2e, 0x00}, {0x13, 0xa5}, {0x14, 0x40}, {0x4a, 0x00}, {0x49, 0xce}, {0x22, 0x03}, {0x09, 0x00} }; static const u8 ov965x_start_1_vga[][2] = { /* same for qvga */ {0x12, 0x62}, /* com7 - 30fps VGA YUV */ {0x36, 0xfa}, /* aref3 */ {0x69, 0x0a}, /* hv */ {0x8c, 0x89}, /* com22 */ {0x14, 0x28}, /* com9 */ {0x3e, 0x0c}, /* com14 */ {0x41, 0x40}, /* com16 */ {0x72, 0x00}, {0x73, 0x00}, {0x74, 0x3a}, {0x75, 0x35}, {0x76, 0x01}, {0xc7, 0x80}, /* com24 */ {0x03, 0x12}, /* vref */ {0x17, 0x16}, /* hstart */ {0x18, 0x02}, /* hstop */ {0x19, 0x01}, /* vstrt */ {0x1a, 0x3d}, /* vstop */ {0x32, 0xff}, /* href */ {0xc0, 0xaa}, }; static const u8 ov965x_start_1_svga[][2] = { {0x12, 0x02}, /* com7 - YUYV - VGA 15 full resolution */ {0x36, 0xf8}, /* aref3 */ {0x69, 0x02}, /* hv */ {0x8c, 0x0d}, /* com22 */ {0x3e, 
0x0c}, /* com14 */ {0x41, 0x40}, /* com16 */ {0x72, 0x00}, {0x73, 0x01}, {0x74, 0x3a}, {0x75, 0x35}, {0x76, 0x01}, {0xc7, 0x80}, /* com24 */ {0x03, 0x1b}, /* vref */ {0x17, 0x1d}, /* hstart */ {0x18, 0xbd}, /* hstop */ {0x19, 0x01}, /* vstrt */ {0x1a, 0x81}, /* vstop */ {0x32, 0xff}, /* href */ {0xc0, 0xe2}, }; static const u8 ov965x_start_1_xga[][2] = { {0x12, 0x02}, /* com7 */ {0x36, 0xf8}, /* aref3 */ {0x69, 0x02}, /* hv */ {0x8c, 0x89}, /* com22 */ {0x14, 0x28}, /* com9 */ {0x3e, 0x0c}, /* com14 */ {0x41, 0x40}, /* com16 */ {0x72, 0x00}, {0x73, 0x01}, {0x74, 0x3a}, {0x75, 0x35}, {0x76, 0x01}, {0xc7, 0x80}, /* com24 */ {0x03, 0x1b}, /* vref */ {0x17, 0x1d}, /* hstart */ {0x18, 0xbd}, /* hstop */ {0x19, 0x01}, /* vstrt */ {0x1a, 0x81}, /* vstop */ {0x32, 0xff}, /* href */ {0xc0, 0xe2}, }; static const u8 ov965x_start_1_sxga[][2] = { {0x12, 0x02}, /* com7 */ {0x36, 0xf8}, /* aref3 */ {0x69, 0x02}, /* hv */ {0x8c, 0x89}, /* com22 */ {0x14, 0x28}, /* com9 */ {0x3e, 0x0c}, /* com14 */ {0x41, 0x40}, /* com16 */ {0x72, 0x00}, {0x73, 0x01}, {0x74, 0x3a}, {0x75, 0x35}, {0x76, 0x01}, {0xc7, 0x80}, /* com24 */ {0x03, 0x1b}, /* vref */ {0x17, 0x1d}, /* hstart */ {0x18, 0x02}, /* hstop */ {0x19, 0x01}, /* vstrt */ {0x1a, 0x81}, /* vstop */ {0x32, 0xff}, /* href */ {0xc0, 0xe2}, }; static const u8 bridge_start_qvga[][2] = { {0x94, 0xaa}, {0xf1, 0x60}, {0xe5, 0x04}, {0xc0, 0x50}, {0xc1, 0x3c}, {0x8c, 0x00}, {0x8d, 0x1c}, {0x34, 0x05}, {0xc2, 0x4c}, {0xc3, 0xf9}, {0xda, 0x00}, {0x50, 0x00}, {0x51, 0xa0}, {0x52, 0x78}, {0x53, 0x00}, {0x54, 0x00}, {0x55, 0x00}, {0x57, 0x00}, {0x5c, 0x00}, {0x5a, 0x50}, {0x5b, 0x3c}, {0x35, 0x02}, {0xd9, 0x10}, {0x94, 0x11}, }; static const u8 bridge_start_vga[][2] = { {0x94, 0xaa}, {0xf1, 0x60}, {0xe5, 0x04}, {0xc0, 0x50}, {0xc1, 0x3c}, {0x8c, 0x00}, {0x8d, 0x1c}, {0x34, 0x05}, {0xc2, 0x0c}, {0xc3, 0xf9}, {0xda, 0x01}, {0x50, 0x00}, {0x51, 0xa0}, {0x52, 0x3c}, {0x53, 0x00}, {0x54, 0x00}, {0x55, 0x00}, {0x57, 0x00}, {0x5c, 0x00}, {0x5a, 0xa0}, {0x5b, 0x78}, {0x35, 0x02}, {0xd9, 0x10}, {0x94, 0x11}, }; static const u8 bridge_start_svga[][2] = { {0x94, 0xaa}, {0xf1, 0x60}, {0xe5, 0x04}, {0xc0, 0xa0}, {0xc1, 0x80}, {0x8c, 0x00}, {0x8d, 0x1c}, {0x34, 0x05}, {0xc2, 0x4c}, {0xc3, 0xf9}, {0x50, 0x00}, {0x51, 0x40}, {0x52, 0x00}, {0x53, 0x00}, {0x54, 0x00}, {0x55, 0x88}, {0x57, 0x00}, {0x5c, 0x00}, {0x5a, 0xc8}, {0x5b, 0x96}, {0x35, 0x02}, {0xd9, 0x10}, {0xda, 0x00}, {0x94, 0x11}, }; static const u8 bridge_start_xga[][2] = { {0x94, 0xaa}, {0xf1, 0x60}, {0xe5, 0x04}, {0xc0, 0xa0}, {0xc1, 0x80}, {0x8c, 0x00}, {0x8d, 0x1c}, {0x34, 0x05}, {0xc2, 0x4c}, {0xc3, 0xf9}, {0x50, 0x00}, {0x51, 0x40}, {0x52, 0x00}, {0x53, 0x00}, {0x54, 0x00}, {0x55, 0x88}, {0x57, 0x00}, {0x5c, 0x01}, {0x5a, 0x00}, {0x5b, 0xc0}, {0x35, 0x02}, {0xd9, 0x10}, {0xda, 0x01}, {0x94, 0x11}, }; static const u8 bridge_start_sxga[][2] = { {0x94, 0xaa}, {0xf1, 0x60}, {0xe5, 0x04}, {0xc0, 0xa0}, {0xc1, 0x80}, {0x8c, 0x00}, {0x8d, 0x1c}, {0x34, 0x05}, {0xc2, 0x0c}, {0xc3, 0xf9}, {0xda, 0x00}, {0x35, 0x02}, {0xd9, 0x10}, {0x94, 0x11}, }; static const u8 ov965x_start_2_qvga[][2] = { {0x3b, 0xe4}, /* com11 - night mode 1/4 frame rate */ {0x1e, 0x04}, /* mvfp */ {0x13, 0xe0}, /* com8 */ {0x00, 0x00}, {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ {0x11, 0x01}, /* clkrc */ {0x6b, 0x5a}, /* dblv */ {0x6a, 0x02}, /* 50 Hz banding filter */ {0xc5, 0x03}, /* 60 Hz banding filter */ {0xa2, 0x96}, /* bd50 */ {0xa3, 0x7d}, /* bd60 */ {0xff, 0x13}, /* read 13, write ff 00 */ {0x13, 0xe7}, {0x3a, 0x80}, /* tslb - yuyv */ }; 
static const u8 ov965x_start_2_vga[][2] = { {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */ {0x1e, 0x04}, /* mvfp */ {0x13, 0xe0}, /* com8 */ {0x00, 0x00}, {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ {0x11, 0x03}, /* clkrc */ {0x6b, 0x5a}, /* dblv */ {0x6a, 0x05}, /* 50 Hz banding filter */ {0xc5, 0x07}, /* 60 Hz banding filter */ {0xa2, 0x4b}, /* bd50 */ {0xa3, 0x3e}, /* bd60 */ {0x2d, 0x00}, /* advfl */ }; static const u8 ov965x_start_2_svga[][2] = { /* same for xga */ {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */ {0x1e, 0x04}, /* mvfp */ {0x13, 0xe0}, /* com8 */ {0x00, 0x00}, {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ {0x11, 0x01}, /* clkrc */ {0x6b, 0x5a}, /* dblv */ {0x6a, 0x0c}, /* 50 Hz banding filter */ {0xc5, 0x0f}, /* 60 Hz banding filter */ {0xa2, 0x4e}, /* bd50 */ {0xa3, 0x41}, /* bd60 */ }; static const u8 ov965x_start_2_sxga[][2] = { {0x13, 0xe0}, /* com8 */ {0x00, 0x00}, {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */ {0x1e, 0x04}, /* mvfp */ {0x11, 0x01}, /* clkrc */ {0x6b, 0x5a}, /* dblv */ {0x6a, 0x0c}, /* 50 Hz banding filter */ {0xc5, 0x0f}, /* 60 Hz banding filter */ {0xa2, 0x4e}, /* bd50 */ {0xa3, 0x41}, /* bd60 */ }; static const u8 ov562x_init[][2] = { {0x88, 0x20}, {0x89, 0x0a}, {0x8a, 0x90}, {0x8b, 0x06}, {0x8c, 0x01}, {0x8d, 0x10}, {0x1c, 0x00}, {0x1d, 0x48}, {0x1d, 0x00}, {0x1d, 0xff}, {0x1c, 0x0a}, {0x1d, 0x2e}, {0x1d, 0x1e}, }; static const u8 ov562x_init_2[][2] = { {0x12, 0x80}, {0x11, 0x41}, {0x13, 0x00}, {0x10, 0x1e}, {0x3b, 0x07}, {0x5b, 0x40}, {0x39, 0x07}, {0x53, 0x02}, {0x54, 0x60}, {0x04, 0x20}, {0x27, 0x04}, {0x3d, 0x40}, {0x36, 0x00}, {0xc5, 0x04}, {0x4e, 0x00}, {0x4f, 0x93}, {0x50, 0x7b}, {0xca, 0x0c}, {0xcb, 0x0f}, {0x39, 0x07}, {0x4a, 0x10}, {0x3e, 0x0a}, {0x3d, 0x00}, {0x0c, 0x38}, {0x38, 0x90}, {0x46, 0x30}, {0x4f, 0x93}, {0x50, 0x7b}, {0xab, 0x00}, {0xca, 0x0c}, {0xcb, 0x0f}, {0x37, 0x02}, {0x44, 0x48}, {0x8d, 0x44}, {0x2a, 0x00}, {0x2b, 0x00}, {0x32, 0x00}, {0x38, 0x90}, {0x53, 0x02}, {0x54, 0x60}, {0x12, 0x00}, {0x17, 0x12}, {0x18, 0xb4}, {0x19, 0x0c}, {0x1a, 0xf4}, {0x03, 0x4a}, {0x89, 0x20}, {0x83, 0x80}, {0xb7, 0x9d}, {0xb6, 0x11}, {0xb5, 0x55}, {0xb4, 0x00}, {0xa9, 0xf0}, {0xa8, 0x0a}, {0xb8, 0xf0}, {0xb9, 0xf0}, {0xba, 0xf0}, {0x81, 0x07}, {0x63, 0x44}, {0x13, 0xc7}, {0x14, 0x60}, {0x33, 0x75}, {0x2c, 0x00}, {0x09, 0x00}, {0x35, 0x30}, {0x27, 0x04}, {0x3c, 0x07}, {0x3a, 0x0a}, {0x3b, 0x07}, {0x01, 0x40}, {0x02, 0x40}, {0x16, 0x40}, {0x52, 0xb0}, {0x51, 0x83}, {0x21, 0xbb}, {0x22, 0x10}, {0x23, 0x03}, {0x35, 0x38}, {0x20, 0x90}, {0x28, 0x30}, {0x73, 0xe1}, {0x6c, 0x00}, {0x6d, 0x80}, {0x6e, 0x00}, {0x70, 0x04}, {0x71, 0x00}, {0x8d, 0x04}, {0x64, 0x00}, {0x65, 0x00}, {0x66, 0x00}, {0x67, 0x00}, {0x68, 0x00}, {0x69, 0x00}, {0x6a, 0x00}, {0x6b, 0x00}, {0x71, 0x94}, {0x74, 0x20}, {0x80, 0x09}, {0x85, 0xc0}, }; static void reg_w_i(struct gspca_dev *gspca_dev, u16 reg, u8 val) { struct usb_device *udev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return; gspca_dev->usb_buf[0] = val; ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT); if (ret < 0) { pr_err("reg_w failed %d\n", ret); gspca_dev->usb_err = ret; } } static void reg_w(struct gspca_dev *gspca_dev, u16 reg, u8 val) { gspca_dbg(gspca_dev, D_USBO, "reg_w [%04x] = %02x\n", reg, val); reg_w_i(gspca_dev, reg, val); } static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg) { 
struct usb_device *udev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return 0; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT); gspca_dbg(gspca_dev, D_USBI, "reg_r [%04x] -> %02x\n", reg, gspca_dev->usb_buf[0]); if (ret < 0) { pr_err("reg_r err %d\n", ret); gspca_dev->usb_err = ret; return 0; } return gspca_dev->usb_buf[0]; } static int sccb_check_status(struct gspca_dev *gspca_dev) { u8 data; int i; for (i = 0; i < 5; i++) { msleep(20); data = reg_r(gspca_dev, OV534_REG_STATUS); switch (data) { case 0x00: return 1; case 0x04: return 0; case 0x03: break; default: gspca_dbg(gspca_dev, D_USBI|D_USBO, "sccb status 0x%02x, attempt %d/5\n", data, i + 1); } } return 0; } static void sccb_write(struct gspca_dev *gspca_dev, u8 reg, u8 val) { gspca_dbg(gspca_dev, D_USBO, "sccb_write [%02x] = %02x\n", reg, val); reg_w_i(gspca_dev, OV534_REG_SUBADDR, reg); reg_w_i(gspca_dev, OV534_REG_WRITE, val); reg_w_i(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3); if (!sccb_check_status(gspca_dev)) pr_err("sccb_write failed\n"); } static u8 sccb_read(struct gspca_dev *gspca_dev, u16 reg) { reg_w(gspca_dev, OV534_REG_SUBADDR, reg); reg_w(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_2); if (!sccb_check_status(gspca_dev)) pr_err("sccb_read failed 1\n"); reg_w(gspca_dev, OV534_REG_OPERATION, OV534_OP_READ_2); if (!sccb_check_status(gspca_dev)) pr_err("sccb_read failed 2\n"); return reg_r(gspca_dev, OV534_REG_READ); } /* output a bridge sequence (reg - val) */ static void reg_w_array(struct gspca_dev *gspca_dev, const u8 (*data)[2], int len) { while (--len >= 0) { reg_w(gspca_dev, (*data)[0], (*data)[1]); data++; } } /* output a sensor sequence (reg - val) */ static void sccb_w_array(struct gspca_dev *gspca_dev, const u8 (*data)[2], int len) { while (--len >= 0) { if ((*data)[0] != 0xff) { sccb_write(gspca_dev, (*data)[0], (*data)[1]); } else { sccb_read(gspca_dev, (*data)[1]); sccb_write(gspca_dev, 0xff, 0x00); } data++; } } /* Two bits control LED: 0x21 bit 7 and 0x23 bit 7. * (direction and output)? */ static void set_led(struct gspca_dev *gspca_dev, int status) { u8 data; gspca_dbg(gspca_dev, D_CONF, "led status: %d\n", status); data = reg_r(gspca_dev, 0x21); data |= 0x80; reg_w(gspca_dev, 0x21, data); data = reg_r(gspca_dev, 0x23); if (status) data |= 0x80; else data &= ~0x80; reg_w(gspca_dev, 0x23, data); if (!status) { data = reg_r(gspca_dev, 0x21); data &= ~0x80; reg_w(gspca_dev, 0x21, data); } } static void setbrightness(struct gspca_dev *gspca_dev, s32 brightness) { struct sd *sd = (struct sd *) gspca_dev; u8 val; s8 sval; if (sd->sensor == SENSOR_OV562x) { sval = brightness; val = 0x76; val += sval; sccb_write(gspca_dev, 0x24, val); val = 0x6a; val += sval; sccb_write(gspca_dev, 0x25, val); if (sval < -40) val = 0x71; else if (sval < 20) val = 0x94; else val = 0xe6; sccb_write(gspca_dev, 0x26, val); } else { val = brightness; if (val < 8) val = 15 - val; /* f .. 8 */ else val = val - 8; /* 0 .. 
7 */ sccb_write(gspca_dev, 0x55, /* brtn - brightness adjustment */ 0x0f | (val << 4)); } } static void setcontrast(struct gspca_dev *gspca_dev, s32 val) { sccb_write(gspca_dev, 0x56, /* cnst1 - contrast 1 ctrl coeff */ val << 4); } static void setautogain(struct gspca_dev *gspca_dev, s32 autogain) { u8 val; /*fixme: should adjust agc/awb/aec by different controls */ val = sccb_read(gspca_dev, 0x13); /* com8 */ sccb_write(gspca_dev, 0xff, 0x00); if (autogain) val |= 0x05; /* agc & aec */ else val &= 0xfa; sccb_write(gspca_dev, 0x13, val); } static void setexposure(struct gspca_dev *gspca_dev, s32 exposure) { static const u8 expo[4] = {0x00, 0x25, 0x38, 0x5e}; u8 val; sccb_write(gspca_dev, 0x10, expo[exposure]); /* aec[9:2] */ val = sccb_read(gspca_dev, 0x13); /* com8 */ sccb_write(gspca_dev, 0xff, 0x00); sccb_write(gspca_dev, 0x13, val); val = sccb_read(gspca_dev, 0xa1); /* aech */ sccb_write(gspca_dev, 0xff, 0x00); sccb_write(gspca_dev, 0xa1, val & 0xe0); /* aec[15:10] = 0 */ } static void setsharpness(struct gspca_dev *gspca_dev, s32 val) { if (val < 0) { /* auto */ val = sccb_read(gspca_dev, 0x42); /* com17 */ sccb_write(gspca_dev, 0xff, 0x00); sccb_write(gspca_dev, 0x42, val | 0x40); /* Edge enhancement strength auto adjust */ return; } if (val != 0) val = 1 << (val - 1); sccb_write(gspca_dev, 0x3f, /* edge - edge enhance. factor */ val); val = sccb_read(gspca_dev, 0x42); /* com17 */ sccb_write(gspca_dev, 0xff, 0x00); sccb_write(gspca_dev, 0x42, val & 0xbf); } static void setsatur(struct gspca_dev *gspca_dev, s32 val) { u8 val1, val2, val3; static const u8 matrix[5][2] = { {0x14, 0x38}, {0x1e, 0x54}, {0x28, 0x70}, {0x32, 0x8c}, {0x48, 0x90} }; val1 = matrix[val][0]; val2 = matrix[val][1]; val3 = val1 + val2; sccb_write(gspca_dev, 0x4f, val3); /* matrix coeff */ sccb_write(gspca_dev, 0x50, val3); sccb_write(gspca_dev, 0x51, 0x00); sccb_write(gspca_dev, 0x52, val1); sccb_write(gspca_dev, 0x53, val2); sccb_write(gspca_dev, 0x54, val3); sccb_write(gspca_dev, 0x58, 0x1a); /* mtxs - coeff signs */ val1 = sccb_read(gspca_dev, 0x41); /* com16 */ sccb_write(gspca_dev, 0xff, 0x00); sccb_write(gspca_dev, 0x41, val1); } static void setlightfreq(struct gspca_dev *gspca_dev, s32 freq) { u8 val; val = sccb_read(gspca_dev, 0x13); /* com8 */ sccb_write(gspca_dev, 0xff, 0x00); if (freq == 0) { sccb_write(gspca_dev, 0x13, val & 0xdf); return; } sccb_write(gspca_dev, 0x13, val | 0x20); val = sccb_read(gspca_dev, 0x42); /* com17 */ sccb_write(gspca_dev, 0xff, 0x00); if (freq == 1) val |= 0x01; else val &= 0xfe; sccb_write(gspca_dev, 0x42, val); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 sensor_id; /* reset bridge */ reg_w(gspca_dev, 0xe7, 0x3a); reg_w(gspca_dev, 0xe0, 0x08); msleep(100); /* initialize the sensor address */ reg_w(gspca_dev, OV534_REG_ADDRESS, 0x60); /* reset sensor */ sccb_write(gspca_dev, 0x12, 0x80); msleep(10); /* probe the sensor */ sccb_read(gspca_dev, 0x0a); sensor_id = sccb_read(gspca_dev, 0x0a) << 8; sccb_read(gspca_dev, 0x0b); sensor_id |= sccb_read(gspca_dev, 0x0b); gspca_dbg(gspca_dev, D_PROBE, "Sensor ID: %04x\n", sensor_id); /* initialize */ if ((sensor_id & 0xfff0) == 0x9650) { sd->sensor = SENSOR_OV965x; gspca_dev->cam.cam_mode = ov965x_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(ov965x_mode); reg_w_array(gspca_dev, bridge_init, 
ARRAY_SIZE(bridge_init)); sccb_w_array(gspca_dev, ov965x_init, ARRAY_SIZE(ov965x_init)); reg_w_array(gspca_dev, bridge_init_2, ARRAY_SIZE(bridge_init_2)); sccb_w_array(gspca_dev, ov965x_init_2, ARRAY_SIZE(ov965x_init_2)); reg_w(gspca_dev, 0xe0, 0x00); reg_w(gspca_dev, 0xe0, 0x01); set_led(gspca_dev, 0); reg_w(gspca_dev, 0xe0, 0x00); } else if ((sensor_id & 0xfff0) == 0x9710) { const char *p; int l; sd->sensor = SENSOR_OV971x; gspca_dev->cam.cam_mode = ov971x_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(ov971x_mode); gspca_dev->cam.bulk = 1; gspca_dev->cam.bulk_size = 16384; gspca_dev->cam.bulk_nurbs = 2; sccb_w_array(gspca_dev, ov971x_init, ARRAY_SIZE(ov971x_init)); /* set video format on bridge processor */ /* access bridge processor's video format registers at: 0x00 */ reg_w(gspca_dev, 0x1c, 0x00); /*set register: 0x00 is 'RAW8', 0x40 is 'YUV422' (YUYV?)*/ reg_w(gspca_dev, 0x1d, 0x00); /* Will W. specific stuff * set VSYNC to * output (0x1f) if first webcam * input (0x17) if 2nd or 3rd webcam */ p = video_device_node_name(&gspca_dev->vdev); l = strlen(p) - 1; if (p[l] == '0') reg_w(gspca_dev, 0x56, 0x1f); else reg_w(gspca_dev, 0x56, 0x17); } else if ((sensor_id & 0xfff0) == 0x5620) { sd->sensor = SENSOR_OV562x; gspca_dev->cam.cam_mode = ov562x_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(ov562x_mode); reg_w_array(gspca_dev, ov562x_init, ARRAY_SIZE(ov562x_init)); sccb_w_array(gspca_dev, ov562x_init_2, ARRAY_SIZE(ov562x_init_2)); reg_w(gspca_dev, 0xe0, 0x00); } else if ((sensor_id & 0xfff0) == 0x3610) { sd->sensor = SENSOR_OV361x; gspca_dev->cam.cam_mode = ov361x_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(ov361x_mode); reg_w(gspca_dev, 0xe7, 0x3a); reg_w(gspca_dev, 0xf1, 0x60); sccb_write(gspca_dev, 0x12, 0x80); } else { pr_err("Unknown sensor %04x", sensor_id); return -EINVAL; } return gspca_dev->usb_err; } static int sd_start_ov361x(struct gspca_dev *gspca_dev) { sccb_write(gspca_dev, 0x12, 0x80); msleep(20); switch (gspca_dev->curr_mode % (ov361x_last)) { case ov361x_2048: reg_w_array(gspca_dev, ov361x_bridge_start_2048, ARRAY_SIZE(ov361x_bridge_start_2048)); sccb_w_array(gspca_dev, ov361x_start_2048, ARRAY_SIZE(ov361x_start_2048)); break; case ov361x_1600: reg_w_array(gspca_dev, ov361x_bridge_start_1600, ARRAY_SIZE(ov361x_bridge_start_1600)); sccb_w_array(gspca_dev, ov361x_start_1600, ARRAY_SIZE(ov361x_start_1600)); break; case ov361x_1024: reg_w_array(gspca_dev, ov361x_bridge_start_1024, ARRAY_SIZE(ov361x_bridge_start_1024)); sccb_w_array(gspca_dev, ov361x_start_1024, ARRAY_SIZE(ov361x_start_1024)); break; case ov361x_640: reg_w_array(gspca_dev, ov361x_bridge_start_640, ARRAY_SIZE(ov361x_bridge_start_640)); sccb_w_array(gspca_dev, ov361x_start_640, ARRAY_SIZE(ov361x_start_640)); break; case ov361x_320: reg_w_array(gspca_dev, ov361x_bridge_start_320, ARRAY_SIZE(ov361x_bridge_start_320)); sccb_w_array(gspca_dev, ov361x_start_320, ARRAY_SIZE(ov361x_start_320)); break; case ov361x_160: reg_w_array(gspca_dev, ov361x_bridge_start_160, ARRAY_SIZE(ov361x_bridge_start_160)); sccb_w_array(gspca_dev, ov361x_start_160, ARRAY_SIZE(ov361x_start_160)); break; } reg_w(gspca_dev, 0xe0, 0x00); /* start transfer */ return gspca_dev->usb_err; } static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_OV971x) return gspca_dev->usb_err; if (sd->sensor == SENSOR_OV562x) return gspca_dev->usb_err; if (sd->sensor == SENSOR_OV361x) return sd_start_ov361x(gspca_dev); switch (gspca_dev->curr_mode) { case QVGA_MODE: /* 320x240 */ sccb_w_array(gspca_dev, 
ov965x_start_1_vga, ARRAY_SIZE(ov965x_start_1_vga)); reg_w_array(gspca_dev, bridge_start_qvga, ARRAY_SIZE(bridge_start_qvga)); sccb_w_array(gspca_dev, ov965x_start_2_qvga, ARRAY_SIZE(ov965x_start_2_qvga)); break; case VGA_MODE: /* 640x480 */ sccb_w_array(gspca_dev, ov965x_start_1_vga, ARRAY_SIZE(ov965x_start_1_vga)); reg_w_array(gspca_dev, bridge_start_vga, ARRAY_SIZE(bridge_start_vga)); sccb_w_array(gspca_dev, ov965x_start_2_vga, ARRAY_SIZE(ov965x_start_2_vga)); break; case SVGA_MODE: /* 800x600 */ sccb_w_array(gspca_dev, ov965x_start_1_svga, ARRAY_SIZE(ov965x_start_1_svga)); reg_w_array(gspca_dev, bridge_start_svga, ARRAY_SIZE(bridge_start_svga)); sccb_w_array(gspca_dev, ov965x_start_2_svga, ARRAY_SIZE(ov965x_start_2_svga)); break; case XGA_MODE: /* 1024x768 */ sccb_w_array(gspca_dev, ov965x_start_1_xga, ARRAY_SIZE(ov965x_start_1_xga)); reg_w_array(gspca_dev, bridge_start_xga, ARRAY_SIZE(bridge_start_xga)); sccb_w_array(gspca_dev, ov965x_start_2_svga, ARRAY_SIZE(ov965x_start_2_svga)); break; default: /* case SXGA_MODE: * 1280x1024 */ sccb_w_array(gspca_dev, ov965x_start_1_sxga, ARRAY_SIZE(ov965x_start_1_sxga)); reg_w_array(gspca_dev, bridge_start_sxga, ARRAY_SIZE(bridge_start_sxga)); sccb_w_array(gspca_dev, ov965x_start_2_sxga, ARRAY_SIZE(ov965x_start_2_sxga)); break; } reg_w(gspca_dev, 0xe0, 0x00); reg_w(gspca_dev, 0xe0, 0x00); set_led(gspca_dev, 1); return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { if (((struct sd *)gspca_dev)->sensor == SENSOR_OV361x) { reg_w(gspca_dev, 0xe0, 0x01); /* stop transfer */ /* reg_w(gspca_dev, 0x31, 0x09); */ return; } reg_w(gspca_dev, 0xe0, 0x01); set_led(gspca_dev, 0); reg_w(gspca_dev, 0xe0, 0x00); } /* Values for bmHeaderInfo (Video and Still Image Payload Headers, 2.4.3.3) */ #define UVC_STREAM_EOH (1 << 7) #define UVC_STREAM_ERR (1 << 6) #define UVC_STREAM_STI (1 << 5) #define UVC_STREAM_RES (1 << 4) #define UVC_STREAM_SCR (1 << 3) #define UVC_STREAM_PTS (1 << 2) #define UVC_STREAM_EOF (1 << 1) #define UVC_STREAM_FID (1 << 0) static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; __u32 this_pts; u8 this_fid; int remaining_len = len; int payload_len; payload_len = gspca_dev->cam.bulk ? 2048 : 2040; do { len = min(remaining_len, payload_len); /* Payloads are prefixed with a UVC-style header. We consider a frame to start when the FID toggles, or the PTS changes. A frame ends when EOF is set, and we've received the correct number of bytes. */ /* Verify UVC header. Header length is always 12 */ if (data[0] != 12 || len < 12) { gspca_dbg(gspca_dev, D_PACK, "bad header\n"); goto discard; } /* Check errors */ if (data[1] & UVC_STREAM_ERR) { gspca_dbg(gspca_dev, D_PACK, "payload error\n"); goto discard; } /* Extract PTS and FID */ if (!(data[1] & UVC_STREAM_PTS)) { gspca_dbg(gspca_dev, D_PACK, "PTS not present\n"); goto discard; } this_pts = (data[5] << 24) | (data[4] << 16) | (data[3] << 8) | data[2]; this_fid = data[1] & UVC_STREAM_FID; /* If PTS or FID has changed, start a new frame. 
*/ if (this_pts != sd->last_pts || this_fid != sd->last_fid) { if (gspca_dev->last_packet_type == INTER_PACKET) gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); sd->last_pts = this_pts; sd->last_fid = this_fid; gspca_frame_add(gspca_dev, FIRST_PACKET, data + 12, len - 12); /* If this packet is marked as EOF, end the frame */ } else if (data[1] & UVC_STREAM_EOF) { sd->last_pts = 0; gspca_frame_add(gspca_dev, LAST_PACKET, data + 12, len - 12); } else { /* Add the data from this payload */ gspca_frame_add(gspca_dev, INTER_PACKET, data + 12, len - 12); } /* Done this payload */ goto scan_next; discard: /* Discard data until a new frame starts. */ gspca_dev->last_packet_type = DISCARD_PACKET; scan_next: remaining_len -= len; data += len; } while (remaining_len > 0); } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightness(gspca_dev, ctrl->val); break; case V4L2_CID_CONTRAST: setcontrast(gspca_dev, ctrl->val); break; case V4L2_CID_SATURATION: setsatur(gspca_dev, ctrl->val); break; case V4L2_CID_POWER_LINE_FREQUENCY: setlightfreq(gspca_dev, ctrl->val); break; case V4L2_CID_SHARPNESS: setsharpness(gspca_dev, ctrl->val); break; case V4L2_CID_AUTOGAIN: if (ctrl->is_new) setautogain(gspca_dev, ctrl->val); if (!ctrl->val && gspca_dev->exposure->is_new) setexposure(gspca_dev, gspca_dev->exposure->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; if (sd->sensor == SENSOR_OV971x) return 0; if (sd->sensor == SENSOR_OV361x) return 0; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 7); if (sd->sensor == SENSOR_OV562x) { v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, -90, 90, 1, 0); } else { v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 15, 1, 7); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 15, 1, 3); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 4, 1, 2); /* -1 = auto */ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SHARPNESS, -1, 4, 1, -1); gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 0, 3, 1, 0); v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY, V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, 0); v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false); } if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x05a9, 0x8065)}, {USB_DEVICE(0x06f8, 0x3003)}, {USB_DEVICE(0x05a9, 0x1550)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, 
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
	.reset_resume = gspca_resume,
#endif
};

module_usb_driver(sd_driver);
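/*
 * Aside (illustrative, not kernel API): sd_pkt_scan() above slices each
 * transfer into fixed-size payloads (2048 bytes in bulk mode, 2040
 * otherwise) and derives frame boundaries from the 12-byte UVC-style
 * header at the start of every payload.  A standalone sketch of that
 * header decode, using only the layout the driver itself checks: byte 0
 * is the header length (always 12), byte 1 is bmHeaderInfo, and bytes
 * 2..5 carry the PTS little-endian.  Struct and helper names are
 * hypothetical.
 */
#include <stdint.h>
#include <stdbool.h>

struct uvc_hdr_info {
	uint32_t pts;	/* presentation timestamp, LE bytes 2..5 */
	uint8_t fid;	/* UVC_STREAM_FID toggle bit */
	bool err;	/* UVC_STREAM_ERR set */
	bool eof;	/* UVC_STREAM_EOF set */
};

/* Returns false for payloads sd_pkt_scan() would discard outright. */
static bool uvc_hdr_parse(const uint8_t *data, int len,
			  struct uvc_hdr_info *out)
{
	if (len < 12 || data[0] != 12)
		return false;		/* the driver's "bad header" test */
	if (!(data[1] & (1 << 2)))	/* UVC_STREAM_PTS must be present */
		return false;
	out->pts = (uint32_t)data[2] | ((uint32_t)data[3] << 8) |
		   ((uint32_t)data[4] << 16) | ((uint32_t)data[5] << 24);
	out->fid = data[1] & (1 << 0);
	out->err = data[1] & (1 << 6);
	out->eof = data[1] & (1 << 1);
	return true;
}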
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Force feedback support for various HID compliant devices by ThrustMaster:
 *    ThrustMaster FireStorm Dual Power 2
 * and possibly others whose device ids haven't been added.
 *
 * Modified to support ThrustMaster devices by Zinx Verituse
 * on 2003-01-25 from the Logitech force feedback driver,
 * which is by Johann Deneux.
 *
 * Copyright (c) 2003 Zinx Verituse <zinx@epicsol.org>
 * Copyright (c) 2002 Johann Deneux
 */

/*
 */

#include <linux/hid.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "hid-ids.h"

#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT	0xb320

static const signed short ff_rumble[] = {
	FF_RUMBLE,
	-1
};

static const signed short ff_joystick[] = {
	FF_CONSTANT,
	-1
};

#ifdef CONFIG_THRUSTMASTER_FF

/* Usages for thrustmaster devices I know about */
#define THRUSTMASTER_USAGE_FF	(HID_UP_GENDESK | 0xbb)

struct tmff_device {
	struct hid_report *report;
	struct hid_field *ff_field;
};

/* Changes values from 0 to 0xffff into values from minimum to maximum */
static inline int tmff_scale_u16(unsigned int in, int minimum, int maximum)
{
	int ret;

	ret = (in * (maximum - minimum) / 0xffff) + minimum;
	if (ret < minimum)
		return minimum;
	if (ret > maximum)
		return maximum;
	return ret;
}

/* Changes values from -0x80 to 0x7f into values from minimum to maximum */
static inline int tmff_scale_s8(int in, int minimum, int maximum)
{
	int ret;

	ret = (((in + 0x80) * (maximum - minimum)) / 0xff) + minimum;
	if (ret < minimum)
		return minimum;
	if (ret > maximum)
		return maximum;
	return ret;
}

static int tmff_play(struct input_dev *dev, void *data,
		     struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct tmff_device *tmff = data;
	struct hid_field *ff_field = tmff->ff_field;
	int x, y;
	int left, right;	/* Rumbling */

	switch (effect->type) {
	case FF_CONSTANT:
		x = tmff_scale_s8(effect->u.ramp.start_level,
				  ff_field->logical_minimum,
				  ff_field->logical_maximum);
		y = tmff_scale_s8(effect->u.ramp.end_level,
				  ff_field->logical_minimum,
				  ff_field->logical_maximum);

		dbg_hid("(x, y)=(%04x, %04x)\n", x, y);
		ff_field->value[0] = x;
		ff_field->value[1] = y;
		hid_hw_request(hid, tmff->report, HID_REQ_SET_REPORT);
		break;

	case FF_RUMBLE:
		left = tmff_scale_u16(effect->u.rumble.weak_magnitude,
				      ff_field->logical_minimum,
				      ff_field->logical_maximum);
		right = tmff_scale_u16(effect->u.rumble.strong_magnitude,
				       ff_field->logical_minimum,
				       ff_field->logical_maximum);

		/* 2-in-1 strong motor is left */
		if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT)
			swap(left, right);

		dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
		ff_field->value[0] = left;
		ff_field->value[1] = right;
		hid_hw_request(hid, tmff->report, HID_REQ_SET_REPORT);
		break;
	}
	return 0;
}

static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
{
	struct tmff_device *tmff;
	struct hid_report *report;
	struct list_head *report_list;
	struct hid_input *hidinput;
	struct input_dev *input_dev;
	int error;
	int i;

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;
	}
	hidinput = list_entry(hid->inputs.next, struct hid_input, list);
	input_dev = hidinput->input;

	tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL);
	if (!tmff)
		return -ENOMEM;

	/* Find the report to use */
	report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
	list_for_each_entry(report, report_list, list) {
		int fieldnum;

		for (fieldnum = 0; fieldnum < report->maxfield; ++fieldnum) {
			struct hid_field *field = report->field[fieldnum];

			if (field->maxusage <= 0)
				continue;

			switch (field->usage[0].hid) {
			case THRUSTMASTER_USAGE_FF:
				if (field->report_count < 2) {
					hid_warn(hid, "ignoring FF field with report_count < 2\n");
					continue;
				}

				if (field->logical_maximum ==
				    field->logical_minimum) {
					hid_warn(hid, "ignoring FF field with logical_maximum == logical_minimum\n");
					continue;
				}

				if (tmff->report && tmff->report != report) {
					hid_warn(hid, "ignoring FF field in other report\n");
					continue;
				}

				if (tmff->ff_field && tmff->ff_field != field) {
					hid_warn(hid, "ignoring duplicate FF field\n");
					continue;
				}

				tmff->report = report;
				tmff->ff_field = field;

				for (i = 0; ff_bits[i] >= 0; i++)
					set_bit(ff_bits[i], input_dev->ffbit);

				break;

			default:
				hid_warn(hid, "ignoring unknown output usage %08x\n",
					 field->usage[0].hid);
				continue;
			}
		}
	}

	if (!tmff->report) {
		hid_err(hid, "can't find FF field in output reports\n");
		error = -ENODEV;
		goto fail;
	}

	error = input_ff_create_memless(input_dev, tmff, tmff_play);
	if (error)
		goto fail;

	hid_info(hid, "force feedback for ThrustMaster devices by Zinx Verituse <zinx@epicsol.org>\n");
	return 0;

fail:
	kfree(tmff);
	return error;
}
#else
static inline int tmff_init(struct hid_device *hid, const signed short *ff_bits)
{
	return 0;
}
#endif

static int tm_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		goto err;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		goto err;
	}

	tmff_init(hdev, (void *)id->driver_data);

	return 0;
err:
	return ret;
}

static const struct hid_device_id tm_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300),
		.driver_data = (unsigned long)ff_rumble },
	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304),	/* FireStorm Dual Power 2 (and 3) */
		.driver_data = (unsigned long)ff_rumble },
	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT),	/* Dual Trigger 2-in-1 */
		.driver_data = (unsigned long)ff_rumble },
	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323),	/* Dual Trigger 3-in-1 (PC Mode) */
		.driver_data = (unsigned long)ff_rumble },
	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324),	/* Dual Trigger 3-in-1 (PS3 Mode) */
		.driver_data = (unsigned long)ff_rumble },
	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605),	/* NASCAR PRO FF2 Wheel */
		.driver_data = (unsigned long)ff_joystick },
	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651),	/* FGT Rumble Force Wheel */
		.driver_data = (unsigned long)ff_rumble },
	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653),	/* RGT Force Feedback CLUTCH Raging Wheel */
		.driver_data = (unsigned long)ff_joystick },
	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654),	/* FGT Force Feedback Wheel */
		.driver_data = (unsigned long)ff_joystick },
	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a),	/* F430 Force Feedback Wheel */
		.driver_data = (unsigned long)ff_joystick },
	{ }
};
MODULE_DEVICE_TABLE(hid, tm_devices);

static struct hid_driver tm_driver = {
	.name = "thrustmaster",
	.id_table = tm_devices,
	.probe = tm_probe,
};
module_hid_driver(tm_driver);

MODULE_DESCRIPTION("Force feedback support for various HID compliant devices by ThrustMaster");
MODULE_LICENSE("GPL");
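/*
 * Aside (illustrative user-space check, not part of the driver): the two
 * tmff_scale_*() helpers above carry the whole unit conversion here.  The
 * input core hands effects in fixed ranges (0..0xffff for rumble
 * magnitudes, -0x80..0x7f for constant-force levels) and the HID report
 * wants [logical_minimum, logical_maximum].  Restating the formulas
 * without the clamping:
 */
#include <assert.h>

static int scale_u16(unsigned int in, int min, int max)
{
	return (in * (max - min) / 0xffff) + min;
}

static int scale_s8(int in, int min, int max)
{
	return (((in + 0x80) * (max - min)) / 0xff) + min;
}

int main(void)
{
	/* full-strength rumble hits the top of the logical range */
	assert(scale_u16(0xffff, 0, 255) == 255);
	/* zero magnitude lands on the logical minimum */
	assert(scale_u16(0, -127, 127) == -127);
	/* mid-scale constant force maps to the middle of the range */
	assert(scale_s8(0, -127, 127) == 0);
	return 0;
}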
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> * Copyright 2017 Intel Deutschland GmbH * Copyright (C) 2019, 2022-2024 Intel Corporation */ #include <linux/kernel.h> #include <linux/rtnetlink.h> #include <linux/module.h> #include <linux/slab.h> #include "rate.h" #include "ieee80211_i.h" #include "debugfs.h" struct rate_control_alg { struct list_head list; const struct rate_control_ops *ops; }; static LIST_HEAD(rate_ctrl_algs); static DEFINE_MUTEX(rate_ctrl_mutex); static char *ieee80211_default_rc_algo = CONFIG_MAC80211_RC_DEFAULT; module_param(ieee80211_default_rc_algo, charp, 0644); MODULE_PARM_DESC(ieee80211_default_rc_algo, "Default rate control algorithm for mac80211 to use"); void rate_control_rate_init(struct link_sta_info *link_sta) { struct sta_info *sta = link_sta->sta; struct ieee80211_local *local = sta->sdata->local; struct rate_control_ref *ref = sta->rate_ctrl; struct ieee80211_sta *ista = &sta->sta; void *priv_sta = sta->rate_ctrl_priv; struct ieee80211_supported_band *sband; struct ieee80211_chanctx_conf *chanctx_conf; ieee80211_sta_init_nss(link_sta); if (!ref) return; /* SW rate control isn't supported with MLO right now */ if (WARN_ON(ieee80211_vif_is_mld(&sta->sdata->vif))) return; rcu_read_lock(); chanctx_conf = rcu_dereference(sta->sdata->vif.bss_conf.chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); return; } sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band]; /* TODO: check for minstrel_s1g ?
*/ if (sband->band == NL80211_BAND_S1GHZ) { ieee80211_s1g_sta_rate_init(sta); rcu_read_unlock(); return; } spin_lock_bh(&sta->rate_ctrl_lock); ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista, priv_sta); spin_unlock_bh(&sta->rate_ctrl_lock); rcu_read_unlock(); set_sta_flag(sta, WLAN_STA_RATE_CONTROL); } void rate_control_rate_init_all_links(struct sta_info *sta) { int link_id; for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) { struct link_sta_info *link_sta; link_sta = sdata_dereference(sta->link[link_id], sta->sdata); if (!link_sta) continue; rate_control_rate_init(link_sta); } } void rate_control_tx_status(struct ieee80211_local *local, struct ieee80211_tx_status *st) { struct rate_control_ref *ref = local->rate_ctrl; struct sta_info *sta = container_of(st->sta, struct sta_info, sta); void *priv_sta = sta->rate_ctrl_priv; struct ieee80211_supported_band *sband; if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) return; sband = local->hw.wiphy->bands[st->info->band]; spin_lock_bh(&sta->rate_ctrl_lock); if (ref->ops->tx_status_ext) ref->ops->tx_status_ext(ref->priv, sband, priv_sta, st); else if (st->skb) ref->ops->tx_status(ref->priv, sband, st->sta, priv_sta, st->skb); else WARN_ON_ONCE(1); spin_unlock_bh(&sta->rate_ctrl_lock); } void rate_control_rate_update(struct ieee80211_local *local, struct ieee80211_supported_band *sband, struct link_sta_info *link_sta, u32 changed) { struct rate_control_ref *ref = local->rate_ctrl; struct sta_info *sta = link_sta->sta; struct ieee80211_sta *ista = &sta->sta; void *priv_sta = sta->rate_ctrl_priv; struct ieee80211_chanctx_conf *chanctx_conf; if (ref && ref->ops->rate_update) { rcu_read_lock(); chanctx_conf = rcu_dereference(sta->sdata->vif.bss_conf.chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); return; } spin_lock_bh(&sta->rate_ctrl_lock); ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def, ista, priv_sta, changed); spin_unlock_bh(&sta->rate_ctrl_lock); rcu_read_unlock(); } if (sta->uploaded) drv_link_sta_rc_update(local, sta->sdata, link_sta->pub, changed); } int ieee80211_rate_control_register(const struct rate_control_ops *ops) { struct rate_control_alg *alg; if (!ops->name) return -EINVAL; mutex_lock(&rate_ctrl_mutex); list_for_each_entry(alg, &rate_ctrl_algs, list) { if (!strcmp(alg->ops->name, ops->name)) { /* don't register an algorithm twice */ WARN_ON(1); mutex_unlock(&rate_ctrl_mutex); return -EALREADY; } } alg = kzalloc(sizeof(*alg), GFP_KERNEL); if (alg == NULL) { mutex_unlock(&rate_ctrl_mutex); return -ENOMEM; } alg->ops = ops; list_add_tail(&alg->list, &rate_ctrl_algs); mutex_unlock(&rate_ctrl_mutex); return 0; } EXPORT_SYMBOL(ieee80211_rate_control_register); void ieee80211_rate_control_unregister(const struct rate_control_ops *ops) { struct rate_control_alg *alg; mutex_lock(&rate_ctrl_mutex); list_for_each_entry(alg, &rate_ctrl_algs, list) { if (alg->ops == ops) { list_del(&alg->list); kfree(alg); break; } } mutex_unlock(&rate_ctrl_mutex); } EXPORT_SYMBOL(ieee80211_rate_control_unregister); static const struct rate_control_ops * ieee80211_try_rate_control_ops_get(const char *name) { struct rate_control_alg *alg; const struct rate_control_ops *ops = NULL; if (!name) return NULL; mutex_lock(&rate_ctrl_mutex); list_for_each_entry(alg, &rate_ctrl_algs, list) { if (!strcmp(alg->ops->name, name)) { ops = alg->ops; break; } } mutex_unlock(&rate_ctrl_mutex); return ops; } /* Get the rate control algorithm. 
*/ static const struct rate_control_ops * ieee80211_rate_control_ops_get(const char *name) { const struct rate_control_ops *ops; const char *alg_name; kernel_param_lock(THIS_MODULE); if (!name) alg_name = ieee80211_default_rc_algo; else alg_name = name; ops = ieee80211_try_rate_control_ops_get(alg_name); if (!ops && name) /* try default if specific alg requested but not found */ ops = ieee80211_try_rate_control_ops_get(ieee80211_default_rc_algo); /* Note: check for > 0 is intentional to avoid clang warning */ if (!ops && (strlen(CONFIG_MAC80211_RC_DEFAULT) > 0)) /* try built-in one if specific alg requested but not found */ ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT); kernel_param_unlock(THIS_MODULE); return ops; } #ifdef CONFIG_MAC80211_DEBUGFS static ssize_t rcname_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct rate_control_ref *ref = file->private_data; int len = strlen(ref->ops->name); return simple_read_from_buffer(userbuf, count, ppos, ref->ops->name, len); } const struct debugfs_short_fops rcname_ops = { .read = rcname_read, .llseek = default_llseek, }; #endif static struct rate_control_ref * rate_control_alloc(const char *name, struct ieee80211_local *local) { struct rate_control_ref *ref; ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL); if (!ref) return NULL; ref->ops = ieee80211_rate_control_ops_get(name); if (!ref->ops) goto free; ref->priv = ref->ops->alloc(&local->hw); if (!ref->priv) goto free; return ref; free: kfree(ref); return NULL; } static void rate_control_free(struct ieee80211_local *local, struct rate_control_ref *ctrl_ref) { ctrl_ref->ops->free(ctrl_ref->priv); #ifdef CONFIG_MAC80211_DEBUGFS debugfs_remove_recursive(local->debugfs.rcdir); local->debugfs.rcdir = NULL; #endif kfree(ctrl_ref); } void ieee80211_check_rate_mask(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; u32 user_mask, basic_rates = link->conf->basic_rates; enum nl80211_band band; if (WARN_ON(!link->conf->chanreq.oper.chan)) return; band = link->conf->chanreq.oper.chan->band; if (band == NL80211_BAND_S1GHZ) { /* TODO */ return; } if (WARN_ON_ONCE(!basic_rates)) return; user_mask = sdata->rc_rateidx_mask[band]; sband = local->hw.wiphy->bands[band]; if (user_mask & basic_rates) return; sdata_dbg(sdata, "no overlap between basic rates (0x%x) and user mask (0x%x on band %d) - clearing the latter", basic_rates, user_mask, band); sdata->rc_rateidx_mask[band] = (1 << sband->n_bitrates) - 1; } static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc) { struct sk_buff *skb = txrc->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); return (info->flags & (IEEE80211_TX_CTL_NO_ACK | IEEE80211_TX_CTL_USE_MINRATE)) || !ieee80211_is_tx_data(skb); } static void rc_send_low_basicrate(struct ieee80211_tx_rate *rate, u32 basic_rates, struct ieee80211_supported_band *sband) { u8 i; if (sband->band == NL80211_BAND_S1GHZ) { /* TODO */ rate->flags |= IEEE80211_TX_RC_S1G_MCS; rate->idx = 0; return; } if (basic_rates == 0) return; /* assume basic rates unknown and accept rate */ if (rate->idx < 0) return; if (basic_rates & (1 << rate->idx)) return; /* selected rate is a basic rate */ for (i = rate->idx + 1; i <= sband->n_bitrates; i++) { if (basic_rates & (1 << i)) { rate->idx = i; return; } } /* could not find a basic rate; use original selection */ } static void __rate_control_send_low(struct 
ieee80211_hw *hw, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, struct ieee80211_tx_info *info, u32 rate_mask) { int i; u32 rate_flags = ieee80211_chandef_rate_flags(&hw->conf.chandef); if (sband->band == NL80211_BAND_S1GHZ) { info->control.rates[0].flags |= IEEE80211_TX_RC_S1G_MCS; info->control.rates[0].idx = 0; return; } if ((sband->band == NL80211_BAND_2GHZ) && (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) rate_flags |= IEEE80211_RATE_ERP_G; info->control.rates[0].idx = 0; for (i = 0; i < sband->n_bitrates; i++) { if (!(rate_mask & BIT(i))) continue; if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; if (!rate_supported(sta, sband->band, i)) continue; info->control.rates[0].idx = i; break; } WARN_ONCE(i == sband->n_bitrates, "no supported rates for sta %pM (0x%x, band %d) in rate_mask 0x%x with flags 0x%x\n", sta ? sta->addr : NULL, sta ? sta->deflink.supp_rates[sband->band] : -1, sband->band, rate_mask, rate_flags); info->control.rates[0].count = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 1 : hw->max_rate_tries; info->control.skip_table = 1; } static bool rate_control_send_low(struct ieee80211_sta *pubsta, struct ieee80211_tx_rate_control *txrc) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); struct ieee80211_supported_band *sband = txrc->sband; struct sta_info *sta; int mcast_rate; bool use_basicrate = false; if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) { __rate_control_send_low(txrc->hw, sband, pubsta, info, txrc->rate_idx_mask); if (!pubsta && txrc->bss) { mcast_rate = txrc->bss_conf->mcast_rate[sband->band]; if (mcast_rate > 0) { info->control.rates[0].idx = mcast_rate - 1; return true; } use_basicrate = true; } else if (pubsta) { sta = container_of(pubsta, struct sta_info, sta); if (ieee80211_vif_is_mesh(&sta->sdata->vif)) use_basicrate = true; } if (use_basicrate) rc_send_low_basicrate(&info->control.rates[0], txrc->bss_conf->basic_rates, sband); return true; } return false; } static bool rate_idx_match_legacy_mask(s8 *rate_idx, int n_bitrates, u32 mask) { int j; /* See whether the selected rate or anything below it is allowed. */ for (j = *rate_idx; j >= 0; j--) { if (mask & (1 << j)) { /* Okay, found a suitable rate. Use it. */ *rate_idx = j; return true; } } /* Try to find a higher rate that would be allowed */ for (j = *rate_idx + 1; j < n_bitrates; j++) { if (mask & (1 << j)) { /* Okay, found a suitable rate. Use it. */ *rate_idx = j; return true; } } return false; } static bool rate_idx_match_mcs_mask(s8 *rate_idx, u8 *mcs_mask) { int i, j; int ridx, rbit; ridx = *rate_idx / 8; rbit = *rate_idx % 8; /* sanity check */ if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN) return false; /* See whether the selected rate or anything below it is allowed. */ for (i = ridx; i >= 0; i--) { for (j = rbit; j >= 0; j--) if (mcs_mask[i] & BIT(j)) { *rate_idx = i * 8 + j; return true; } rbit = 7; } /* Try to find a higher rate that would be allowed */ ridx = (*rate_idx + 1) / 8; rbit = (*rate_idx + 1) % 8; for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) { for (j = rbit; j < 8; j++) if (mcs_mask[i] & BIT(j)) { *rate_idx = i * 8 + j; return true; } rbit = 0; } return false; } static bool rate_idx_match_vht_mcs_mask(s8 *rate_idx, u16 *vht_mask) { int i, j; int ridx, rbit; ridx = *rate_idx >> 4; rbit = *rate_idx & 0xf; if (ridx < 0 || ridx >= NL80211_VHT_NSS_MAX) return false; /* See whether the selected rate or anything below it is allowed. 
*/ for (i = ridx; i >= 0; i--) { for (j = rbit; j >= 0; j--) { if (vht_mask[i] & BIT(j)) { *rate_idx = (i << 4) | j; return true; } } rbit = 15; } /* Try to find a higher rate that would be allowed */ ridx = (*rate_idx + 1) >> 4; rbit = (*rate_idx + 1) & 0xf; for (i = ridx; i < NL80211_VHT_NSS_MAX; i++) { for (j = rbit; j < 16; j++) { if (vht_mask[i] & BIT(j)) { *rate_idx = (i << 4) | j; return true; } } rbit = 0; } return false; } static void rate_idx_match_mask(s8 *rate_idx, u16 *rate_flags, struct ieee80211_supported_band *sband, enum nl80211_chan_width chan_width, u32 mask, u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN], u16 vht_mask[NL80211_VHT_NSS_MAX]) { if (*rate_flags & IEEE80211_TX_RC_VHT_MCS) { /* handle VHT rates */ if (rate_idx_match_vht_mcs_mask(rate_idx, vht_mask)) return; *rate_idx = 0; /* keep protection flags */ *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS | IEEE80211_TX_RC_USE_CTS_PROTECT | IEEE80211_TX_RC_USE_SHORT_PREAMBLE); *rate_flags |= IEEE80211_TX_RC_MCS; if (chan_width == NL80211_CHAN_WIDTH_40) *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; if (rate_idx_match_mcs_mask(rate_idx, mcs_mask)) return; /* also try the legacy rates. */ *rate_flags &= ~(IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_40_MHZ_WIDTH); if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates, mask)) return; } else if (*rate_flags & IEEE80211_TX_RC_MCS) { /* handle HT rates */ if (rate_idx_match_mcs_mask(rate_idx, mcs_mask)) return; /* also try the legacy rates. */ *rate_idx = 0; /* keep protection flags */ *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS | IEEE80211_TX_RC_USE_CTS_PROTECT | IEEE80211_TX_RC_USE_SHORT_PREAMBLE); if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates, mask)) return; } else { /* handle legacy rates */ if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates, mask)) return; /* if HT BSS, and we handle a data frame, also try HT rates */ switch (chan_width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: return; default: break; } *rate_idx = 0; /* keep protection flags */ *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS | IEEE80211_TX_RC_USE_CTS_PROTECT | IEEE80211_TX_RC_USE_SHORT_PREAMBLE); *rate_flags |= IEEE80211_TX_RC_MCS; if (chan_width == NL80211_CHAN_WIDTH_40) *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; if (rate_idx_match_mcs_mask(rate_idx, mcs_mask)) return; } /* * Uh.. No suitable rate exists. This should not really happen with * sane TX rate mask configurations. However, should someone manage to * configure supported rates and TX rate mask in incompatible way, * allow the frame to be transmitted with whatever the rate control * selected. */ } static void rate_fixup_ratelist(struct ieee80211_vif *vif, struct ieee80211_supported_band *sband, struct ieee80211_tx_info *info, struct ieee80211_tx_rate *rates, int max_rates) { struct ieee80211_rate *rate; bool inval = false; int i; /* * Set up the RTS/CTS rate as the fastest basic rate * that is not faster than the data rate unless there * is no basic rate slower than the data rate, in which * case we pick the slowest basic rate * * XXX: Should this check all retry rates? */ if (!(rates[0].flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))) { u32 basic_rates = vif->bss_conf.basic_rates; s8 baserate = basic_rates ? 
ffs(basic_rates) - 1 : 0; rate = &sband->bitrates[rates[0].idx]; for (i = 0; i < sband->n_bitrates; i++) { /* must be a basic rate */ if (!(basic_rates & BIT(i))) continue; /* must not be faster than the data rate */ if (sband->bitrates[i].bitrate > rate->bitrate) continue; /* maximum */ if (sband->bitrates[baserate].bitrate < sband->bitrates[i].bitrate) baserate = i; } info->control.rts_cts_rate_idx = baserate; } for (i = 0; i < max_rates; i++) { /* * make sure there's no valid rate following * an invalid one, just in case drivers don't * take the API seriously to stop at -1. */ if (inval) { rates[i].idx = -1; continue; } if (rates[i].idx < 0) { inval = true; continue; } /* * For now assume MCS is already set up correctly, this * needs to be fixed. */ if (rates[i].flags & IEEE80211_TX_RC_MCS) { WARN_ON(rates[i].idx > 76); if (!(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) && info->control.use_cts_prot) rates[i].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT; continue; } if (rates[i].flags & IEEE80211_TX_RC_VHT_MCS) { WARN_ON(ieee80211_rate_get_vht_mcs(&rates[i]) > 9); continue; } /* set up RTS protection if desired */ if (info->control.use_rts) { rates[i].flags |= IEEE80211_TX_RC_USE_RTS_CTS; info->control.use_cts_prot = false; } /* RC is busted */ if (WARN_ON_ONCE(rates[i].idx >= sband->n_bitrates)) { rates[i].idx = -1; continue; } rate = &sband->bitrates[rates[i].idx]; /* set up short preamble */ if (info->control.short_preamble && rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) rates[i].flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; /* set up G protection */ if (!(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) && info->control.use_cts_prot && rate->flags & IEEE80211_RATE_ERP_G) rates[i].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT; } } static void rate_control_fill_sta_table(struct ieee80211_sta *sta, struct ieee80211_tx_info *info, struct ieee80211_tx_rate *rates, int max_rates) { struct ieee80211_sta_rates *ratetbl = NULL; int i; if (sta && !info->control.skip_table) ratetbl = rcu_dereference(sta->rates); /* Fill remaining rate slots with data from the sta rate table. 
*/ max_rates = min_t(int, max_rates, IEEE80211_TX_RATE_TABLE_SIZE); for (i = 0; i < max_rates; i++) { if (i < ARRAY_SIZE(info->control.rates) && info->control.rates[i].idx >= 0 && info->control.rates[i].count) { if (rates != info->control.rates) rates[i] = info->control.rates[i]; } else if (ratetbl) { rates[i].idx = ratetbl->rate[i].idx; rates[i].flags = ratetbl->rate[i].flags; if (info->control.use_rts) rates[i].count = ratetbl->rate[i].count_rts; else if (info->control.use_cts_prot) rates[i].count = ratetbl->rate[i].count_cts; else rates[i].count = ratetbl->rate[i].count; } else { rates[i].idx = -1; rates[i].count = 0; } if (rates[i].idx < 0 || !rates[i].count) break; } } static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, u32 *mask, u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN], u16 vht_mask[NL80211_VHT_NSS_MAX]) { u32 i, flags; *mask = sdata->rc_rateidx_mask[sband->band]; flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper); for (i = 0; i < sband->n_bitrates; i++) { if ((flags & sband->bitrates[i].flags) != flags) *mask &= ~BIT(i); } if (*mask == (1 << sband->n_bitrates) - 1 && !sdata->rc_has_mcs_mask[sband->band] && !sdata->rc_has_vht_mcs_mask[sband->band]) return false; if (sdata->rc_has_mcs_mask[sband->band]) memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[sband->band], IEEE80211_HT_MCS_MASK_LEN); else memset(mcs_mask, 0xff, IEEE80211_HT_MCS_MASK_LEN); if (sdata->rc_has_vht_mcs_mask[sband->band]) memcpy(vht_mask, sdata->rc_rateidx_vht_mcs_mask[sband->band], sizeof(u16) * NL80211_VHT_NSS_MAX); else memset(vht_mask, 0xff, sizeof(u16) * NL80211_VHT_NSS_MAX); if (sta) { __le16 sta_vht_cap; u16 sta_vht_mask[NL80211_VHT_NSS_MAX]; /* Filter out rates that the STA does not support */ *mask &= sta->deflink.supp_rates[sband->band]; for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) mcs_mask[i] &= sta->deflink.ht_cap.mcs.rx_mask[i]; sta_vht_cap = sta->deflink.vht_cap.vht_mcs.rx_mcs_map; ieee80211_get_vht_mask_from_cap(sta_vht_cap, sta_vht_mask); for (i = 0; i < NL80211_VHT_NSS_MAX; i++) vht_mask[i] &= sta_vht_mask[i]; } return true; } static void rate_control_apply_mask_ratetbl(struct sta_info *sta, struct ieee80211_supported_band *sband, struct ieee80211_sta_rates *rates) { int i; u32 mask; u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN]; u16 vht_mask[NL80211_VHT_NSS_MAX]; enum nl80211_chan_width chan_width; if (!rate_control_cap_mask(sta->sdata, sband, &sta->sta, &mask, mcs_mask, vht_mask)) return; chan_width = sta->sdata->vif.bss_conf.chanreq.oper.width; for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) { if (rates->rate[i].idx < 0) break; rate_idx_match_mask(&rates->rate[i].idx, &rates->rate[i].flags, sband, chan_width, mask, mcs_mask, vht_mask); } } static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, struct ieee80211_supported_band *sband, struct ieee80211_tx_rate *rates, int max_rates) { enum nl80211_chan_width chan_width; u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN]; u32 mask; u16 rate_flags, vht_mask[NL80211_VHT_NSS_MAX]; int i; /* * Try to enforce the rateidx mask the user wanted. skip this if the * default mask (allow all rates) is used to save some processing for * the common case. */ if (!rate_control_cap_mask(sdata, sband, sta, &mask, mcs_mask, vht_mask)) return; /* * Make sure the rate index selected for each TX rate is * included in the configured mask and change the rate indexes * if needed. 
*/ chan_width = sdata->vif.bss_conf.chanreq.oper.width; for (i = 0; i < max_rates; i++) { /* Skip invalid rates */ if (rates[i].idx < 0) break; rate_flags = rates[i].flags; rate_idx_match_mask(&rates[i].idx, &rate_flags, sband, chan_width, mask, mcs_mask, vht_mask); rates[i].flags = rate_flags; } } void ieee80211_get_tx_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct sk_buff *skb, struct ieee80211_tx_rate *dest, int max_rates) { struct ieee80211_sub_if_data *sdata; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_supported_band *sband; u32 mask = ~0; rate_control_fill_sta_table(sta, info, dest, max_rates); if (!vif) return; sdata = vif_to_sdata(vif); sband = sdata->local->hw.wiphy->bands[info->band]; if (ieee80211_is_tx_data(skb)) rate_control_apply_mask(sdata, sta, sband, dest, max_rates); if (!(info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK)) mask = sdata->rc_rateidx_mask[info->band]; if (dest[0].idx < 0) __rate_control_send_low(&sdata->local->hw, sband, sta, info, mask); if (sta) rate_fixup_ratelist(vif, sband, info, dest, max_rates); } EXPORT_SYMBOL(ieee80211_get_tx_rates); void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_tx_rate_control *txrc) { struct rate_control_ref *ref = sdata->local->rate_ctrl; void *priv_sta = NULL; struct ieee80211_sta *ista = NULL; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); int i; for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { info->control.rates[i].idx = -1; info->control.rates[i].flags = 0; info->control.rates[i].count = 0; } if (rate_control_send_low(sta ? &sta->sta : NULL, txrc)) return; if (ieee80211_hw_check(&sdata->local->hw, HAS_RATE_CONTROL)) return; if (sta && test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) { ista = &sta->sta; priv_sta = sta->rate_ctrl_priv; } if (ista) { spin_lock_bh(&sta->rate_ctrl_lock); ref->ops->get_rate(ref->priv, ista, priv_sta, txrc); spin_unlock_bh(&sta->rate_ctrl_lock); } else { rate_control_send_low(NULL, txrc); } if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_RC_TABLE)) return; ieee80211_get_tx_rates(&sdata->vif, ista, txrc->skb, info->control.rates, ARRAY_SIZE(info->control.rates)); } int rate_control_set_rates(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, struct ieee80211_sta_rates *rates) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_sta_rates *old; struct ieee80211_supported_band *sband; sband = ieee80211_get_sband(sta->sdata); if (!sband) return -EINVAL; rate_control_apply_mask_ratetbl(sta, sband, rates); /* * mac80211 guarantees that this function will not be called * concurrently, so the following RCU access is safe, even without * extra locking. This can not be checked easily, so we just set * the condition to true. 
*/ old = rcu_dereference_protected(pubsta->rates, true); rcu_assign_pointer(pubsta->rates, rates); if (old) kfree_rcu(old, rcu_head); if (sta->uploaded) drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta)); return 0; } EXPORT_SYMBOL(rate_control_set_rates); int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, const char *name) { struct rate_control_ref *ref; ASSERT_RTNL(); if (local->open_count) return -EBUSY; if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) { if (WARN_ON(!local->ops->set_rts_threshold)) return -EINVAL; return 0; } ref = rate_control_alloc(name, local); if (!ref) { wiphy_warn(local->hw.wiphy, "Failed to select rate control algorithm\n"); return -ENOENT; } WARN_ON(local->rate_ctrl); local->rate_ctrl = ref; wiphy_debug(local->hw.wiphy, "Selected rate control algorithm '%s'\n", ref->ops->name); return 0; } void rate_control_deinitialize(struct ieee80211_local *local) { struct rate_control_ref *ref; ref = local->rate_ctrl; if (!ref) return; local->rate_ctrl = NULL; rate_control_free(local, ref); }
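/*
 * Aside (illustrative, helper names hypothetical): the mask-matching
 * helpers above rely on two packing conventions.  rate_idx_match_mcs_mask()
 * treats an HT rate index as byte mcs_mask[idx / 8], bit idx % 8;
 * rate_idx_match_vht_mcs_mask() packs a VHT index as (NSS << 4) | MCS, so
 * vht_mask[] is indexed by spatial-stream slot and tested per MCS bit.
 * A short standalone restatement of the VHT packing:
 */
#include <stdio.h>

static int vht_pack(int nss, int mcs) { return (nss << 4) | mcs; }
static int vht_nss(int rate_idx) { return rate_idx >> 4; }
static int vht_mcs(int rate_idx) { return rate_idx & 0xf; }

int main(void)
{
	int idx = vht_pack(2, 7);	/* third NSS slot, MCS 7 */

	/* vht_mask[vht_nss(idx)] & (1 << vht_mcs(idx)) is exactly the
	 * per-candidate test rate_idx_match_vht_mcs_mask() performs */
	printf("idx=0x%02x nss=%d mcs=%d\n", idx, vht_nss(idx), vht_mcs(idx));
	return 0;
}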
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* audit.h -- Auditing support * * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. * All Rights Reserved. * * Written by Rickard E.
(Rik) Faith <faith@redhat.com> */ #ifndef _LINUX_AUDIT_H_ #define _LINUX_AUDIT_H_ #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/audit_arch.h> #include <uapi/linux/audit.h> #include <uapi/linux/netfilter/nf_tables.h> #include <uapi/linux/fanotify.h> #define AUDIT_INO_UNSET ((unsigned long)-1) #define AUDIT_DEV_UNSET ((dev_t)-1) struct audit_sig_info { uid_t uid; pid_t pid; char ctx[]; }; struct audit_buffer; struct audit_context; struct inode; struct netlink_skb_parms; struct path; struct linux_binprm; struct mq_attr; struct mqstat; struct audit_watch; struct audit_tree; struct sk_buff; struct kern_ipc_perm; struct audit_krule { u32 pflags; u32 flags; u32 listnr; u32 action; u32 mask[AUDIT_BITMASK_SIZE]; u32 buflen; /* for data alloc on list rules */ u32 field_count; char *filterkey; /* ties events to rules */ struct audit_field *fields; struct audit_field *arch_f; /* quick access to arch field */ struct audit_field *inode_f; /* quick access to an inode field */ struct audit_watch *watch; /* associated watch */ struct audit_tree *tree; /* associated watched tree */ struct audit_fsnotify_mark *exe; struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ struct list_head list; /* for AUDIT_LIST* purposes only */ u64 prio; }; /* Flag to indicate legacy AUDIT_LOGINUID unset usage */ #define AUDIT_LOGINUID_LEGACY 0x1 struct audit_field { u32 type; union { u32 val; kuid_t uid; kgid_t gid; struct { char *lsm_str; void *lsm_rule; }; }; u32 op; }; enum audit_ntp_type { AUDIT_NTP_OFFSET, AUDIT_NTP_FREQ, AUDIT_NTP_STATUS, AUDIT_NTP_TAI, AUDIT_NTP_TICK, AUDIT_NTP_ADJUST, AUDIT_NTP_NVALS /* count */ }; #ifdef CONFIG_AUDITSYSCALL struct audit_ntp_val { long long oldval, newval; }; struct audit_ntp_data { struct audit_ntp_val vals[AUDIT_NTP_NVALS]; }; #else struct audit_ntp_data {}; #endif enum audit_nfcfgop { AUDIT_XT_OP_REGISTER, AUDIT_XT_OP_REPLACE, AUDIT_XT_OP_UNREGISTER, AUDIT_NFT_OP_TABLE_REGISTER, AUDIT_NFT_OP_TABLE_UNREGISTER, AUDIT_NFT_OP_CHAIN_REGISTER, AUDIT_NFT_OP_CHAIN_UNREGISTER, AUDIT_NFT_OP_RULE_REGISTER, AUDIT_NFT_OP_RULE_UNREGISTER, AUDIT_NFT_OP_SET_REGISTER, AUDIT_NFT_OP_SET_UNREGISTER, AUDIT_NFT_OP_SETELEM_REGISTER, AUDIT_NFT_OP_SETELEM_UNREGISTER, AUDIT_NFT_OP_GEN_REGISTER, AUDIT_NFT_OP_OBJ_REGISTER, AUDIT_NFT_OP_OBJ_UNREGISTER, AUDIT_NFT_OP_OBJ_RESET, AUDIT_NFT_OP_FLOWTABLE_REGISTER, AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, AUDIT_NFT_OP_SETELEM_RESET, AUDIT_NFT_OP_RULE_RESET, AUDIT_NFT_OP_INVALID, }; extern int __init audit_register_class(int class, unsigned *list); extern int audit_classify_syscall(int abi, unsigned syscall); extern int audit_classify_arch(int arch); /* only for compat system calls */ extern unsigned compat_write_class[]; extern unsigned compat_read_class[]; extern unsigned compat_dir_class[]; extern unsigned compat_chattr_class[]; extern unsigned compat_signal_class[]; /* audit_names->type values */ #define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */ #define AUDIT_TYPE_NORMAL 1 /* a "normal" audit record */ #define AUDIT_TYPE_PARENT 2 /* a parent audit record */ #define AUDIT_TYPE_CHILD_DELETE 3 /* a child being deleted */ #define AUDIT_TYPE_CHILD_CREATE 4 /* a child being created */ /* maximized args number that audit_socketcall can process */ #define AUDITSC_ARGS 6 /* bit values for ->signal->audit_tty */ #define AUDIT_TTY_ENABLE BIT(0) #define AUDIT_TTY_LOG_PASSWD BIT(1) struct filename; #define AUDIT_OFF 0 #define AUDIT_ON 1 #define AUDIT_LOCKED 2 #ifdef CONFIG_AUDIT /* These are defined in audit.c */ /* Public API */ extern 
extern __printf(4, 5)
void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
	       const char *fmt, ...);

extern struct audit_buffer *audit_log_start(struct audit_context *ctx,
					    gfp_t gfp_mask, int type);
extern __printf(2, 3)
void audit_log_format(struct audit_buffer *ab, const char *fmt, ...);
extern void audit_log_end(struct audit_buffer *ab);
extern bool audit_string_contains_control(const char *string, size_t len);
extern void audit_log_n_hex(struct audit_buffer *ab,
			    const unsigned char *buf, size_t len);
extern void audit_log_n_string(struct audit_buffer *ab,
			       const char *buf, size_t n);
extern void audit_log_n_untrustedstring(struct audit_buffer *ab,
					const char *string, size_t n);
extern void audit_log_untrustedstring(struct audit_buffer *ab,
				      const char *string);
extern void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
			     const struct path *path);
extern void audit_log_key(struct audit_buffer *ab, char *key);
extern void audit_log_path_denied(int type, const char *operation);
extern void audit_log_lost(const char *message);

extern int audit_log_task_context(struct audit_buffer *ab);
extern void audit_log_task_info(struct audit_buffer *ab);

extern int audit_update_lsm_rules(void);

				/* Private API (for audit.c only) */
extern int audit_rule_change(int type, int seq, void *data, size_t datasz);
extern int audit_list_rules_send(struct sk_buff *request_skb, int seq);

extern int audit_set_loginuid(kuid_t loginuid);

static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
{
	return tsk->loginuid;
}

static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
{
	return tsk->sessionid;
}

extern u32 audit_enabled;

extern int audit_signal_info(int sig, struct task_struct *t);
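/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a minimal caller of the logging API above. A record is opened with
 * audit_log_start(), filled with audit_log_format() and queued with
 * audit_log_end(). The record type AUDIT_USER and the message contents are
 * arbitrary example values.
 */
static inline void audit_example_log_event(struct audit_context *ctx, pid_t pid)
{
	struct audit_buffer *ab;

	ab = audit_log_start(ctx, GFP_KERNEL, AUDIT_USER);
	if (!ab)
		return;		/* auditing disabled or no memory */
	audit_log_format(ab, "example event pid=%d", pid);
	audit_log_end(ab);	/* queues the record and frees the buffer */
}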
#else /* CONFIG_AUDIT */
static inline __printf(4, 5)
void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
	       const char *fmt, ...)
{ }
static inline struct audit_buffer *audit_log_start(struct audit_context *ctx,
						   gfp_t gfp_mask, int type)
{
	return NULL;
}
static inline __printf(2, 3)
void audit_log_format(struct audit_buffer *ab, const char *fmt, ...)
{ }
static inline void audit_log_end(struct audit_buffer *ab)
{ }
static inline void audit_log_n_hex(struct audit_buffer *ab,
				   const unsigned char *buf, size_t len)
{ }
static inline void audit_log_n_string(struct audit_buffer *ab,
				      const char *buf, size_t n)
{ }
static inline void audit_log_n_untrustedstring(struct audit_buffer *ab,
					       const char *string, size_t n)
{ }
static inline void audit_log_untrustedstring(struct audit_buffer *ab,
					     const char *string)
{ }
static inline void audit_log_d_path(struct audit_buffer *ab,
				    const char *prefix,
				    const struct path *path)
{ }
static inline void audit_log_key(struct audit_buffer *ab, char *key)
{ }
static inline void audit_log_path_denied(int type, const char *operation)
{ }
static inline int audit_log_task_context(struct audit_buffer *ab)
{
	return 0;
}
static inline void audit_log_task_info(struct audit_buffer *ab)
{ }

static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
{
	return INVALID_UID;
}

static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
{
	return AUDIT_SID_UNSET;
}

#define audit_enabled AUDIT_OFF

static inline int audit_signal_info(int sig, struct task_struct *t)
{
	return 0;
}

#endif /* CONFIG_AUDIT */

#ifdef CONFIG_AUDIT_COMPAT_GENERIC
#define audit_is_compat(arch)  (!((arch) & __AUDIT_ARCH_64BIT))
#else
#define audit_is_compat(arch)  false
#endif

#define AUDIT_INODE_PARENT	1	/* dentry represents the parent */
#define AUDIT_INODE_HIDDEN	2	/* audit record should be hidden */
#define AUDIT_INODE_NOEVAL	4	/* audit record incomplete */

#ifdef CONFIG_AUDITSYSCALL
#include <asm/syscall.h> /* for syscall_get_arch() */

/* These are defined in auditsc.c */
				/* Public API */
extern int audit_alloc(struct task_struct *task);
extern void __audit_free(struct task_struct *task);
extern void __audit_uring_entry(u8 op);
extern void __audit_uring_exit(int success, long code);
extern void __audit_syscall_entry(int major, unsigned long a0,
				  unsigned long a1, unsigned long a2,
				  unsigned long a3);
extern void __audit_syscall_exit(int ret_success, long ret_value);
extern struct filename *__audit_reusename(const __user char *uptr);
extern void __audit_getname(struct filename *name);
extern void __audit_inode(struct filename *name, const struct dentry *dentry,
			  unsigned int flags);
extern void __audit_file(const struct file *);
extern void __audit_inode_child(struct inode *parent,
				const struct dentry *dentry,
				const unsigned char type);
extern void audit_seccomp(unsigned long syscall, long signr, int code);
extern void audit_seccomp_actions_logged(const char *names,
					 const char *old_names, int res);
extern void __audit_ptrace(struct task_struct *t);

static inline void audit_set_context(struct task_struct *task,
				     struct audit_context *ctx)
{
	task->audit_context = ctx;
}

static inline struct audit_context *audit_context(void)
{
	return current->audit_context;
}

static inline bool audit_dummy_context(void)
{
	void *p = audit_context();

	return !p || *(int *)p;
}

static inline void audit_free(struct task_struct *task)
{
	if (unlikely(task->audit_context))
		__audit_free(task);
}
static inline void audit_uring_entry(u8 op)
{
	/*
	 * We intentionally check audit_context() before audit_enabled as most
	 * Linux systems (as of ~2021) rely on systemd which forces audit to
	 * be enabled regardless of the user's audit configuration.
	 */
	if (unlikely(audit_context() && audit_enabled))
		__audit_uring_entry(op);
}
static inline void audit_uring_exit(int success, long code)
{
	if (unlikely(audit_context()))
		__audit_uring_exit(success, code);
}
static inline void audit_syscall_entry(int major, unsigned long a0,
				       unsigned long a1, unsigned long a2,
				       unsigned long a3)
{
	if (unlikely(audit_context()))
		__audit_syscall_entry(major, a0, a1, a2, a3);
}
static inline void audit_syscall_exit(void *pt_regs)
{
	if (unlikely(audit_context())) {
		int success = is_syscall_success(pt_regs);
		long return_code = regs_return_value(pt_regs);

		__audit_syscall_exit(success, return_code);
	}
}
static inline struct filename *audit_reusename(const __user char *name)
{
	if (unlikely(!audit_dummy_context()))
		return __audit_reusename(name);
	return NULL;
}
static inline void audit_getname(struct filename *name)
{
	if (unlikely(!audit_dummy_context()))
		__audit_getname(name);
}
static inline void audit_inode(struct filename *name,
			       const struct dentry *dentry,
			       unsigned int aflags)
{
	if (unlikely(!audit_dummy_context()))
		__audit_inode(name, dentry, aflags);
}
static inline void audit_file(struct file *file)
{
	if (unlikely(!audit_dummy_context()))
		__audit_file(file);
}
static inline void audit_inode_parent_hidden(struct filename *name,
					     const struct dentry *dentry)
{
	if (unlikely(!audit_dummy_context()))
		__audit_inode(name, dentry,
			      AUDIT_INODE_PARENT | AUDIT_INODE_HIDDEN);
}
static inline void audit_inode_child(struct inode *parent,
				     const struct dentry *dentry,
				     const unsigned char type)
{
	if (unlikely(!audit_dummy_context()))
		__audit_inode_child(parent, dentry, type);
}
void audit_core_dumps(long signr);

static inline void audit_ptrace(struct task_struct *t)
{
	if (unlikely(!audit_dummy_context()))
		__audit_ptrace(t);
}

				/* Private API (for audit.c only) */
extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid,
				 umode_t mode);
extern void __audit_bprm(struct linux_binprm *bprm);
extern int __audit_socketcall(int nargs, unsigned long *args);
extern int __audit_sockaddr(int len, void *addr);
extern void __audit_fd_pair(int fd1, int fd2);
extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len,
				unsigned int msg_prio,
				const struct timespec64 *abs_timeout);
extern void __audit_mq_notify(mqd_t mqdes,
			      const struct sigevent *notification);
extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat);
extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
				  const struct cred *new,
				  const struct cred *old);
extern void __audit_log_capset(const struct cred *new, const struct cred *old);
extern void __audit_mmap_fd(int fd, int flags);
extern void __audit_openat2_how(struct open_how *how);
extern void __audit_log_kern_module(char *name);
extern void __audit_fanotify(u32 response,
			     struct fanotify_response_info_audit_rule *friar);
extern void __audit_tk_injoffset(struct timespec64 offset);
extern void __audit_ntp_log(const struct audit_ntp_data *ad);
extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries,
			      enum audit_nfcfgop op, gfp_t gfp);

static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
	if (unlikely(!audit_dummy_context()))
		__audit_ipc_obj(ipcp);
}
static inline void audit_fd_pair(int fd1, int fd2)
{
	if (unlikely(!audit_dummy_context()))
		__audit_fd_pair(fd1, fd2);
}
static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
				      gid_t gid, umode_t mode)
{
	if (unlikely(!audit_dummy_context()))
		__audit_ipc_set_perm(qbytes, uid, gid, mode);
}
static inline void audit_bprm(struct linux_binprm *bprm)
{
	if (unlikely(!audit_dummy_context()))
		__audit_bprm(bprm);
}
static inline int audit_socketcall(int nargs, unsigned long *args)
{
	if (unlikely(!audit_dummy_context()))
		return __audit_socketcall(nargs, args);
	return 0;
}
static inline int audit_socketcall_compat(int nargs, u32 *args)
{
	unsigned long a[AUDITSC_ARGS];
	int i;

	if (audit_dummy_context())
		return 0;

	for (i = 0; i < nargs; i++)
		a[i] = (unsigned long)args[i];
	return __audit_socketcall(nargs, a);
}
static inline int audit_sockaddr(int len, void *addr)
{
	if (unlikely(!audit_dummy_context()))
		return __audit_sockaddr(len, addr);
	return 0;
}
static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
{
	if (unlikely(!audit_dummy_context()))
		__audit_mq_open(oflag, mode, attr);
}
static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len,
				     unsigned int msg_prio,
				     const struct timespec64 *abs_timeout)
{
	if (unlikely(!audit_dummy_context()))
		__audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout);
}
static inline void audit_mq_notify(mqd_t mqdes,
				   const struct sigevent *notification)
{
	if (unlikely(!audit_dummy_context()))
		__audit_mq_notify(mqdes, notification);
}
static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
{
	if (unlikely(!audit_dummy_context()))
		__audit_mq_getsetattr(mqdes, mqstat);
}
static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm,
				       const struct cred *new,
				       const struct cred *old)
{
	if (unlikely(!audit_dummy_context()))
		return __audit_log_bprm_fcaps(bprm, new, old);
	return 0;
}
static inline void audit_log_capset(const struct cred *new,
				    const struct cred *old)
{
	if (unlikely(!audit_dummy_context()))
		__audit_log_capset(new, old);
}
static inline void audit_mmap_fd(int fd, int flags)
{
	if (unlikely(!audit_dummy_context()))
		__audit_mmap_fd(fd, flags);
}
static inline void audit_openat2_how(struct open_how *how)
{
	if (unlikely(!audit_dummy_context()))
		__audit_openat2_how(how);
}
static inline void audit_log_kern_module(char *name)
{
	if (!audit_dummy_context())
		__audit_log_kern_module(name);
}
static inline void audit_fanotify(u32 response,
				  struct fanotify_response_info_audit_rule *friar)
{
	if (!audit_dummy_context())
		__audit_fanotify(response, friar);
}
static inline void audit_tk_injoffset(struct timespec64 offset)
{
	/* ignore no-op events */
	if (offset.tv_sec == 0 && offset.tv_nsec == 0)
		return;

	if (!audit_dummy_context())
		__audit_tk_injoffset(offset);
}
static inline void audit_ntp_init(struct audit_ntp_data *ad)
{
	memset(ad, 0, sizeof(*ad));
}
static inline void audit_ntp_set_old(struct audit_ntp_data *ad,
				     enum audit_ntp_type type, long long val)
{
	ad->vals[type].oldval = val;
}
static inline void audit_ntp_set_new(struct audit_ntp_data *ad,
				     enum audit_ntp_type type, long long val)
{
	ad->vals[type].newval = val;
}
static inline void audit_ntp_log(const struct audit_ntp_data *ad)
{
	if (!audit_dummy_context())
		__audit_ntp_log(ad);
}
static inline void audit_log_nfcfg(const char *name, u8 af,
				   unsigned int nentries,
				   enum audit_nfcfgop op, gfp_t gfp)
{
	if (audit_enabled)
		__audit_log_nfcfg(name, af, nentries, op, gfp);
}

extern int audit_n_rules;
extern int audit_signals;
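/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * how a clock-adjusting path would use the audit_ntp_* helpers above. The
 * old/new offset values are arbitrary example numbers.
 */
static inline void audit_example_ntp_usage(void)
{
	struct audit_ntp_data ad;

	audit_ntp_init(&ad);
	audit_ntp_set_old(&ad, AUDIT_NTP_OFFSET, 0);	/* value before */
	audit_ntp_set_new(&ad, AUDIT_NTP_OFFSET, 500);	/* value after */
	audit_ntp_log(&ad);	/* emits one record if this task is audited */
}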
#else /* CONFIG_AUDITSYSCALL */
static inline int audit_alloc(struct task_struct *task)
{
	return 0;
}
static inline void audit_free(struct task_struct *task)
{ }
static inline void audit_uring_entry(u8 op)
{ }
static inline void audit_uring_exit(int success, long code)
{ }
static inline void audit_syscall_entry(int major, unsigned long a0,
				       unsigned long a1, unsigned long a2,
				       unsigned long a3)
{ }
static inline void audit_syscall_exit(void *pt_regs)
{ }
static inline bool audit_dummy_context(void)
{
	return true;
}
static inline void audit_set_context(struct task_struct *task,
				     struct audit_context *ctx)
{ }
static inline struct audit_context *audit_context(void)
{
	return NULL;
}
static inline struct filename *audit_reusename(const __user char *name)
{
	return NULL;
}
static inline void audit_getname(struct filename *name)
{ }
static inline void audit_inode(struct filename *name,
			       const struct dentry *dentry,
			       unsigned int aflags)
{ }
static inline void audit_file(struct file *file)
{ }
static inline void audit_inode_parent_hidden(struct filename *name,
					     const struct dentry *dentry)
{ }
static inline void audit_inode_child(struct inode *parent,
				     const struct dentry *dentry,
				     const unsigned char type)
{ }
static inline void audit_core_dumps(long signr)
{ }
static inline void audit_seccomp(unsigned long syscall, long signr, int code)
{ }
static inline void audit_seccomp_actions_logged(const char *names,
						const char *old_names, int res)
{ }
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{ }
static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
				      gid_t gid, umode_t mode)
{ }
static inline void audit_bprm(struct linux_binprm *bprm)
{ }
static inline int audit_socketcall(int nargs, unsigned long *args)
{
	return 0;
}
static inline int audit_socketcall_compat(int nargs, u32 *args)
{
	return 0;
}
static inline void audit_fd_pair(int fd1, int fd2)
{ }
static inline int audit_sockaddr(int len, void *addr)
{
	return 0;
}
static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
{ }
static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len,
				     unsigned int msg_prio,
				     const struct timespec64 *abs_timeout)
{ }
static inline void audit_mq_notify(mqd_t mqdes,
				   const struct sigevent *notification)
{ }
static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
{ }
static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm,
				       const struct cred *new,
				       const struct cred *old)
{
	return 0;
}
static inline void audit_log_capset(const struct cred *new,
				    const struct cred *old)
{ }
static inline void audit_mmap_fd(int fd, int flags)
{ }
static inline void audit_openat2_how(struct open_how *how)
{ }
static inline void audit_log_kern_module(char *name)
{ }
static inline void audit_fanotify(u32 response,
				  struct fanotify_response_info_audit_rule *friar)
{ }
static inline void audit_tk_injoffset(struct timespec64 offset)
{ }
static inline void audit_ntp_init(struct audit_ntp_data *ad)
{ }
static inline void audit_ntp_set_old(struct audit_ntp_data *ad,
				     enum audit_ntp_type type, long long val)
{ }
static inline void audit_ntp_set_new(struct audit_ntp_data *ad,
				     enum audit_ntp_type type, long long val)
{ }
static inline void audit_ntp_log(const struct audit_ntp_data *ad)
{ }
static inline void audit_ptrace(struct task_struct *t)
{ }
static inline void audit_log_nfcfg(const char *name, u8 af,
				   unsigned int nentries,
				   enum audit_nfcfgop op, gfp_t gfp)
{ }
#define audit_n_rules	0
#define audit_signals	0
#endif /* CONFIG_AUDITSYSCALL */

static inline bool audit_loginuid_set(struct task_struct *tsk)
{
	return uid_valid(audit_get_loginuid(tsk));
}

#endif /* _LINUX_AUDIT_H_ */
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch.
 * See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/sched/ext.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/kmsan.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/memblock.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/tty.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
#include <linux/stackleak.h>
#include <linux/kasan.h>
#include <linux/scs.h>
#include <linux/io_uring.h>
#include <linux/bpf.h>
#include <linux/stackprotector.h>
#include <linux/user_events.h>
#include <linux/iommu.h>
#include <linux/rseq.h>
#include <uapi/linux/pidfd.h>
#include <linux/pidfs.h>
#include <linux/tick.h>

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

#include <kunit/visibility.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count. */
static int max_threads;		/* tunable limit on nr_threads */

#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)

static const char * const resident_page_types[] = {
	NAMED_ARRAY_INDEX(MM_FILEPAGES),
	NAMED_ARRAY_INDEX(MM_ANONPAGES),
	NAMED_ARRAY_INDEX(MM_SWAPENTS),
	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
};
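/*
 * Worked example (editor's note, not in the original file): via
 * __stringify(), NAMED_ARRAY_INDEX(MM_FILEPAGES) expands to
 *
 *	[MM_FILEPAGES] = "MM_FILEPAGES",
 *
 * so each counter's array slot is labelled with its own enum name, and the
 * array stays correct even if the enum values are reordered.
 */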
DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

# ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

struct vm_stack {
	struct rcu_head rcu;
	struct vm_struct *stack_vm_area;
};

static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
{
	unsigned int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *tmp = NULL;

		if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm))
			return true;
	}
	return false;
}

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);

	if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
		return;

	vfree(vm_stack);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct vm_stack *vm_stack = tsk->stack;

	vm_stack->stack_vm_area = tsk->stack_vm_area;
	call_rcu(&vm_stack->rcu, thread_stack_free_rcu);
}

static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}

static int memcg_charge_kernel_stack(struct vm_struct *vm)
{
	int i;
	int ret;
	int nr_charged = 0;

	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
		if (ret)
			goto err;
		nr_charged++;
	}
	return 0;
err:
	for (i = 0; i < nr_charged; i++)
		memcg_kmem_uncharge_page(vm->pages[i], 0);
	return ret;
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct vm_struct *vm;
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);
		if (!s)
			continue;

		/* Reset stack metadata. */
		kasan_unpoison_range(s->addr, THREAD_SIZE);

		stack = kasan_reset_tag(s->addr);

		/* Clear stale pointers from reused stack. */
		memset(stack, 0, THREAD_SIZE);

		if (memcg_charge_kernel_stack(s)) {
			vfree(s->addr);
			return -ENOMEM;
		}

		tsk->stack_vm_area = s;
		tsk->stack = stack;
		return 0;
	}

	/*
	 * Allocated stacks are cached and later reused by new threads,
	 * so memcg accounting is performed manually on assigning/releasing
	 * stacks to tasks. Drop __GFP_ACCOUNT.
	 */
	stack = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN,
			       THREADINFO_GFP & ~__GFP_ACCOUNT,
			       node, __builtin_return_address(0));
	if (!stack)
		return -ENOMEM;

	vm = find_vm_area(stack);
	if (memcg_charge_kernel_stack(vm)) {
		vfree(stack);
		return -ENOMEM;
	}
	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	tsk->stack_vm_area = vm;
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return 0;
}

static void free_thread_stack(struct task_struct *tsk)
{
	if (!try_release_thread_stack_to_cache(tsk->stack_vm_area))
		thread_stack_delayed_free(tsk);

	tsk->stack = NULL;
	tsk->stack_vm_area = NULL;
}

# else /* !CONFIG_VMAP_STACK */

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	__free_pages(virt_to_page(rh), THREAD_SIZE_ORDER);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	if (likely(page)) {
		tsk->stack = kasan_reset_tag(page_address(page));
		return 0;
	}
	return -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}

# endif /* CONFIG_VMAP_STACK */
# else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */

static struct kmem_cache *thread_stack_cache;

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	kmem_cache_free(thread_stack_cache, rh);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	unsigned long *stack;

	stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return stack ? 0 : -ENOMEM;
}
static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
						THREAD_SIZE, THREAD_SIZE, 0, 0,
						THREAD_SIZE, NULL);
	BUG_ON(thread_stack_cache == NULL);
}

# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
static struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return NULL;

	vma_init(vma, mm);

	return vma;
}

static void vm_area_init_from(const struct vm_area_struct *src,
			      struct vm_area_struct *dest)
{
	dest->vm_mm = src->vm_mm;
	dest->vm_ops = src->vm_ops;
	dest->vm_start = src->vm_start;
	dest->vm_end = src->vm_end;
	dest->anon_vma = src->anon_vma;
	dest->vm_pgoff = src->vm_pgoff;
	dest->vm_file = src->vm_file;
	dest->vm_private_data = src->vm_private_data;
	vm_flags_init(dest, src->vm_flags);
	memcpy(&dest->vm_page_prot, &src->vm_page_prot,
	       sizeof(dest->vm_page_prot));
	/*
	 * src->shared.rb may be modified concurrently when called from
	 * dup_mmap(), but the clone will reinitialize it.
	 */
	data_race(memcpy(&dest->shared, &src->shared, sizeof(dest->shared)));
	memcpy(&dest->vm_userfaultfd_ctx, &src->vm_userfaultfd_ctx,
	       sizeof(dest->vm_userfaultfd_ctx));
#ifdef CONFIG_ANON_VMA_NAME
	dest->anon_name = src->anon_name;
#endif
#ifdef CONFIG_SWAP
	memcpy(&dest->swap_readahead_info, &src->swap_readahead_info,
	       sizeof(dest->swap_readahead_info));
#endif
#ifndef CONFIG_MMU
	dest->vm_region = src->vm_region;
#endif
#ifdef CONFIG_NUMA
	dest->vm_policy = src->vm_policy;
#endif
}

struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);

	if (!new)
		return NULL;

	ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
	ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
	vm_area_init_from(orig, new);
	vma_lock_init(new, true);
	INIT_LIST_HEAD(&new->anon_vma_chain);
	vma_numab_state_init(new);
	dup_anon_vma_name(orig, new);

	/* track_pfn_copy() will later take care of copying internal state. */
	if (unlikely(new->vm_flags & VM_PFNMAP))
		untrack_pfn_clear(new);

	return new;
}

void vm_area_free(struct vm_area_struct *vma)
{
	/* The vma should be detached while being destroyed. */
	vma_assert_detached(vma);
	vma_numab_state_free(vma);
	free_anon_vma_name(vma);
	kmem_cache_free(vm_area_cachep, vma);
}
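/*
 * Illustrative sketch (editor's addition, not in the original file): the
 * bare allocate/free lifecycle of the helpers above. Real callers also set
 * up the full range and link the vma into the mm's maple tree before it
 * becomes visible; the addresses below are made-up example values, and a
 * freshly allocated vma is still detached, so freeing it directly is safe.
 */
static inline int example_vma_alloc_free(struct mm_struct *mm)
{
	struct vm_area_struct *vma = vm_area_alloc(mm);

	if (!vma)
		return -ENOMEM;
	vma->vm_start = 0x1000;		/* hypothetical start */
	vma->vm_end = 0x2000;		/* hypothetical end */
	vm_area_free(vma);		/* never attached, so safe to free */
	return 0;
}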
static void account_kernel_stack(struct task_struct *tsk, int account)
{
	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm = task_stack_vm_area(tsk);
		int i;

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
					      account * (PAGE_SIZE / 1024));
	} else {
		void *stack = task_stack_page(tsk);

		/* All stack pages are in the same node. */
		mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
				      account * (THREAD_SIZE / 1024));
	}
}

void exit_task_stack_account(struct task_struct *tsk)
{
	account_kernel_stack(tsk, -1);

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm;
		int i;

		vm = task_stack_vm_area(tsk);
		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			memcg_kmem_uncharge_page(vm->pages[i], 0);
	}
}

static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
		return;	/* Better to leak the stack than to free prematurely */

	free_thread_stack(tsk);
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (refcount_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif

void free_task(struct task_struct *tsk)
{
#ifdef CONFIG_SECCOMP
	WARN_ON_ONCE(tsk->seccomp.filter);
#endif
	release_user_cpus_ptr(tsk);
	scs_release(tsk);

#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	bpf_task_storage_free(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct file *exe_file;

	exe_file = get_mm_exe_file(oldmm);
	RCU_INIT_POINTER(mm->exe_file, exe_file);
	/*
	 * We depend on the oldmm having properly denied write access to the
	 * exe_file already.
	 */
	if (exe_file && exe_file_deny_write_access(exe_file))
		pr_warn_once("exe_file_deny_write_access() failed in %s\n",
			     __func__);
}

#ifdef CONFIG_MMU
static __latent_entropy int dup_mmap(struct mm_struct *mm,
				     struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp;
	int retval;
	unsigned long charge = 0;
	LIST_HEAD(uf);
	VMA_ITERATOR(vmi, mm, 0);

	if (mmap_write_lock_killable(oldmm))
		return -EINTR;
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	dup_mm_exe_file(mm, oldmm);

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	/* Use __mt_dup() to efficiently build an identical maple tree. */
	retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL);
	if (unlikely(retval))
		goto out;

	mt_clear_in_rcu(vmi.mas.tree);
	for_each_vma(vmi, mpnt) {
		struct file *file;

		vma_start_write(mpnt);
		if (mpnt->vm_flags & VM_DONTCOPY) {
			retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start,
						    mpnt->vm_end, GFP_KERNEL);
			if (retval)
				goto loop_out;

			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		/*
		 * Don't duplicate many vmas if we've been oom-killed (for
		 * example)
		 */
		if (fatal_signal_pending(current)) {
			retval = -EINTR;
			goto loop_out;
		}
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}

		tmp = vm_area_dup(mpnt);
		if (!tmp)
			goto fail_nomem;

		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;

		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/*
			 * VM_WIPEONFORK gets a clean slate in the child.
			 * Don't prepare anon_vma until fault since we don't
			 * copy page for current vma.
			 */
			tmp->anon_vma = NULL;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		vm_flags_clear(tmp, VM_LOCKED_MASK);
		/*
		 * Copy/update hugetlb private vma information.
		 */
		if (is_vm_hugetlb_page(tmp))
			hugetlb_dup_vma_private(tmp);

		/*
		 * Link the vma into the MT. After using __mt_dup(), memory
		 * allocation is not necessary here, so it cannot fail.
		 */
		vma_iter_bulk_store(&vmi, tmp);

		mm->map_count++;

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		file = tmp->vm_file;
		if (file) {
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			i_mmap_lock_write(mapping);
			if (vma_is_shared_maywrite(tmp))
				mapping_allow_writable(mapping);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
						       &mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(tmp, mpnt);

		if (retval) {
			mpnt = vma_next(&vmi);
			goto loop_out;
		}
	}
	/* a new mm has just been created */
	retval = arch_dup_mmap(oldmm, mm);
loop_out:
	vma_iter_free(&vmi);
	if (!retval) {
		mt_set_in_rcu(vmi.mas.tree);
		ksm_fork(mm, oldmm);
		khugepaged_fork(mm, oldmm);
	} else {
		/*
		 * The entire maple tree has already been duplicated. If the
		 * mmap duplication fails, mark the failure point with
		 * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered,
		 * stop releasing VMAs that have not been duplicated after this
		 * point.
		 */
		if (mpnt) {
			mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
			mas_store(&vmi.mas, XA_ZERO_ENTRY);
			/* Avoid OOM iterating a broken tree */
			set_bit(MMF_OOM_SKIP, &mm->flags);
		}
		/*
		 * The mm_struct is going to exit, but the locks will be
		 * dropped first. Setting the mm_struct as unstable is
		 * advisable as it is not fully initialised.
		 */
		set_bit(MMF_UNSTABLE, &mm->flags);
	}
out:
	mmap_write_unlock(mm);
	flush_tlb_mm(oldmm);
	mmap_write_unlock(oldmm);
	if (!retval)
		dup_userfaultfd_complete(&uf);
	else
		dup_userfaultfd_fail(&uf);
	return retval;

fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	vm_area_free(tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto loop_out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	mmap_write_lock(oldmm);
	dup_mm_exe_file(mm, oldmm);
	mmap_write_unlock(oldmm);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
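/*
 * Context sketch (editor's note, simplified; not part of the original
 * file): dup_mmap() runs from dup_mm() during fork, roughly
 *
 *	mm = allocate_mm();
 *	memcpy(mm, oldmm, sizeof(*mm));
 *	if (!mm_init(mm, tsk, ...))
 *		goto fail;
 *	err = dup_mmap(mm, oldmm);
 *
 * so a partially duplicated tree left behind on failure is torn down by the
 * normal mmput()/exit_mmap() path, guided by the XA_ZERO_ENTRY marker
 * stored above.
 */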
#ifdef CONFIG_MM_ID
static DEFINE_IDA(mm_ida);

static inline int mm_alloc_id(struct mm_struct *mm)
{
	int ret;

	ret = ida_alloc_range(&mm_ida, MM_ID_MIN, MM_ID_MAX, GFP_KERNEL);
	if (ret < 0)
		return ret;
	mm->mm_id = ret;
	return 0;
}

static inline void mm_free_id(struct mm_struct *mm)
{
	const mm_id_t id = mm->mm_id;

	mm->mm_id = MM_ID_DUMMY;
	if (id == MM_ID_DUMMY)
		return;
	if (WARN_ON_ONCE(id < MM_ID_MIN || id > MM_ID_MAX))
		return;
	ida_free(&mm_ida, id);
}
#else /* !CONFIG_MM_ID */
static inline int mm_alloc_id(struct mm_struct *mm) { return 0; }
static inline void mm_free_id(struct mm_struct *mm) {}
#endif /* CONFIG_MM_ID */

static void check_mm(struct mm_struct *mm)
{
	int i;

	BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
			 "Please make sure 'struct resident_page_types[]' is updated as well");

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = percpu_counter_sum(&mm->rss_stat[i]);

		if (unlikely(x))
			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
				 mm, resident_page_types[i], x);
	}

	if (mm_pgtables_bytes(mm))
		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
			 mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static void do_check_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;

	WARN_ON_ONCE(current->active_mm == mm);
}

static void do_shoot_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		WARN_ON_ONCE(current->mm);
		current->active_mm = &init_mm;
		switch_mm(mm, &init_mm, current);
	}
}

static void cleanup_lazy_tlbs(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) {
		/*
		 * In this case, lazy tlb mms are refcounted and would not
		 * reach __mmdrop until all CPUs have switched away and
		 * mmdrop()ed.
		 */
		return;
	}

	/*
	 * Lazy mm shootdown does not refcount "lazy tlb mm" usage, rather it
	 * requires lazy mm users to switch to another mm when the refcount
	 * drops to zero, before the mm is freed. This requires IPIs here to
	 * switch kernel threads to init_mm.
	 *
	 * archs that use IPIs to flush TLBs can piggy-back that lazy tlb mm
	 * switch with the final userspace teardown TLB flush which leaves the
	 * mm lazy on this CPU but no others, reducing the need for additional
	 * IPIs here. There are cases where a final IPI is still required here,
	 * such as the final mmdrop being performed on a different CPU than the
	 * one exiting, or kernel threads using the mm when userspace exits.
	 *
	 * IPI overheads have not been found to be expensive, but they could
	 * be reduced in a number of possible ways, for example (roughly
	 * increasing order of complexity):
	 * - The last lazy reference created by exit_mm() could instead switch
	 *   to init_mm, however it's probable this will run on the same CPU
	 *   immediately afterwards, so this may not reduce IPIs much.
	 * - A batch of mms requiring IPIs could be gathered and freed at once.
	 * - CPUs store active_mm where it can be remotely checked without a
	 *   lock, to filter out false-positives in the cpumask.
	 * - After mm_users or mm_count reaches zero, switching away from the
	 *   mm could clear mm_cpumask to reduce some IPIs, perhaps together
	 *   with some batching or delaying of the final IPIs.
	 * - A delayed freeing and RCU-like quiescing sequence based on mm
	 *   switching to avoid IPIs completely.
	 */
	on_each_cpu_mask(mm_cpumask(mm), do_shoot_lazy_tlb, (void *)mm, 1);
	if (IS_ENABLED(CONFIG_DEBUG_VM_SHOOT_LAZIES))
		on_each_cpu(do_check_lazy_tlb, (void *)mm, 1);
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput.  Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	WARN_ON_ONCE(mm == current->mm);

	/* Ensure no CPUs are using this as their lazy tlb mm */
	cleanup_lazy_tlbs(mm);

	WARN_ON_ONCE(mm == current->active_mm);
	mm_free_pgd(mm);
	mm_free_id(mm);
	destroy_context(mm);
	mmu_notifier_subscriptions_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	mm_pasid_drop(mm);
	mm_destroy_cid(mm);
	percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS);

	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

static void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm;

	mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (refcount_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(refcount_read(&tsk->usage));
	WARN_ON(tsk == current);

	sched_ext_free(tsk);
	io_uring_free(tsk);
	cgroup_free(tsk);
	task_numa_free(tsk, true);
	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);
	sched_core_free(tsk);
	free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __put_task_struct_rcu_cb(struct rcu_head *rhp)
{
	struct task_struct *task = container_of(rhp, struct task_struct, rcu);

	__put_task_struct(task);
}
EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb);

void __init __weak arch_task_cache_init(void) { }

/*
 * set_max_threads
 */
static void __init set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;
	unsigned long nr_pages = memblock_estimated_nr_free_pages();

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}
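/*
 * Worked example for set_max_threads() above (editor's note, assuming a
 * machine with 4 GiB of free pages and THREAD_SIZE = 16 KiB):
 *
 *	threads = (4 GiB) / (16 KiB * 8) = 2^32 / 2^17 = 32768
 *
 * i.e. thread structures may consume at most 1/8th of memory; the result is
 * then clamped to [MIN_THREADS, MAX_THREADS].
 */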
*/ if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64) threads = MAX_THREADS; else threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE, (u64) THREAD_SIZE * 8UL); if (threads > max_threads_suggested) threads = max_threads_suggested; max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); } #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT /* Initialized by the architecture: */ int arch_task_struct_size __read_mostly; #endif static void __init task_struct_whitelist(unsigned long *offset, unsigned long *size) { /* Fetch thread_struct whitelist for the architecture. */ arch_thread_struct_whitelist(offset, size); /* * Handle zero-sized whitelist or empty thread_struct, otherwise * adjust offset to position of thread_struct in task_struct. */ if (unlikely(*size == 0)) *offset = 0; else *offset += offsetof(struct task_struct, thread); } void __init fork_init(void) { int i; #ifndef ARCH_MIN_TASKALIGN #define ARCH_MIN_TASKALIGN 0 #endif int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN); unsigned long useroffset, usersize; /* create a slab on which task_structs can be allocated */ task_struct_whitelist(&useroffset, &usersize); task_struct_cachep = kmem_cache_create_usercopy("task_struct", arch_task_struct_size, align, SLAB_PANIC|SLAB_ACCOUNT, useroffset, usersize, NULL); /* do the arch specific task caches init */ arch_task_cache_init(); set_max_threads(MAX_THREADS); init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; init_task.signal->rlim[RLIMIT_SIGPENDING] = init_task.signal->rlim[RLIMIT_NPROC]; for (i = 0; i < UCOUNT_COUNTS; i++) init_user_ns.ucount_max[i] = max_threads/2; set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, RLIM_INFINITY); set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY); set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY); set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY); #ifdef CONFIG_VMAP_STACK cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache", NULL, free_vm_stack_cache); #endif scs_init(); lockdep_init_task(&init_task); uprobes_init(); } int __weak arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { *dst = *src; return 0; } void set_task_stack_end_magic(struct task_struct *tsk) { unsigned long *stackend; stackend = end_of_stack(tsk); *stackend = STACK_END_MAGIC; /* for overflow detection */ } static struct task_struct *dup_task_struct(struct task_struct *orig, int node) { struct task_struct *tsk; int err; if (node == NUMA_NO_NODE) node = tsk_fork_get_node(orig); tsk = alloc_task_struct_node(node); if (!tsk) return NULL; err = arch_dup_task_struct(tsk, orig); if (err) goto free_tsk; err = alloc_thread_stack_node(tsk, node); if (err) goto free_tsk; #ifdef CONFIG_THREAD_INFO_IN_TASK refcount_set(&tsk->stack_refcount, 1); #endif account_kernel_stack(tsk, 1); err = scs_prepare(tsk, node); if (err) goto free_stack; #ifdef CONFIG_SECCOMP /* * We must handle setting up seccomp filters once we're under * the sighand lock in case orig has changed between now and * then. Until then, filter must be NULL to avoid messing up * the usage counts on the error path calling free_task. 
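	 * (The real copy happens later, in copy_seccomp(), which runs with
	 * sighand->siglock held.)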
*/ tsk->seccomp.filter = NULL; #endif setup_thread_stack(tsk, orig); clear_user_return_notifier(tsk); clear_tsk_need_resched(tsk); set_task_stack_end_magic(tsk); clear_syscall_work_syscall_user_dispatch(tsk); #ifdef CONFIG_STACKPROTECTOR tsk->stack_canary = get_random_canary(); #endif if (orig->cpus_ptr == &orig->cpus_mask) tsk->cpus_ptr = &tsk->cpus_mask; dup_user_cpus_ptr(tsk, orig, node); /* * One for the user space visible state that goes away when reaped. * One for the scheduler. */ refcount_set(&tsk->rcu_users, 2); /* One for the rcu users */ refcount_set(&tsk->usage, 1); #ifdef CONFIG_BLK_DEV_IO_TRACE tsk->btrace_seq = 0; #endif tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; tsk->worker_private = NULL; kcov_task_init(tsk); kmsan_task_create(tsk); kmap_local_fork(tsk); #ifdef CONFIG_FAULT_INJECTION tsk->fail_nth = 0; #endif #ifdef CONFIG_BLK_CGROUP tsk->throttle_disk = NULL; tsk->use_memdelay = 0; #endif #ifdef CONFIG_ARCH_HAS_CPU_PASID tsk->pasid_activated = 0; #endif #ifdef CONFIG_MEMCG tsk->active_memcg = NULL; #endif #ifdef CONFIG_X86_BUS_LOCK_DETECT tsk->reported_split_lock = 0; #endif #ifdef CONFIG_SCHED_MM_CID tsk->mm_cid = -1; tsk->last_mm_cid = -1; tsk->mm_cid_active = 0; tsk->migrate_from_cpu = -1; #endif return tsk; free_stack: exit_task_stack_account(tsk); free_thread_stack(tsk); free_tsk: free_task_struct(tsk); return NULL; } __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT; static int __init coredump_filter_setup(char *s) { default_dump_filter = (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) & MMF_DUMP_FILTER_MASK; return 1; } __setup("coredump_filter=", coredump_filter_setup); #include <linux/init_task.h> static void mm_init_aio(struct mm_struct *mm) { #ifdef CONFIG_AIO spin_lock_init(&mm->ioctx_lock); mm->ioctx_table = NULL; #endif } static __always_inline void mm_clear_owner(struct mm_struct *mm, struct task_struct *p) { #ifdef CONFIG_MEMCG if (mm->owner == p) WRITE_ONCE(mm->owner, NULL); #endif } static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) { #ifdef CONFIG_MEMCG mm->owner = p; #endif } static void mm_init_uprobes_state(struct mm_struct *mm) { #ifdef CONFIG_UPROBES mm->uprobes_state.xol_area = NULL; #endif } static void mmap_init_lock(struct mm_struct *mm) { init_rwsem(&mm->mmap_lock); mm_lock_seqcount_init(mm); #ifdef CONFIG_PER_VMA_LOCK rcuwait_init(&mm->vma_writer_wait); #endif } static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, struct user_namespace *user_ns) { mt_init_flags(&mm->mm_mt, MM_MT_FLAGS); mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock); atomic_set(&mm->mm_users, 1); atomic_set(&mm->mm_count, 1); seqcount_init(&mm->write_protect_seq); mmap_init_lock(mm); INIT_LIST_HEAD(&mm->mmlist); mm_pgtables_bytes_init(mm); mm->map_count = 0; mm->locked_vm = 0; atomic64_set(&mm->pinned_vm, 0); memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); spin_lock_init(&mm->page_table_lock); spin_lock_init(&mm->arg_lock); mm_init_cpumask(mm); mm_init_aio(mm); mm_init_owner(mm, p); mm_pasid_init(mm); RCU_INIT_POINTER(mm->exe_file, NULL); mmu_notifier_subscriptions_init(mm); init_tlb_flush_pending(mm); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS) mm->pmd_huge_pte = NULL; #endif mm_init_uprobes_state(mm); hugetlb_count_init(mm); if (current->mm) { mm->flags = mmf_init_flags(current->mm->flags); mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; } else { mm->flags = 
default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (mm_alloc_id(mm))
		goto fail_noid;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	if (mm_alloc_cid(mm, p))
		goto fail_cid;

	if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT,
				     NR_MM_COUNTERS))
		goto fail_pcpu;

	mm->user_ns = get_user_ns(user_ns);
	lru_gen_init_mm(mm);
	return mm;

fail_pcpu:
	mm_destroy_cid(mm);
fail_cid:
	destroy_context(mm);
fail_nocontext:
	mm_free_id(mm);
fail_noid:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current, current_user_ns());
}
EXPORT_SYMBOL_IF_KUNIT(mm_alloc);

static inline void __mmput(struct mm_struct *mm)
{
	VM_BUG_ON(atomic_read(&mm->mm_users));

	uprobe_clear_state(mm);
	exit_aio(mm);
	ksm_exit(mm);
	khugepaged_exit(mm); /* must run before exit_mmap */
	exit_mmap(mm);
	mm_put_huge_zero_folio(mm);
	set_mm_exe_file(mm, NULL);
	if (!list_empty(&mm->mmlist)) {
		spin_lock(&mmlist_lock);
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
	}
	if (mm->binfmt)
		module_put(mm->binfmt->module);
	lru_gen_del_mm(mm);
	mmdrop(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users))
		__mmput(mm);
}
EXPORT_SYMBOL_GPL(mmput);

#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct,
					    async_put_work);

	__mmput(mm);
}

void mmput_async(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		INIT_WORK(&mm->async_put_work, mmput_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
EXPORT_SYMBOL_GPL(mmput_async);
#endif

/**
 * set_mm_exe_file - change a reference to the mm's executable file
 * @mm: The mm to change.
 * @new_exe_file: The new file to use.
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive left, in execve it happens before
 * the new mm is made visible to anyone.
 *
 * Can only fail if new_exe_file != NULL.
 */
int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct file *old_exe_file;

	/*
	 * It is safe to dereference the exe_file without RCU as
	 * this function is only called if nobody else can access
	 * this mm -- see comment above for justification.
	 */
	old_exe_file = rcu_dereference_raw(mm->exe_file);

	if (new_exe_file) {
		/*
		 * We expect the caller (i.e., sys_execve) to have already
		 * denied write access, so this is unlikely to fail.
		 */
		if (unlikely(exe_file_deny_write_access(new_exe_file)))
			return -EACCES;
		get_file(new_exe_file);
	}
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	if (old_exe_file) {
		exe_file_allow_write_access(old_exe_file);
		fput(old_exe_file);
	}
	return 0;
}

/**
 * replace_mm_exe_file - replace a reference to the mm's executable file
 * @mm: The mm to change.
 * @new_exe_file: The new file to use.
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE).
 */
int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct vm_area_struct *vma;
	struct file *old_exe_file;
	int ret = 0;

	/* Forbid mm->exe_file change if old file still mapped.
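	 * Each VMA is scanned under mmap_read_lock() below, and -EBUSY is
	 * returned if any vma->vm_file still resolves to the old exe file's
	 * path.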
*/ old_exe_file = get_mm_exe_file(mm); if (old_exe_file) { VMA_ITERATOR(vmi, mm, 0); mmap_read_lock(mm); for_each_vma(vmi, vma) { if (!vma->vm_file) continue; if (path_equal(&vma->vm_file->f_path, &old_exe_file->f_path)) { ret = -EBUSY; break; } } mmap_read_unlock(mm); fput(old_exe_file); if (ret) return ret; } ret = exe_file_deny_write_access(new_exe_file); if (ret) return -EACCES; get_file(new_exe_file); /* set the new file */ mmap_write_lock(mm); old_exe_file = rcu_dereference_raw(mm->exe_file); rcu_assign_pointer(mm->exe_file, new_exe_file); mmap_write_unlock(mm); if (old_exe_file) { exe_file_allow_write_access(old_exe_file); fput(old_exe_file); } return 0; } /** * get_mm_exe_file - acquire a reference to the mm's executable file * @mm: The mm of interest. * * Returns %NULL if mm has no associated executable file. * User must release file via fput(). */ struct file *get_mm_exe_file(struct mm_struct *mm) { struct file *exe_file; rcu_read_lock(); exe_file = get_file_rcu(&mm->exe_file); rcu_read_unlock(); return exe_file; } /** * get_task_exe_file - acquire a reference to the task's executable file * @task: The task. * * Returns %NULL if task's mm (if any) has no associated executable file or * this is a kernel thread with borrowed mm (see the comment above get_task_mm). * User must release file via fput(). */ struct file *get_task_exe_file(struct task_struct *task) { struct file *exe_file = NULL; struct mm_struct *mm; if (task->flags & PF_KTHREAD) return NULL; task_lock(task); mm = task->mm; if (mm) exe_file = get_mm_exe_file(mm); task_unlock(task); return exe_file; } /** * get_task_mm - acquire a reference to the task's mm * @task: The task. * * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning * this kernel workthread has transiently adopted a user mm with use_mm, * to do its AIO) is not set and if so returns a reference to it, after * bumping up the use count. User must release the mm via mmput() * after use. Typically used by /proc and ptrace. */ struct mm_struct *get_task_mm(struct task_struct *task) { struct mm_struct *mm; if (task->flags & PF_KTHREAD) return NULL; task_lock(task); mm = task->mm; if (mm) mmget(mm); task_unlock(task); return mm; } EXPORT_SYMBOL_GPL(get_task_mm); static bool may_access_mm(struct mm_struct *mm, struct task_struct *task, unsigned int mode) { if (mm == current->mm) return true; if (ptrace_may_access(task, mode)) return true; if ((mode & PTRACE_MODE_READ) && perfmon_capable()) return true; return false; } struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) { struct mm_struct *mm; int err; err = down_read_killable(&task->signal->exec_update_lock); if (err) return ERR_PTR(err); mm = get_task_mm(task); if (!mm) { mm = ERR_PTR(-ESRCH); } else if (!may_access_mm(mm, task, mode)) { mmput(mm); mm = ERR_PTR(-EACCES); } up_read(&task->signal->exec_update_lock); return mm; } static void complete_vfork_done(struct task_struct *tsk) { struct completion *vfork; task_lock(tsk); vfork = tsk->vfork_done; if (likely(vfork)) { tsk->vfork_done = NULL; complete(vfork); } task_unlock(tsk); } static int wait_for_vfork_done(struct task_struct *child, struct completion *vfork) { unsigned int state = TASK_KILLABLE|TASK_FREEZABLE; int killed; cgroup_enter_frozen(); killed = wait_for_completion_state(vfork, state); cgroup_leave_frozen(false); if (killed) { task_lock(child); child->vfork_done = NULL; task_unlock(child); } put_task_struct(child); return killed; } /* Please note the differences between mmput and mm_release. 
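 */

/*
 * A minimal usage sketch (the helper below is hypothetical, not part of
 * the original file): the canonical pattern for the accessors above is
 * to take a counted reference with get_task_mm(), use the mm, and then
 * release it with mmput().
 */
static inline unsigned long example_task_total_vm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	unsigned long total_vm = 0;

	if (mm) {
		total_vm = mm->total_vm;	/* safe while the reference is held */
		mmput(mm);			/* may sleep; not for atomic context */
	}
	return total_vm;
}

/*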
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one. Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	uprobe_free_utask(tsk);

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/*
	 * Signal userspace if we're not exiting with a core dump
	 * because we want to leave the value intact for debugging
	 * purposes.
	 */
	if (tsk->clear_child_tid) {
		if (atomic_read(&mm->mm_users) > 1) {
			/*
			 * We don't check the error code - if userspace has
			 * not set up a proper pointer then tough luck.
			 */
			put_user(0, tsk->clear_child_tid);
			do_futex(tsk->clear_child_tid, FUTEX_WAKE,
					1, NULL, NULL, 0, 0);
		}
		tsk->clear_child_tid = NULL;
	}

	/*
	 * All done, finally we can wake up parent and return this mm to him.
	 * Also kthread_stop() uses this completion for synchronization.
	 */
	if (tsk->vfork_done)
		complete_vfork_done(tsk);
}

void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	futex_exit_release(tsk);
	mm_release(tsk, mm);
}

void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	futex_exec_release(tsk);
	mm_release(tsk, mm);
}

/**
 * dup_mm() - duplicates an existing mm structure
 * @tsk: the task_struct with which the new mm will be associated.
 * @oldmm: the mm to duplicate.
 *
 * Allocates a new mm structure and duplicates the provided @oldmm structure
 * content into it.
 *
 * Return: the duplicated mm or NULL on failure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk,
				struct mm_struct *oldmm)
{
	struct mm_struct *mm;
	int err;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm, tsk, mm->user_ns))
		goto fail_nomem;

	uprobe_start_dup_mmap();
	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;
	uprobe_end_dup_mmap();

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	if (mm->binfmt && !try_module_get(mm->binfmt->module))
		goto free_pt;

	return mm;

free_pt:
	/* don't put binfmt in mmput, we haven't got module yet */
	mm->binfmt = NULL;
	mm_init_owner(mm, NULL);
	mmput(mm);
	if (err)
		uprobe_end_dup_mmap();

fail_nomem:
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
	tsk->last_switch_time = 0;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
*/ oldmm = current->mm; if (!oldmm) return 0; if (clone_flags & CLONE_VM) { mmget(oldmm); mm = oldmm; } else { mm = dup_mm(tsk, current->mm); if (!mm) return -ENOMEM; } tsk->mm = mm; tsk->active_mm = mm; sched_mm_cid_fork(tsk); return 0; } static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) { struct fs_struct *fs = current->fs; if (clone_flags & CLONE_FS) { /* tsk->fs is already what we want */ spin_lock(&fs->lock); /* "users" and "in_exec" locked for check_unsafe_exec() */ if (fs->in_exec) { spin_unlock(&fs->lock); return -EAGAIN; } fs->users++; spin_unlock(&fs->lock); return 0; } tsk->fs = copy_fs_struct(fs); if (!tsk->fs) return -ENOMEM; return 0; } static int copy_files(unsigned long clone_flags, struct task_struct *tsk, int no_files) { struct files_struct *oldf, *newf; /* * A background process may not have any files ... */ oldf = current->files; if (!oldf) return 0; if (no_files) { tsk->files = NULL; return 0; } if (clone_flags & CLONE_FILES) { atomic_inc(&oldf->count); return 0; } newf = dup_fd(oldf, NULL); if (IS_ERR(newf)) return PTR_ERR(newf); tsk->files = newf; return 0; } static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) { struct sighand_struct *sig; if (clone_flags & CLONE_SIGHAND) { refcount_inc(&current->sighand->count); return 0; } sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); RCU_INIT_POINTER(tsk->sighand, sig); if (!sig) return -ENOMEM; refcount_set(&sig->count, 1); spin_lock_irq(&current->sighand->siglock); memcpy(sig->action, current->sighand->action, sizeof(sig->action)); spin_unlock_irq(&current->sighand->siglock); /* Reset all signal handler not set to SIG_IGN to SIG_DFL. */ if (clone_flags & CLONE_CLEAR_SIGHAND) flush_signal_handlers(tsk, 0); return 0; } void __cleanup_sighand(struct sighand_struct *sighand) { if (refcount_dec_and_test(&sighand->count)) { signalfd_cleanup(sighand); /* * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it * without an RCU grace period, see __lock_task_sighand(). */ kmem_cache_free(sighand_cachep, sighand); } } /* * Initialize POSIX timer handling for a thread group. 
*/ static void posix_cpu_timers_init_group(struct signal_struct *sig) { struct posix_cputimers *pct = &sig->posix_cputimers; unsigned long cpu_limit; cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); posix_cputimers_group_init(pct, cpu_limit); } static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) { struct signal_struct *sig; if (clone_flags & CLONE_THREAD) return 0; sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); tsk->signal = sig; if (!sig) return -ENOMEM; sig->nr_threads = 1; sig->quick_threads = 1; atomic_set(&sig->live, 1); refcount_set(&sig->sigcnt, 1); /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */ sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node); tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head); init_waitqueue_head(&sig->wait_chldexit); sig->curr_target = tsk; init_sigpending(&sig->shared_pending); INIT_HLIST_HEAD(&sig->multiprocess); seqlock_init(&sig->stats_lock); prev_cputime_init(&sig->prev_cputime); #ifdef CONFIG_POSIX_TIMERS INIT_HLIST_HEAD(&sig->posix_timers); INIT_HLIST_HEAD(&sig->ignored_posix_timers); hrtimer_setup(&sig->real_timer, it_real_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL); #endif task_lock(current->group_leader); memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); task_unlock(current->group_leader); posix_cpu_timers_init_group(sig); tty_audit_fork(sig); sched_autogroup_fork(sig); sig->oom_score_adj = current->signal->oom_score_adj; sig->oom_score_adj_min = current->signal->oom_score_adj_min; mutex_init(&sig->cred_guard_mutex); init_rwsem(&sig->exec_update_lock); return 0; } static void copy_seccomp(struct task_struct *p) { #ifdef CONFIG_SECCOMP /* * Must be called with sighand->lock held, which is common to * all threads in the group. Holding cred_guard_mutex is not * needed because this new task is not yet running and cannot * be racing exec. */ assert_spin_locked(&current->sighand->siglock); /* Ref-count the new filter user, and assign it. */ get_seccomp_filter(current); p->seccomp = current->seccomp; /* * Explicitly enable no_new_privs here in case it got set * between the task_struct being duplicated and holding the * sighand lock. The seccomp state and nnp must be in sync. */ if (task_no_new_privs(current)) task_set_no_new_privs(p); /* * If the parent gained a seccomp mode after copying thread * flags and between before we held the sighand lock, we have * to manually enable the seccomp thread flag here. 
*/ if (p->seccomp.mode != SECCOMP_MODE_DISABLED) set_task_syscall_work(p, SECCOMP); #endif } SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) { current->clear_child_tid = tidptr; return task_pid_vnr(current); } static void rt_mutex_init_task(struct task_struct *p) { raw_spin_lock_init(&p->pi_lock); #ifdef CONFIG_RT_MUTEXES p->pi_waiters = RB_ROOT_CACHED; p->pi_top_task = NULL; p->pi_blocked_on = NULL; #endif } static inline void init_task_pid_links(struct task_struct *task) { enum pid_type type; for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) INIT_HLIST_NODE(&task->pid_links[type]); } static inline void init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) { if (type == PIDTYPE_PID) task->thread_pid = pid; else task->signal->pids[type] = pid; } static inline void rcu_copy_process(struct task_struct *p) { #ifdef CONFIG_PREEMPT_RCU p->rcu_read_lock_nesting = 0; p->rcu_read_unlock_special.s = 0; p->rcu_blocked_node = NULL; INIT_LIST_HEAD(&p->rcu_node_entry); #endif /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_TASKS_RCU p->rcu_tasks_holdout = false; INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); p->rcu_tasks_idle_cpu = -1; INIT_LIST_HEAD(&p->rcu_tasks_exit_list); #endif /* #ifdef CONFIG_TASKS_RCU */ #ifdef CONFIG_TASKS_TRACE_RCU p->trc_reader_nesting = 0; p->trc_reader_special.s = 0; INIT_LIST_HEAD(&p->trc_holdout_list); INIT_LIST_HEAD(&p->trc_blkd_node); #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ } /** * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd * @pid: the struct pid for which to create a pidfd * @flags: flags of the new @pidfd * @ret: Where to return the file for the pidfd. * * Allocate a new file that stashes @pid and reserve a new pidfd number in the * caller's file descriptor table. The pidfd is reserved but not installed yet. * * The helper doesn't perform checks on @pid which makes it useful for pidfds * created via CLONE_PIDFD where @pid has no task attached when the pidfd and * pidfd file are prepared. * * If this function returns successfully the caller is responsible to either * call fd_install() passing the returned pidfd and pidfd file as arguments in * order to install the pidfd into its file descriptor table or they must use * put_unused_fd() and fput() on the returned pidfd and pidfd file * respectively. * * This function is useful when a pidfd must already be reserved but there * might still be points of failure afterwards and the caller wants to ensure * that no pidfd is leaked into its file descriptor table. * * Return: On success, a reserved pidfd is returned from the function and a new * pidfd file is returned in the last argument to the function. On * error, a negative error code is returned from the function and the * last argument remains unchanged. */ static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) { struct file *pidfd_file; CLASS(get_unused_fd, pidfd)(O_CLOEXEC); if (pidfd < 0) return pidfd; pidfd_file = pidfs_alloc_file(pid, flags | O_RDWR); if (IS_ERR(pidfd_file)) return PTR_ERR(pidfd_file); *ret = pidfd_file; return take_fd(pidfd); } /** * pidfd_prepare - allocate a new pidfd_file and reserve a pidfd * @pid: the struct pid for which to create a pidfd * @flags: flags of the new @pidfd * @ret: Where to return the pidfd. * * Allocate a new file that stashes @pid and reserve a new pidfd number in the * caller's file descriptor table. The pidfd is reserved but not installed yet. 
* * The helper verifies that @pid is still in use, without PIDFD_THREAD the * task identified by @pid must be a thread-group leader. * * If this function returns successfully the caller is responsible to either * call fd_install() passing the returned pidfd and pidfd file as arguments in * order to install the pidfd into its file descriptor table or they must use * put_unused_fd() and fput() on the returned pidfd and pidfd file * respectively. * * This function is useful when a pidfd must already be reserved but there * might still be points of failure afterwards and the caller wants to ensure * that no pidfd is leaked into its file descriptor table. * * Return: On success, a reserved pidfd is returned from the function and a new * pidfd file is returned in the last argument to the function. On * error, a negative error code is returned from the function and the * last argument remains unchanged. */ int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) { bool thread = flags & PIDFD_THREAD; if (!pid || !pid_has_task(pid, thread ? PIDTYPE_PID : PIDTYPE_TGID)) return -EINVAL; return __pidfd_prepare(pid, flags, ret); } static void __delayed_free_task(struct rcu_head *rhp) { struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); free_task(tsk); } static __always_inline void delayed_free_task(struct task_struct *tsk) { if (IS_ENABLED(CONFIG_MEMCG)) call_rcu(&tsk->rcu, __delayed_free_task); else free_task(tsk); } static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk) { /* Skip if kernel thread */ if (!tsk->mm) return; /* Skip if spawning a thread or using vfork */ if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM) return; /* We need to synchronize with __set_oom_adj */ mutex_lock(&oom_adj_mutex); set_bit(MMF_MULTIPROCESS, &tsk->mm->flags); /* Update the values in case they were changed after copy_signal */ tsk->signal->oom_score_adj = current->signal->oom_score_adj; tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min; mutex_unlock(&oom_adj_mutex); } #ifdef CONFIG_RV static void rv_task_fork(struct task_struct *p) { int i; for (i = 0; i < RV_PER_TASK_MONITORS; i++) p->rv[i].da_mon.monitoring = false; } #else #define rv_task_fork(p) do {} while (0) #endif /* * This creates a new process as a copy of the old one, * but does not actually start it yet. * * It copies the registers, and all the appropriate * parts of the process environment (as per the clone * flags). The actual kick-off is left to the caller. */ __latent_entropy struct task_struct *copy_process( struct pid *pid, int trace, int node, struct kernel_clone_args *args) { int pidfd = -1, retval; struct task_struct *p; struct multiprocess_signals delayed; struct file *pidfile = NULL; const u64 clone_flags = args->flags; struct nsproxy *nsp = current->nsproxy; /* * Don't allow sharing the root directory with processes in a different * namespace */ if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. */ if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) return ERR_PTR(-EINVAL); /* * Shared signal handlers imply shared VM. By way of the above, * thread groups also imply shared VM. Blocking this case allows * for various simplifications in other code. 
*/ if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) return ERR_PTR(-EINVAL); /* * Siblings of global init remain as zombies on exit since they are * not reaped by their parent (swapper). To solve this and to avoid * multi-rooted process trees, prevent global and container-inits * from creating siblings. */ if ((clone_flags & CLONE_PARENT) && current->signal->flags & SIGNAL_UNKILLABLE) return ERR_PTR(-EINVAL); /* * If the new process will be in a different pid or user namespace * do not allow it to share a thread group with the forking task. */ if (clone_flags & CLONE_THREAD) { if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || (task_active_pid_ns(current) != nsp->pid_ns_for_children)) return ERR_PTR(-EINVAL); } if (clone_flags & CLONE_PIDFD) { /* * - CLONE_DETACHED is blocked so that we can potentially * reuse it later for CLONE_PIDFD. */ if (clone_flags & CLONE_DETACHED) return ERR_PTR(-EINVAL); } /* * Force any signals received before this point to be delivered * before the fork happens. Collect up signals sent to multiple * processes that happen during the fork and delay them so that * they appear to happen after the fork. */ sigemptyset(&delayed.signal); INIT_HLIST_NODE(&delayed.node); spin_lock_irq(&current->sighand->siglock); if (!(clone_flags & CLONE_THREAD)) hlist_add_head(&delayed.node, &current->signal->multiprocess); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); retval = -ERESTARTNOINTR; if (task_sigpending(current)) goto fork_out; retval = -ENOMEM; p = dup_task_struct(current, node); if (!p) goto fork_out; p->flags &= ~PF_KTHREAD; if (args->kthread) p->flags |= PF_KTHREAD; if (args->user_worker) { /* * Mark us a user worker, and block any signal that isn't * fatal or STOP */ p->flags |= PF_USER_WORKER; siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP)); } if (args->io_thread) p->flags |= PF_IO_WORKER; if (args->name) strscpy_pad(p->comm, args->name, sizeof(p->comm)); p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL; /* * Clear TID on mm_release()? */ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL; ftrace_graph_init_task(p); rt_mutex_init_task(p); lockdep_assert_irqs_enabled(); #ifdef CONFIG_PROVE_LOCKING DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif retval = copy_creds(p, clone_flags); if (retval < 0) goto bad_fork_free; retval = -EAGAIN; if (is_rlimit_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) { if (p->real_cred->user != INIT_USER && !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) goto bad_fork_cleanup_count; } current->flags &= ~PF_NPROC_EXCEEDED; /* * If multiple threads are within copy_process(), then this check * triggers too late. This doesn't hurt, the check is only there * to stop root fork bombs. 
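	 * Unprivileged callers were already bounded by the RLIMIT_NPROC test
	 * above; this global max_threads comparison is the backstop that
	 * still applies to root.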
*/ retval = -EAGAIN; if (data_race(nr_threads >= max_threads)) goto bad_fork_cleanup_count; delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY); p->flags |= PF_FORKNOEXEC; INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); rcu_copy_process(p); p->vfork_done = NULL; spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); p->utime = p->stime = p->gtime = 0; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME p->utimescaled = p->stimescaled = 0; #endif prev_cputime_init(&p->prev_cputime); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN seqcount_init(&p->vtime.seqcount); p->vtime.starttime = 0; p->vtime.state = VTIME_INACTIVE; #endif #ifdef CONFIG_IO_URING p->io_uring = NULL; #endif p->default_timer_slack_ns = current->timer_slack_ns; #ifdef CONFIG_PSI p->psi_flags = 0; #endif task_io_accounting_init(&p->ioac); acct_clear_integrals(p); posix_cputimers_init(&p->posix_cputimers); tick_dep_init_task(p); p->io_context = NULL; audit_set_context(p, NULL); cgroup_fork(p); if (args->kthread) { if (!set_kthread_struct(p)) goto bad_fork_cleanup_delayacct; } #ifdef CONFIG_NUMA p->mempolicy = mpol_dup(p->mempolicy); if (IS_ERR(p->mempolicy)) { retval = PTR_ERR(p->mempolicy); p->mempolicy = NULL; goto bad_fork_cleanup_delayacct; } #endif #ifdef CONFIG_CPUSETS p->cpuset_mem_spread_rotor = NUMA_NO_NODE; seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock); #endif #ifdef CONFIG_TRACE_IRQFLAGS memset(&p->irqtrace, 0, sizeof(p->irqtrace)); p->irqtrace.hardirq_disable_ip = _THIS_IP_; p->irqtrace.softirq_enable_ip = _THIS_IP_; p->softirqs_enabled = 1; p->softirq_context = 0; #endif p->pagefault_disabled = 0; #ifdef CONFIG_LOCKDEP lockdep_init_task(p); #endif #ifdef CONFIG_DEBUG_MUTEXES p->blocked_on = NULL; /* not blocked yet */ #endif #ifdef CONFIG_BCACHE p->sequential_io = 0; p->sequential_io_avg = 0; #endif #ifdef CONFIG_BPF_SYSCALL RCU_INIT_POINTER(p->bpf_storage, NULL); p->bpf_ctx = NULL; #endif /* Perform scheduler related setup. Assign this task to a CPU. */ retval = sched_fork(clone_flags, p); if (retval) goto bad_fork_cleanup_policy; retval = perf_event_init_task(p, clone_flags); if (retval) goto bad_fork_sched_cancel_fork; retval = audit_alloc(p); if (retval) goto bad_fork_cleanup_perf; /* copy all the process information */ shm_init_task(p); retval = security_task_alloc(p, clone_flags); if (retval) goto bad_fork_cleanup_audit; retval = copy_semundo(clone_flags, p); if (retval) goto bad_fork_cleanup_security; retval = copy_files(clone_flags, p, args->no_files); if (retval) goto bad_fork_cleanup_semundo; retval = copy_fs(clone_flags, p); if (retval) goto bad_fork_cleanup_files; retval = copy_sighand(clone_flags, p); if (retval) goto bad_fork_cleanup_fs; retval = copy_signal(clone_flags, p); if (retval) goto bad_fork_cleanup_sighand; retval = copy_mm(clone_flags, p); if (retval) goto bad_fork_cleanup_signal; retval = copy_namespaces(clone_flags, p); if (retval) goto bad_fork_cleanup_mm; retval = copy_io(clone_flags, p); if (retval) goto bad_fork_cleanup_namespaces; retval = copy_thread(p, args); if (retval) goto bad_fork_cleanup_io; stackleak_task_init(p); if (pid != &init_struct_pid) { pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid, args->set_tid_size); if (IS_ERR(pid)) { retval = PTR_ERR(pid); goto bad_fork_cleanup_thread; } } /* * This has to happen after we've potentially unshared the file * descriptor table (so that the pidfd doesn't leak into the child * if the fd table isn't shared). 
*/ if (clone_flags & CLONE_PIDFD) { int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0; /* * Note that no task has been attached to @pid yet indicate * that via CLONE_PIDFD. */ retval = __pidfd_prepare(pid, flags | PIDFD_CLONE, &pidfile); if (retval < 0) goto bad_fork_free_pid; pidfd = retval; retval = put_user(pidfd, args->pidfd); if (retval) goto bad_fork_put_pidfd; } #ifdef CONFIG_BLOCK p->plug = NULL; #endif futex_init_task(p); /* * sigaltstack should be cleared when sharing the same VM */ if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) sas_ss_reset(p); /* * Syscall tracing and stepping should be turned off in the * child regardless of CLONE_PTRACE. */ user_disable_single_step(p); clear_task_syscall_work(p, SYSCALL_TRACE); #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU) clear_task_syscall_work(p, SYSCALL_EMU); #endif clear_tsk_latency_tracing(p); /* ok, now we should be set up.. */ p->pid = pid_nr(pid); if (clone_flags & CLONE_THREAD) { p->group_leader = current->group_leader; p->tgid = current->tgid; } else { p->group_leader = p; p->tgid = p->pid; } p->nr_dirtied = 0; p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); p->dirty_paused_when = 0; p->pdeath_signal = 0; p->task_works = NULL; clear_posix_cputimers_work(p); #ifdef CONFIG_KRETPROBES p->kretprobe_instances.first = NULL; #endif #ifdef CONFIG_RETHOOK p->rethooks.first = NULL; #endif /* * Ensure that the cgroup subsystem policies allow the new process to be * forked. It should be noted that the new process's css_set can be changed * between here and cgroup_post_fork() if an organisation operation is in * progress. */ retval = cgroup_can_fork(p, args); if (retval) goto bad_fork_put_pidfd; /* * Now that the cgroups are pinned, re-clone the parent cgroup and put * the new task on the correct runqueue. All this *before* the task * becomes visible. * * This isn't part of ->can_fork() because while the re-cloning is * cgroup specific, it unconditionally needs to place the task on a * runqueue. */ retval = sched_cgroup_fork(p, args); if (retval) goto bad_fork_cancel_cgroup; /* * From this point on we must avoid any synchronous user-space * communication until we take the tasklist-lock. In particular, we do * not want user-space to be able to predict the process start-time by * stalling fork(2) after we recorded the start_time but before it is * visible to the system. */ p->start_time = ktime_get_ns(); p->start_boottime = ktime_get_boottime_ns(); /* * Make it visible to the rest of the system, but dont wake it up yet. * Need tasklist lock for parent etc handling! */ write_lock_irq(&tasklist_lock); /* CLONE_PARENT re-uses the old parent */ if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { p->real_parent = current->real_parent; p->parent_exec_id = current->parent_exec_id; if (clone_flags & CLONE_THREAD) p->exit_signal = -1; else p->exit_signal = current->group_leader->exit_signal; } else { p->real_parent = current; p->parent_exec_id = current->self_exec_id; p->exit_signal = args->exit_signal; } klp_copy_process(p); sched_core_fork(p); spin_lock(&current->sighand->siglock); rv_task_fork(p); rseq_fork(p, clone_flags); /* Don't start children in a dying pid namespace */ if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) { retval = -ENOMEM; goto bad_fork_core_free; } /* Let kill terminate clone/fork in the middle */ if (fatal_signal_pending(current)) { retval = -EINTR; goto bad_fork_core_free; } /* No more failure paths after this point. 
*/ /* * Copy seccomp details explicitly here, in case they were changed * before holding sighand lock. */ copy_seccomp(p); init_task_pid_links(p); if (likely(p->pid)) { ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); init_task_pid(p, PIDTYPE_PID, pid); if (thread_group_leader(p)) { init_task_pid(p, PIDTYPE_TGID, pid); init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); init_task_pid(p, PIDTYPE_SID, task_session(current)); if (is_child_reaper(pid)) { ns_of_pid(pid)->child_reaper = p; p->signal->flags |= SIGNAL_UNKILLABLE; } p->signal->shared_pending.signal = delayed.signal; p->signal->tty = tty_kref_get(current->signal->tty); /* * Inherit has_child_subreaper flag under the same * tasklist_lock with adding child to the process tree * for propagate_has_child_subreaper optimization. */ p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper || p->real_parent->signal->is_child_subreaper; list_add_tail(&p->sibling, &p->real_parent->children); list_add_tail_rcu(&p->tasks, &init_task.tasks); attach_pid(p, PIDTYPE_TGID); attach_pid(p, PIDTYPE_PGID); attach_pid(p, PIDTYPE_SID); __this_cpu_inc(process_counts); } else { current->signal->nr_threads++; current->signal->quick_threads++; atomic_inc(&current->signal->live); refcount_inc(&current->signal->sigcnt); task_join_group_stop(p); list_add_tail_rcu(&p->thread_node, &p->signal->thread_head); } attach_pid(p, PIDTYPE_PID); nr_threads++; } total_forks++; hlist_del_init(&delayed.node); spin_unlock(&current->sighand->siglock); syscall_tracepoint_update(p); write_unlock_irq(&tasklist_lock); if (pidfile) fd_install(pidfd, pidfile); proc_fork_connector(p); sched_post_fork(p); cgroup_post_fork(p, args); perf_event_fork(p); trace_task_newtask(p, clone_flags); uprobe_copy_process(p, clone_flags); user_events_fork(p, clone_flags); copy_oom_score_adj(clone_flags, p); return p; bad_fork_core_free: sched_core_free(p); spin_unlock(&current->sighand->siglock); write_unlock_irq(&tasklist_lock); bad_fork_cancel_cgroup: cgroup_cancel_fork(p, args); bad_fork_put_pidfd: if (clone_flags & CLONE_PIDFD) { fput(pidfile); put_unused_fd(pidfd); } bad_fork_free_pid: if (pid != &init_struct_pid) free_pid(pid); bad_fork_cleanup_thread: exit_thread(p); bad_fork_cleanup_io: if (p->io_context) exit_io_context(p); bad_fork_cleanup_namespaces: exit_task_namespaces(p); bad_fork_cleanup_mm: if (p->mm) { mm_clear_owner(p->mm, p); mmput(p->mm); } bad_fork_cleanup_signal: if (!(clone_flags & CLONE_THREAD)) free_signal_struct(p->signal); bad_fork_cleanup_sighand: __cleanup_sighand(p->sighand); bad_fork_cleanup_fs: exit_fs(p); /* blocking */ bad_fork_cleanup_files: exit_files(p); /* blocking */ bad_fork_cleanup_semundo: exit_sem(p); bad_fork_cleanup_security: security_task_free(p); bad_fork_cleanup_audit: audit_free(p); bad_fork_cleanup_perf: perf_event_free_task(p); bad_fork_sched_cancel_fork: sched_cancel_fork(p); bad_fork_cleanup_policy: lockdep_free_task(p); #ifdef CONFIG_NUMA mpol_put(p->mempolicy); #endif bad_fork_cleanup_delayacct: delayacct_tsk_free(p); bad_fork_cleanup_count: dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1); exit_creds(p); bad_fork_free: WRITE_ONCE(p->__state, TASK_DEAD); exit_task_stack_account(p); put_task_stack(p); delayed_free_task(p); fork_out: spin_lock_irq(&current->sighand->siglock); hlist_del_init(&delayed.node); spin_unlock_irq(&current->sighand->siglock); return ERR_PTR(retval); } static inline void init_idle_pids(struct task_struct *idle) { enum pid_type type; for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) 
{ INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */ init_task_pid(idle, type, &init_struct_pid); } } static int idle_dummy(void *dummy) { /* This function is never called */ return 0; } struct task_struct * __init fork_idle(int cpu) { struct task_struct *task; struct kernel_clone_args args = { .flags = CLONE_VM, .fn = &idle_dummy, .fn_arg = NULL, .kthread = 1, .idle = 1, }; task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args); if (!IS_ERR(task)) { init_idle_pids(task); init_idle(task, cpu); } return task; } /* * This is like kernel_clone(), but shaved down and tailored to just * creating io_uring workers. It returns a created task, or an error pointer. * The returned task is inactive, and the caller must fire it up through * wake_up_new_task(p). All signals are blocked in the created task. */ struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) { unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| CLONE_IO; struct kernel_clone_args args = { .flags = ((lower_32_bits(flags) | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL), .exit_signal = (lower_32_bits(flags) & CSIGNAL), .fn = fn, .fn_arg = arg, .io_thread = 1, .user_worker = 1, }; return copy_process(NULL, 0, node, &args); } /* * Ok, this is the main fork-routine. * * It copies the process, and if successful kick-starts * it and waits for it to finish using the VM if required. * * args->exit_signal is expected to be checked for sanity by the caller. */ pid_t kernel_clone(struct kernel_clone_args *args) { u64 clone_flags = args->flags; struct completion vfork; struct pid *pid; struct task_struct *p; int trace = 0; pid_t nr; /* * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate * field in struct clone_args and it still doesn't make sense to have * them both point at the same memory location. Performing this check * here has the advantage that we don't need to have a separate helper * to check for legacy clone(). */ if ((clone_flags & CLONE_PIDFD) && (clone_flags & CLONE_PARENT_SETTID) && (args->pidfd == args->parent_tid)) return -EINVAL; /* * Determine whether and which event to report to ptracer. When * called from kernel_thread or CLONE_UNTRACED is explicitly * requested, no event is reported; otherwise, report if the event * for the type of forking is enabled. */ if (!(clone_flags & CLONE_UNTRACED)) { if (clone_flags & CLONE_VFORK) trace = PTRACE_EVENT_VFORK; else if (args->exit_signal != SIGCHLD) trace = PTRACE_EVENT_CLONE; else trace = PTRACE_EVENT_FORK; if (likely(!ptrace_event_enabled(current, trace))) trace = 0; } p = copy_process(NULL, trace, NUMA_NO_NODE, args); add_latent_entropy(); if (IS_ERR(p)) return PTR_ERR(p); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. 
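	 * (For CLONE_VFORK an extra task_struct reference is taken below,
	 * and dropped again in wait_for_vfork_done().)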
*/ trace_sched_process_fork(current, p); pid = get_task_pid(p, PIDTYPE_PID); nr = pid_vnr(pid); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, args->parent_tid); if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); get_task_struct(p); } if (IS_ENABLED(CONFIG_LRU_GEN_WALKS_MMU) && !(clone_flags & CLONE_VM)) { /* lock the task to synchronize with memcg migration */ task_lock(p); lru_gen_add_mm(p->mm); task_unlock(p); } wake_up_new_task(p); /* forking complete and child started to run, tell ptracer */ if (unlikely(trace)) ptrace_event_pid(trace, pid); if (clone_flags & CLONE_VFORK) { if (!wait_for_vfork_done(p, &vfork)) ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); } put_pid(pid); return nr; } /* * Create a kernel thread. */ pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name, unsigned long flags) { struct kernel_clone_args args = { .flags = ((lower_32_bits(flags) | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL), .exit_signal = (lower_32_bits(flags) & CSIGNAL), .fn = fn, .fn_arg = arg, .name = name, .kthread = 1, }; return kernel_clone(&args); } /* * Create a user mode thread. */ pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags) { struct kernel_clone_args args = { .flags = ((lower_32_bits(flags) | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL), .exit_signal = (lower_32_bits(flags) & CSIGNAL), .fn = fn, .fn_arg = arg, }; return kernel_clone(&args); } #ifdef __ARCH_WANT_SYS_FORK SYSCALL_DEFINE0(fork) { #ifdef CONFIG_MMU struct kernel_clone_args args = { .exit_signal = SIGCHLD, }; return kernel_clone(&args); #else /* can not support in nommu mode */ return -EINVAL; #endif } #endif #ifdef __ARCH_WANT_SYS_VFORK SYSCALL_DEFINE0(vfork) { struct kernel_clone_args args = { .flags = CLONE_VFORK | CLONE_VM, .exit_signal = SIGCHLD, }; return kernel_clone(&args); } #endif #ifdef __ARCH_WANT_SYS_CLONE #ifdef CONFIG_CLONE_BACKWARDS SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, int __user *, parent_tidptr, unsigned long, tls, int __user *, child_tidptr) #elif defined(CONFIG_CLONE_BACKWARDS2) SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls) #elif defined(CONFIG_CLONE_BACKWARDS3) SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, int, stack_size, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls) #else SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls) #endif { struct kernel_clone_args args = { .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), .pidfd = parent_tidptr, .child_tid = child_tidptr, .parent_tid = parent_tidptr, .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), .stack = newsp, .tls = tls, }; return kernel_clone(&args); } #endif noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, struct clone_args __user *uargs, size_t usize) { int err; struct clone_args args; pid_t *kset_tid = kargs->set_tid; BUILD_BUG_ON(offsetofend(struct clone_args, tls) != CLONE_ARGS_SIZE_VER0); BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) != CLONE_ARGS_SIZE_VER1); BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) != CLONE_ARGS_SIZE_VER2); BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2); if (unlikely(usize > PAGE_SIZE)) return -E2BIG; if (unlikely(usize < CLONE_ARGS_SIZE_VER0)) return -EINVAL; err = copy_struct_from_user(&args, 
sizeof(args), uargs, usize); if (err) return err; if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL)) return -EINVAL; if (unlikely(!args.set_tid && args.set_tid_size > 0)) return -EINVAL; if (unlikely(args.set_tid && args.set_tid_size == 0)) return -EINVAL; /* * Verify that higher 32bits of exit_signal are unset and that * it is a valid signal */ if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) || !valid_signal(args.exit_signal))) return -EINVAL; if ((args.flags & CLONE_INTO_CGROUP) && (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2)) return -EINVAL; *kargs = (struct kernel_clone_args){ .flags = args.flags, .pidfd = u64_to_user_ptr(args.pidfd), .child_tid = u64_to_user_ptr(args.child_tid), .parent_tid = u64_to_user_ptr(args.parent_tid), .exit_signal = args.exit_signal, .stack = args.stack, .stack_size = args.stack_size, .tls = args.tls, .set_tid_size = args.set_tid_size, .cgroup = args.cgroup, }; if (args.set_tid && copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid), (kargs->set_tid_size * sizeof(pid_t)))) return -EFAULT; kargs->set_tid = kset_tid; return 0; } /** * clone3_stack_valid - check and prepare stack * @kargs: kernel clone args * * Verify that the stack arguments userspace gave us are sane. * In addition, set the stack direction for userspace since it's easy for us to * determine. */ static inline bool clone3_stack_valid(struct kernel_clone_args *kargs) { if (kargs->stack == 0) { if (kargs->stack_size > 0) return false; } else { if (kargs->stack_size == 0) return false; if (!access_ok((void __user *)kargs->stack, kargs->stack_size)) return false; #if !defined(CONFIG_STACK_GROWSUP) kargs->stack += kargs->stack_size; #endif } return true; } static bool clone3_args_valid(struct kernel_clone_args *kargs) { /* Verify that no unknown flags are passed along. */ if (kargs->flags & ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP)) return false; /* * - make the CLONE_DETACHED bit reusable for clone3 * - make the CSIGNAL bits reusable for clone3 */ if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME)))) return false; if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) == (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) return false; if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) && kargs->exit_signal) return false; if (!clone3_stack_valid(kargs)) return false; return true; } /** * sys_clone3 - create a new process with specific properties * @uargs: argument structure * @size: size of @uargs * * clone3() is the extensible successor to clone()/clone2(). * It takes a struct as argument that is versioned by its size. * * Return: On success, a positive PID for the child process. * On error, a negative errno number. 
*/ SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size) { int err; struct kernel_clone_args kargs; pid_t set_tid[MAX_PID_NS_LEVEL]; #ifdef __ARCH_BROKEN_SYS_CLONE3 #warning clone3() entry point is missing, please fix return -ENOSYS; #endif kargs.set_tid = set_tid; err = copy_clone_args_from_user(&kargs, uargs, size); if (err) return err; if (!clone3_args_valid(&kargs)) return -EINVAL; return kernel_clone(&kargs); } void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data) { struct task_struct *leader, *parent, *child; int res; read_lock(&tasklist_lock); leader = top = top->group_leader; down: for_each_thread(leader, parent) { list_for_each_entry(child, &parent->children, sibling) { res = visitor(child, data); if (res) { if (res < 0) goto out; leader = child; goto down; } up: ; } } if (leader != top) { child = leader; parent = child->real_parent; leader = parent->group_leader; goto up; } out: read_unlock(&tasklist_lock); } #ifndef ARCH_MIN_MMSTRUCT_ALIGN #define ARCH_MIN_MMSTRUCT_ALIGN 0 #endif static void sighand_ctor(void *data) { struct sighand_struct *sighand = data; spin_lock_init(&sighand->siglock); init_waitqueue_head(&sighand->signalfd_wqh); } void __init mm_cache_init(void) { unsigned int mm_size; /* * The mm_cpumask is located at the end of mm_struct, and is * dynamically sized based on the maximum CPU number this system * can have, taking hotplug into account (nr_cpu_ids). */ mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size(); mm_cachep = kmem_cache_create_usercopy("mm_struct", mm_size, ARCH_MIN_MMSTRUCT_ALIGN, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, offsetof(struct mm_struct, saved_auxv), sizeof_field(struct mm_struct, saved_auxv), NULL); } void __init proc_caches_init(void) { struct kmem_cache_args args = { .use_freeptr_offset = true, .freeptr_offset = offsetof(struct vm_area_struct, vm_freeptr), }; sighand_cachep = kmem_cache_create("sighand_cache", sizeof(struct sighand_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| SLAB_ACCOUNT, sighand_ctor); signal_cachep = kmem_cache_create("signal_cache", sizeof(struct signal_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); files_cachep = kmem_cache_create("files_cache", sizeof(struct files_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); fs_cachep = kmem_cache_create("fs_cache", sizeof(struct fs_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); vm_area_cachep = kmem_cache_create("vm_area_struct", sizeof(struct vm_area_struct), &args, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| SLAB_ACCOUNT); mmap_init(); nsproxy_cache_init(); } /* * Check constraints on flags passed to the unshare system call. */ static int check_unshare_flags(unsigned long unshare_flags) { if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET| CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP| CLONE_NEWTIME)) return -EINVAL; /* * Not implemented, but pretend it works if there is nothing * to unshare. Note that unsharing the address space or the * signal handlers also need to unshare the signal queues (aka * CLONE_THREAD). 
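	 * The three checks below therefore refuse CLONE_THREAD, CLONE_SIGHAND
	 * and CLONE_VM unless the caller is effectively single-threaded.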
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		if (!thread_group_empty(current))
			return -EINVAL;
	}
	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (refcount_read(&current->sighand->count) > 1)
			return -EINVAL;
	}
	if (unshare_flags & CLONE_VM) {
		if (!current_is_single_threaded())
			return -EINVAL;
	}

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		fd = dup_fd(fd, NULL);
		if (IS_ERR(fd))
			return PTR_ERR(fd);
		*new_fdp = fd;
	}

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by kernel_clone() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
int ksys_unshare(unsigned long unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace, must also unshare the thread group
	 * and unshare the filesystem root and working directories.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing signal handlers, must also unshare the signal queues.
	 */
	if (unshare_flags & CLONE_SIGHAND)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_cred) {
		err = set_cred_ucounts(new_cred);
		if (err)
			goto bad_unshare_cleanup_cred;
	}

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above).
*/ exit_shm(current); shm_init_task(current); } if (new_nsproxy) switch_task_namespaces(current, new_nsproxy); task_lock(current); if (new_fs) { fs = current->fs; spin_lock(&fs->lock); current->fs = new_fs; if (--fs->users) new_fs = NULL; else new_fs = fs; spin_unlock(&fs->lock); } if (new_fd) swap(current->files, new_fd); task_unlock(current); if (new_cred) { /* Install the new user namespace */ commit_creds(new_cred); new_cred = NULL; } } perf_event_namespaces(current); bad_unshare_cleanup_cred: if (new_cred) put_cred(new_cred); bad_unshare_cleanup_fd: if (new_fd) put_files_struct(new_fd); bad_unshare_cleanup_fs: if (new_fs) free_fs_struct(new_fs); bad_unshare_out: return err; } SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) { return ksys_unshare(unshare_flags); } /* * Helper to unshare the files of the current task. * We don't want to expose copy_files internals to * the exec layer of the kernel. */ int unshare_files(void) { struct task_struct *task = current; struct files_struct *old, *copy = NULL; int error; error = unshare_fd(CLONE_FILES, &copy); if (error || !copy) return error; old = task->files; task_lock(task); task->files = copy; task_unlock(task); put_files_struct(old); return 0; } int sysctl_max_threads(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; int ret; int threads = max_threads; int min = 1; int max = MAX_THREADS; t = *table; t.data = &threads; t.extra1 = &min; t.extra2 = &max; ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); if (ret || !write) return ret; max_threads = threads; return 0; }
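/*
 * Editor's note: a minimal userspace sketch (not part of kernel/fork.c)
 * illustrating the flag implications that ksys_unshare() applies above.
 * It assumes a Linux system with user namespaces enabled and the glibc
 * unshare() wrapper; error handling is deliberately minimal.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/*
	 * ksys_unshare() promotes CLONE_NEWUSER to CLONE_NEWUSER |
	 * CLONE_THREAD | CLONE_FS, so check_unshare_flags() rejects this
	 * call with -EINVAL in a multithreaded process.
	 */
	if (unshare(CLONE_NEWUSER) != 0) {
		perror("unshare(CLONE_NEWUSER)");
		return 1;
	}

	/*
	 * The process is now the sole member of a fresh user namespace;
	 * until a uid_map is written, the effective UID reads back as
	 * the overflow UID (typically 65534).
	 */
	printf("euid in new user namespace: %u\n", (unsigned int)geteuid());
	return 0;
}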
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2001 Jens Axboe <axboe@suse.de> */ #ifndef __LINUX_BIO_H #define __LINUX_BIO_H #include <linux/mempool.h> /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ #include <linux/blk_types.h> #include <linux/uio.h> #define BIO_MAX_VECS 256U struct queue_limits; static inline unsigned int bio_max_segs(unsigned int nr_segs) { return min(nr_segs, BIO_MAX_VECS); } #define bio_iter_iovec(bio, iter) \ bvec_iter_bvec((bio)->bi_io_vec, (iter)) #define bio_iter_page(bio, iter) \ bvec_iter_page((bio)->bi_io_vec, (iter)) #define bio_iter_len(bio, iter) \ bvec_iter_len((bio)->bi_io_vec, (iter)) #define bio_iter_offset(bio, iter) \ bvec_iter_offset((bio)->bi_io_vec, (iter)) #define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter) #define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter) #define 
bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter) #define bvec_iter_sectors(iter) ((iter).bi_size >> 9) #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter))) #define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter) #define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter) /* * Return the data direction, READ or WRITE. */ #define bio_data_dir(bio) \ (op_is_write(bio_op(bio)) ? WRITE : READ) /* * Check whether this bio carries any data or not. A NULL bio is allowed. */ static inline bool bio_has_data(struct bio *bio) { if (bio && bio->bi_iter.bi_size && bio_op(bio) != REQ_OP_DISCARD && bio_op(bio) != REQ_OP_SECURE_ERASE && bio_op(bio) != REQ_OP_WRITE_ZEROES) return true; return false; } static inline bool bio_no_advance_iter(const struct bio *bio) { return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE || bio_op(bio) == REQ_OP_WRITE_ZEROES; } static inline void *bio_data(struct bio *bio) { if (bio_has_data(bio)) return page_address(bio_page(bio)) + bio_offset(bio); return NULL; } static inline bool bio_next_segment(const struct bio *bio, struct bvec_iter_all *iter) { if (iter->idx >= bio->bi_vcnt) return false; bvec_advance(&bio->bi_io_vec[iter->idx], iter); return true; } /* * drivers should _never_ use the all version - the bio may have been split * before it got to the driver and the driver won't own all of it */ #define bio_for_each_segment_all(bvl, bio, iter) \ for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); ) static inline void bio_advance_iter(const struct bio *bio, struct bvec_iter *iter, unsigned int bytes) { iter->bi_sector += bytes >> 9; if (bio_no_advance_iter(bio)) iter->bi_size -= bytes; else bvec_iter_advance(bio->bi_io_vec, iter, bytes); /* TODO: It is reasonable to complete bio with error here. */ } /* @bytes should be less or equal to bvec[i->bi_idx].bv_len */ static inline void bio_advance_iter_single(const struct bio *bio, struct bvec_iter *iter, unsigned int bytes) { iter->bi_sector += bytes >> 9; if (bio_no_advance_iter(bio)) iter->bi_size -= bytes; else bvec_iter_advance_single(bio->bi_io_vec, iter, bytes); } void __bio_advance(struct bio *, unsigned bytes); /** * bio_advance - increment/complete a bio by some number of bytes * @bio: bio to advance * @nbytes: number of bytes to complete * * This updates bi_sector, bi_size and bi_idx; if the number of bytes to * complete doesn't align with a bvec boundary, then bv_len and bv_offset will * be updated on the last bvec as well. * * @bio will then represent the remaining, uncompleted portion of the io. */ static inline void bio_advance(struct bio *bio, unsigned int nbytes) { if (nbytes == bio->bi_iter.bi_size) { bio->bi_iter.bi_size = 0; return; } __bio_advance(bio, nbytes); } #define __bio_for_each_segment(bvl, bio, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bio_iter_iovec((bio), (iter))), 1); \ bio_advance_iter_single((bio), &(iter), (bvl).bv_len)) #define bio_for_each_segment(bvl, bio, iter) \ __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) #define __bio_for_each_bvec(bvl, bio, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \ bio_advance_iter_single((bio), &(iter), (bvl).bv_len)) /* iterate over multi-page bvec */ #define bio_for_each_bvec(bvl, bio, iter) \ __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter) /* * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the * same reasons as bio_for_each_segment_all(). 
*/ #define bio_for_each_bvec_all(bvl, bio, i) \ for (i = 0, bvl = bio_first_bvec_all(bio); \ i < (bio)->bi_vcnt; i++, bvl++) #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) static inline unsigned bio_segments(struct bio *bio) { unsigned segs = 0; struct bio_vec bv; struct bvec_iter iter; /* * We special case discard/write same/write zeroes, because they * interpret bi_size differently: */ switch (bio_op(bio)) { case REQ_OP_DISCARD: case REQ_OP_SECURE_ERASE: case REQ_OP_WRITE_ZEROES: return 0; default: break; } bio_for_each_segment(bv, bio, iter) segs++; return segs; } /* * Get a reference to a bio, so it won't disappear. The intended use is * something like: * * bio_get(bio); * submit_bio(rw, bio); * if (bio->bi_flags ...) * do_something * bio_put(bio); * * Without the bio_get(), the I/O could potentially complete before submit_bio * returns, and the bio would then be freed memory by the time the * if (bio->bi_flags ...) check runs */ static inline void bio_get(struct bio *bio) { bio->bi_flags |= (1 << BIO_REFFED); smp_mb__before_atomic(); atomic_inc(&bio->__bi_cnt); } static inline void bio_cnt_set(struct bio *bio, unsigned int count) { if (count != 1) { bio->bi_flags |= (1 << BIO_REFFED); smp_mb(); } atomic_set(&bio->__bi_cnt, count); } static inline bool bio_flagged(struct bio *bio, unsigned int bit) { return bio->bi_flags & (1U << bit); } static inline void bio_set_flag(struct bio *bio, unsigned int bit) { bio->bi_flags |= (1U << bit); } static inline void bio_clear_flag(struct bio *bio, unsigned int bit) { bio->bi_flags &= ~(1U << bit); } static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) { WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); return bio->bi_io_vec; } static inline struct page *bio_first_page_all(struct bio *bio) { return bio_first_bvec_all(bio)->bv_page; } static inline struct folio *bio_first_folio_all(struct bio *bio) { return page_folio(bio_first_page_all(bio)); } static inline struct bio_vec *bio_last_bvec_all(struct bio *bio) { WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); return &bio->bi_io_vec[bio->bi_vcnt - 1]; } /** * struct folio_iter - State for iterating all folios in a bio. * @folio: The current folio we're iterating. NULL after the last folio. * @offset: The byte offset within the current folio. * @length: The number of bytes in this iteration (will not cross folio * boundary). */ struct folio_iter { struct folio *folio; size_t offset; size_t length; /* private: for use by the iterator */ struct folio *_next; size_t _seg_count; int _i; }; static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio, int i) { struct bio_vec *bvec = bio_first_bvec_all(bio) + i; if (unlikely(i >= bio->bi_vcnt)) { fi->folio = NULL; return; } fi->folio = page_folio(bvec->bv_page); fi->offset = bvec->bv_offset + PAGE_SIZE * (bvec->bv_page - &fi->folio->page); fi->_seg_count = bvec->bv_len; fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count); fi->_next = folio_next(fi->folio); fi->_i = i; } static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio) { fi->_seg_count -= fi->length; if (fi->_seg_count) { fi->folio = fi->_next; fi->offset = 0; fi->length = min(folio_size(fi->folio), fi->_seg_count); fi->_next = folio_next(fi->folio); } else { bio_first_folio(fi, bio, fi->_i + 1); } } /** * bio_for_each_folio_all - Iterate over each folio in a bio. * @fi: struct folio_iter which is updated for each folio. * @bio: struct bio to iterate over. 
*/ #define bio_for_each_folio_all(fi, bio) \ for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio)) void bio_trim(struct bio *bio, sector_t offset, sector_t size); extern struct bio *bio_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs); int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim, unsigned *segs, unsigned max_bytes); /** * bio_next_split - get next @sectors from a bio, splitting if necessary * @bio: bio to split * @sectors: number of sectors to split from the front of @bio * @gfp: gfp mask * @bs: bio set to allocate from * * Return: a bio representing the next @sectors of @bio - if the bio is smaller * than @sectors, returns the original bio unchanged. */ static inline struct bio *bio_next_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs) { if (sectors >= bio_sectors(bio)) return bio; return bio_split(bio, sectors, gfp, bs); } enum { BIOSET_NEED_BVECS = BIT(0), BIOSET_NEED_RESCUER = BIT(1), BIOSET_PERCPU_CACHE = BIT(2), }; extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags); extern void bioset_exit(struct bio_set *); extern int biovec_init_pool(mempool_t *pool, int pool_entries); struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask, struct bio_set *bs); struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask); extern void bio_put(struct bio *); struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src, gfp_t gfp, struct bio_set *bs); int bio_init_clone(struct block_device *bdev, struct bio *bio, struct bio *bio_src, gfp_t gfp); extern struct bio_set fs_bio_set; static inline struct bio *bio_alloc(struct block_device *bdev, unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask) { return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set); } void submit_bio(struct bio *bio); extern void bio_endio(struct bio *); static inline void bio_io_error(struct bio *bio) { bio->bi_status = BLK_STS_IOERR; bio_endio(bio); } static inline void bio_wouldblock_error(struct bio *bio) { bio_set_flag(bio, BIO_QUIET); bio->bi_status = BLK_STS_AGAIN; bio_endio(bio); } /* * Calculate number of bvec segments that should be allocated to fit data * pointed by @iter. If @iter is backed by bvec it's going to be reused * instead of allocating a new one. 
*/ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs) { if (iov_iter_is_bvec(iter)) return 0; return iov_iter_npages(iter, max_segs); } struct request_queue; extern int submit_bio_wait(struct bio *bio); void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, unsigned short max_vecs, blk_opf_t opf); extern void bio_uninit(struct bio *); void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf); void bio_chain(struct bio *, struct bio *); int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len, unsigned off); bool __must_check bio_add_folio(struct bio *bio, struct folio *folio, size_t len, size_t off); void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off); void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, size_t off); int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter); void __bio_release_pages(struct bio *bio, bool mark_dirty); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, struct bio *src, struct bvec_iter *src_iter); extern void bio_copy_data(struct bio *dst, struct bio *src); extern void bio_free_pages(struct bio *bio); void guard_bio_eod(struct bio *bio); void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter); static inline void zero_fill_bio(struct bio *bio) { zero_fill_bio_iter(bio, bio->bi_iter); } static inline void bio_release_pages(struct bio *bio, bool mark_dirty) { if (bio_flagged(bio, BIO_PAGE_PINNED)) __bio_release_pages(bio, mark_dirty); } #define bio_dev(bio) \ disk_devt((bio)->bi_bdev->bd_disk) #ifdef CONFIG_BLK_CGROUP void bio_associate_blkg(struct bio *bio); void bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css); void bio_clone_blkg_association(struct bio *dst, struct bio *src); void blkcg_punt_bio_submit(struct bio *bio); #else /* CONFIG_BLK_CGROUP */ static inline void bio_associate_blkg(struct bio *bio) { } static inline void bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css) { } static inline void bio_clone_blkg_association(struct bio *dst, struct bio *src) { } static inline void blkcg_punt_bio_submit(struct bio *bio) { submit_bio(bio); } #endif /* CONFIG_BLK_CGROUP */ static inline void bio_set_dev(struct bio *bio, struct block_device *bdev) { bio_clear_flag(bio, BIO_REMAPPED); if (bio->bi_bdev != bdev) bio_clear_flag(bio, BIO_BPS_THROTTLED); bio->bi_bdev = bdev; bio_associate_blkg(bio); } /* * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. * * A bio_list anchors a singly-linked list of bios chained through the bi_next * member of the bio. The bio_list also caches the last list member to allow * fast access to the tail. 
*/ struct bio_list { struct bio *head; struct bio *tail; }; static inline int bio_list_empty(const struct bio_list *bl) { return bl->head == NULL; } static inline void bio_list_init(struct bio_list *bl) { bl->head = bl->tail = NULL; } #define BIO_EMPTY_LIST { NULL, NULL } #define bio_list_for_each(bio, bl) \ for (bio = (bl)->head; bio; bio = bio->bi_next) static inline unsigned bio_list_size(const struct bio_list *bl) { unsigned sz = 0; struct bio *bio; bio_list_for_each(bio, bl) sz++; return sz; } static inline void bio_list_add(struct bio_list *bl, struct bio *bio) { bio->bi_next = NULL; if (bl->tail) bl->tail->bi_next = bio; else bl->head = bio; bl->tail = bio; } static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio) { bio->bi_next = bl->head; bl->head = bio; if (!bl->tail) bl->tail = bio; } static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) { if (!bl2->head) return; if (bl->tail) bl->tail->bi_next = bl2->head; else bl->head = bl2->head; bl->tail = bl2->tail; } static inline void bio_list_merge_init(struct bio_list *bl, struct bio_list *bl2) { bio_list_merge(bl, bl2); bio_list_init(bl2); } static inline void bio_list_merge_head(struct bio_list *bl, struct bio_list *bl2) { if (!bl2->head) return; if (bl->head) bl2->tail->bi_next = bl->head; else bl->tail = bl2->tail; bl->head = bl2->head; } static inline struct bio *bio_list_peek(struct bio_list *bl) { return bl->head; } static inline struct bio *bio_list_pop(struct bio_list *bl) { struct bio *bio = bl->head; if (bio) { bl->head = bl->head->bi_next; if (!bl->head) bl->tail = NULL; bio->bi_next = NULL; } return bio; } static inline struct bio *bio_list_get(struct bio_list *bl) { struct bio *bio = bl->head; bl->head = bl->tail = NULL; return bio; } /* * Increment chain count for the bio. Make sure the CHAIN flag update * is visible before the raised count. */ static inline void bio_inc_remaining(struct bio *bio) { bio_set_flag(bio, BIO_CHAIN); smp_mb__before_atomic(); atomic_inc(&bio->__bi_remaining); } /* * bio_set is used to allow other portions of the IO system to * allocate their own private memory pools for bio and iovec structures. * These memory pools in turn all allocate from the bio_slab * and the bvec_slabs[]. */ #define BIO_POOL_SIZE 2 struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; /* * per-cpu bio alloc cache */ struct bio_alloc_cache __percpu *cache; mempool_t bio_pool; mempool_t bvec_pool; unsigned int back_pad; /* * Deadlock avoidance for stacking block drivers: see comments in * bio_alloc_bioset() for details */ spinlock_t rescue_lock; struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; /* * Hot un-plug notifier for the per-cpu cache, if used */ struct hlist_node cpuhp_dead; }; static inline bool bioset_initialized(struct bio_set *bs) { return bs->bio_slab != NULL; } /* * Mark a bio as polled. Note that for async polled IO, the caller must * expect -EWOULDBLOCK if we cannot allocate a request (or other resources). * We cannot block waiting for requests on polled IO, as those completions * must be found by the caller. This is different than IRQ driven IO, where * it's safe to wait for IO to complete. */ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) { bio->bi_opf |= REQ_POLLED; if (kiocb->ki_flags & IOCB_NOWAIT) bio->bi_opf |= REQ_NOWAIT; } static inline void bio_clear_polled(struct bio *bio) { bio->bi_opf &= ~REQ_POLLED; } /** * bio_is_zone_append - is this a zone append bio? 
* @bio: bio to check * * Check if @bio is a zone append operation. Core block layer code and end_io * handlers must use this instead of an open coded REQ_OP_ZONE_APPEND check * because the block layer can rewrite REQ_OP_ZONE_APPEND to REQ_OP_WRITE if * it is not natively supported. */ static inline bool bio_is_zone_append(struct bio *bio) { if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) return false; return bio_op(bio) == REQ_OP_ZONE_APPEND || bio_flagged(bio, BIO_EMULATES_ZONE_APPEND); } struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, unsigned int nr_pages, blk_opf_t opf, gfp_t gfp); struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new); struct bio *blk_alloc_discard_bio(struct block_device *bdev, sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask); #endif /* __LINUX_BIO_H */
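/*
 * Editor's note: an illustrative sketch (not part of this header) of how
 * a stacking driver might combine the helpers above.  It assumes a
 * normal kernel build context with #include <linux/bio.h>; the names
 * flush_deferred_bios() and bio_payload_bytes() are invented for the
 * example.
 */

/*
 * Drain a bio_list in FIFO order.  bio_list_pop() unlinks the head and
 * returns NULL once the list is empty, so bios are submitted in the same
 * order they were queued with bio_list_add().
 */
static void flush_deferred_bios(struct bio_list *deferred)
{
	struct bio *bio;

	while ((bio = bio_list_pop(deferred)) != NULL)
		submit_bio(bio);
}

/*
 * Walk the remaining single-page segments of a bio with
 * bio_for_each_segment() and sum their lengths; for a data-carrying bio
 * this equals bio->bi_iter.bi_size.
 */
static unsigned int bio_payload_bytes(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;

	return bytes;
}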
// SPDX-License-Identifier: GPL-2.0-or-later /* * NetLabel Unlabeled Support * * This file defines functions for dealing with unlabeled packets for the * NetLabel system. The NetLabel system manages static and dynamic label * mappings for network protocols such as CIPSO and RIPSO. 
* * Author: Paul Moore <paul@paul-moore.com> */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006 - 2008 */ #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/socket.h> #include <linux/string.h> #include <linux/skbuff.h> #include <linux/audit.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/notifier.h> #include <linux/netdevice.h> #include <linux/security.h> #include <linux/slab.h> #include <net/sock.h> #include <net/netlink.h> #include <net/genetlink.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/net_namespace.h> #include <net/netlabel.h> #include <asm/bug.h> #include <linux/atomic.h> #include "netlabel_user.h" #include "netlabel_addrlist.h" #include "netlabel_domainhash.h" #include "netlabel_unlabeled.h" #include "netlabel_mgmt.h" /* NOTE: at present we always use init's network namespace since we don't * presently support different namespaces even though the majority of * the functions in this file are "namespace safe" */ /* The unlabeled connection hash table which we use to map network interfaces * and addresses of unlabeled packets to a user specified secid value for the * LSM. The hash table is used to lookup the network interface entry * (struct netlbl_unlhsh_iface) and then the interface entry is used to * lookup an IP address match from an ordered list. If a network interface * match can not be found in the hash table then the default entry * (netlbl_unlhsh_def) is used. The IP address entry list * (struct netlbl_unlhsh_addr) is ordered such that the entries with a * larger netmask come first. */ struct netlbl_unlhsh_tbl { struct list_head *tbl; u32 size; }; #define netlbl_unlhsh_addr4_entry(iter) \ container_of(iter, struct netlbl_unlhsh_addr4, list) struct netlbl_unlhsh_addr4 { u32 secid; struct netlbl_af4list list; struct rcu_head rcu; }; #define netlbl_unlhsh_addr6_entry(iter) \ container_of(iter, struct netlbl_unlhsh_addr6, list) struct netlbl_unlhsh_addr6 { u32 secid; struct netlbl_af6list list; struct rcu_head rcu; }; struct netlbl_unlhsh_iface { int ifindex; struct list_head addr4_list; struct list_head addr6_list; u32 valid; struct list_head list; struct rcu_head rcu; }; /* Argument struct for netlbl_unlhsh_walk() */ struct netlbl_unlhsh_walk_arg { struct netlink_callback *nl_cb; struct sk_buff *skb; u32 seq; }; /* Unlabeled connection hash table */ /* updates should be so rare that having one spinlock for the entire * hash table should be okay */ static DEFINE_SPINLOCK(netlbl_unlhsh_lock); #define netlbl_unlhsh_rcu_deref(p) \ rcu_dereference_check(p, lockdep_is_held(&netlbl_unlhsh_lock)) static struct netlbl_unlhsh_tbl __rcu *netlbl_unlhsh; static struct netlbl_unlhsh_iface __rcu *netlbl_unlhsh_def; /* Accept unlabeled packets flag */ static u8 netlabel_unlabel_acceptflg; /* NetLabel Generic NETLINK unlabeled family */ static struct genl_family netlbl_unlabel_gnl_family; /* NetLabel Netlink attribute policy */ static const struct nla_policy netlbl_unlabel_genl_policy[NLBL_UNLABEL_A_MAX + 1] = { [NLBL_UNLABEL_A_ACPTFLG] = { .type = NLA_U8 }, [NLBL_UNLABEL_A_IPV6ADDR] = { .type = NLA_BINARY, .len = sizeof(struct in6_addr) }, [NLBL_UNLABEL_A_IPV6MASK] = { .type = NLA_BINARY, .len = sizeof(struct in6_addr) }, [NLBL_UNLABEL_A_IPV4ADDR] = { .type = NLA_BINARY, .len = sizeof(struct in_addr) }, [NLBL_UNLABEL_A_IPV4MASK] = { .type = NLA_BINARY, .len = sizeof(struct in_addr) }, [NLBL_UNLABEL_A_IFACE] = { .type = 
NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [NLBL_UNLABEL_A_SECCTX] = { .type = NLA_BINARY } }; /* * Unlabeled Connection Hash Table Functions */ /** * netlbl_unlhsh_free_iface - Frees an interface entry from the hash table * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that memory allocated to a hash table interface entry can be * released safely. It is important to note that this function does not free * the IPv4 and IPv6 address lists contained as part of an interface entry. It * is up to the rest of the code to make sure an interface entry is only freed * once its address lists are empty. * */ static void netlbl_unlhsh_free_iface(struct rcu_head *entry) { struct netlbl_unlhsh_iface *iface; struct netlbl_af4list *iter4; struct netlbl_af4list *tmp4; #if IS_ENABLED(CONFIG_IPV6) struct netlbl_af6list *iter6; struct netlbl_af6list *tmp6; #endif /* IPv6 */ iface = container_of(entry, struct netlbl_unlhsh_iface, rcu); /* no need for locks here since we are the only one with access to this * structure */ netlbl_af4list_foreach_safe(iter4, tmp4, &iface->addr4_list) { netlbl_af4list_remove_entry(iter4); kfree(netlbl_unlhsh_addr4_entry(iter4)); } #if IS_ENABLED(CONFIG_IPV6) netlbl_af6list_foreach_safe(iter6, tmp6, &iface->addr6_list) { netlbl_af6list_remove_entry(iter6); kfree(netlbl_unlhsh_addr6_entry(iter6)); } #endif /* IPv6 */ kfree(iface); } /** * netlbl_unlhsh_hash - Hashing function for the hash table * @ifindex: the network interface/device to hash * * Description: * This is the hashing function for the unlabeled hash table; it returns the * bucket number for the given device/interface. The caller is responsible for * ensuring that the hash table is protected with either an RCU read lock or * the hash table lock. * */ static u32 netlbl_unlhsh_hash(int ifindex) { return ifindex & (netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->size - 1); } /** * netlbl_unlhsh_search_iface - Search for a matching interface entry * @ifindex: the network interface * * Description: * Searches the unlabeled connection hash table and returns a pointer to the * interface entry which matches @ifindex, otherwise NULL is returned. The * caller is responsible for ensuring that the hash table is protected with * either an RCU read lock or the hash table lock. * */ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) { u32 bkt; struct list_head *bkt_list; struct netlbl_unlhsh_iface *iter; bkt = netlbl_unlhsh_hash(ifindex); bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]; list_for_each_entry_rcu(iter, bkt_list, list, lockdep_is_held(&netlbl_unlhsh_lock)) if (iter->valid && iter->ifindex == ifindex) return iter; return NULL; } /** * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table * @iface: the associated interface entry * @addr: IPv4 address in network byte order * @mask: IPv4 address mask in network byte order * @secid: LSM secid value for entry * * Description: * Add a new address entry into the unlabeled connection hash table using the * interface entry specified by @iface. On success zero is returned, otherwise * a negative value is returned. 
* */ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, const struct in_addr *addr, const struct in_addr *mask, u32 secid) { int ret_val; struct netlbl_unlhsh_addr4 *entry; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->list.addr = addr->s_addr & mask->s_addr; entry->list.mask = mask->s_addr; entry->list.valid = 1; entry->secid = secid; spin_lock(&netlbl_unlhsh_lock); ret_val = netlbl_af4list_add(&entry->list, &iface->addr4_list); spin_unlock(&netlbl_unlhsh_lock); if (ret_val != 0) kfree(entry); return ret_val; } #if IS_ENABLED(CONFIG_IPV6) /** * netlbl_unlhsh_add_addr6 - Add a new IPv6 address entry to the hash table * @iface: the associated interface entry * @addr: IPv6 address in network byte order * @mask: IPv6 address mask in network byte order * @secid: LSM secid value for entry * * Description: * Add a new address entry into the unlabeled connection hash table using the * interface entry specified by @iface. On success zero is returned, otherwise * a negative value is returned. * */ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, const struct in6_addr *addr, const struct in6_addr *mask, u32 secid) { int ret_val; struct netlbl_unlhsh_addr6 *entry; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->list.addr = *addr; entry->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; entry->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; entry->list.addr.s6_addr32[2] &= mask->s6_addr32[2]; entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; entry->list.mask = *mask; entry->list.valid = 1; entry->secid = secid; spin_lock(&netlbl_unlhsh_lock); ret_val = netlbl_af6list_add(&entry->list, &iface->addr6_list); spin_unlock(&netlbl_unlhsh_lock); if (ret_val != 0) kfree(entry); return ret_val; } #endif /* IPv6 */ /** * netlbl_unlhsh_add_iface - Adds a new interface entry to the hash table * @ifindex: network interface * * Description: * Add a new, empty, interface entry into the unlabeled connection hash table. * On success a pointer to the new interface entry is returned, on failure NULL * is returned. * */ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) { u32 bkt; struct netlbl_unlhsh_iface *iface; iface = kzalloc(sizeof(*iface), GFP_ATOMIC); if (iface == NULL) return NULL; iface->ifindex = ifindex; INIT_LIST_HEAD(&iface->addr4_list); INIT_LIST_HEAD(&iface->addr6_list); iface->valid = 1; spin_lock(&netlbl_unlhsh_lock); if (ifindex > 0) { bkt = netlbl_unlhsh_hash(ifindex); if (netlbl_unlhsh_search_iface(ifindex) != NULL) goto add_iface_failure; list_add_tail_rcu(&iface->list, &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]); } else { INIT_LIST_HEAD(&iface->list); if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL) goto add_iface_failure; rcu_assign_pointer(netlbl_unlhsh_def, iface); } spin_unlock(&netlbl_unlhsh_lock); return iface; add_iface_failure: spin_unlock(&netlbl_unlhsh_lock); kfree(iface); return NULL; } /** * netlbl_unlhsh_add - Adds a new entry to the unlabeled connection hash table * @net: network namespace * @dev_name: interface name * @addr: IP address in network byte order * @mask: address mask in network byte order * @addr_len: length of address/mask (4 for IPv4, 16 for IPv6) * @secid: LSM secid value for the entry * @audit_info: NetLabel audit information * * Description: * Adds a new entry to the unlabeled connection hash table. Returns zero on * success, negative values on failure. 
* */ int netlbl_unlhsh_add(struct net *net, const char *dev_name, const void *addr, const void *mask, u32 addr_len, u32 secid, struct netlbl_audit *audit_info) { int ret_val; int ifindex; struct net_device *dev; struct netlbl_unlhsh_iface *iface; struct audit_buffer *audit_buf = NULL; struct lsm_context ctx; if (addr_len != sizeof(struct in_addr) && addr_len != sizeof(struct in6_addr)) return -EINVAL; rcu_read_lock(); if (dev_name != NULL) { dev = dev_get_by_name_rcu(net, dev_name); if (dev == NULL) { ret_val = -ENODEV; goto unlhsh_add_return; } ifindex = dev->ifindex; iface = netlbl_unlhsh_search_iface(ifindex); } else { ifindex = 0; iface = rcu_dereference(netlbl_unlhsh_def); } if (iface == NULL) { iface = netlbl_unlhsh_add_iface(ifindex); if (iface == NULL) { ret_val = -ENOMEM; goto unlhsh_add_return; } } audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCADD, audit_info); switch (addr_len) { case sizeof(struct in_addr): { const struct in_addr *addr4 = addr; const struct in_addr *mask4 = mask; ret_val = netlbl_unlhsh_add_addr4(iface, addr4, mask4, secid); if (audit_buf != NULL) netlbl_af4list_audit_addr(audit_buf, 1, dev_name, addr4->s_addr, mask4->s_addr); break; } #if IS_ENABLED(CONFIG_IPV6) case sizeof(struct in6_addr): { const struct in6_addr *addr6 = addr; const struct in6_addr *mask6 = mask; ret_val = netlbl_unlhsh_add_addr6(iface, addr6, mask6, secid); if (audit_buf != NULL) netlbl_af6list_audit_addr(audit_buf, 1, dev_name, addr6, mask6); break; } #endif /* IPv6 */ default: ret_val = -EINVAL; } if (ret_val == 0) atomic_inc(&netlabel_mgmt_protocount); unlhsh_add_return: rcu_read_unlock(); if (audit_buf != NULL) { if (security_secid_to_secctx(secid, &ctx) >= 0) { audit_log_format(audit_buf, " sec_obj=%s", ctx.context); security_release_secctx(&ctx); } audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * netlbl_unlhsh_remove_addr4 - Remove an IPv4 address entry * @net: network namespace * @iface: interface entry * @addr: IP address * @mask: IP address mask * @audit_info: NetLabel audit information * * Description: * Remove an IP address entry from the unlabeled connection hash table. * Returns zero on success, negative values on failure. * */ static int netlbl_unlhsh_remove_addr4(struct net *net, struct netlbl_unlhsh_iface *iface, const struct in_addr *addr, const struct in_addr *mask, struct netlbl_audit *audit_info) { struct netlbl_af4list *list_entry; struct netlbl_unlhsh_addr4 *entry; struct audit_buffer *audit_buf; struct net_device *dev; struct lsm_context ctx; spin_lock(&netlbl_unlhsh_lock); list_entry = netlbl_af4list_remove(addr->s_addr, mask->s_addr, &iface->addr4_list); spin_unlock(&netlbl_unlhsh_lock); if (list_entry != NULL) entry = netlbl_unlhsh_addr4_entry(list_entry); else entry = NULL; audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, audit_info); if (audit_buf != NULL) { dev = dev_get_by_index(net, iface->ifindex); netlbl_af4list_audit_addr(audit_buf, 1, (dev != NULL ? dev->name : NULL), addr->s_addr, mask->s_addr); dev_put(dev); if (entry != NULL && security_secid_to_secctx(entry->secid, &ctx) >= 0) { audit_log_format(audit_buf, " sec_obj=%s", ctx.context); security_release_secctx(&ctx); } audit_log_format(audit_buf, " res=%u", entry != NULL ? 
1 : 0); audit_log_end(audit_buf); } if (entry == NULL) return -ENOENT; kfree_rcu(entry, rcu); return 0; } #if IS_ENABLED(CONFIG_IPV6) /** * netlbl_unlhsh_remove_addr6 - Remove an IPv6 address entry * @net: network namespace * @iface: interface entry * @addr: IP address * @mask: IP address mask * @audit_info: NetLabel audit information * * Description: * Remove an IP address entry from the unlabeled connection hash table. * Returns zero on success, negative values on failure. * */ static int netlbl_unlhsh_remove_addr6(struct net *net, struct netlbl_unlhsh_iface *iface, const struct in6_addr *addr, const struct in6_addr *mask, struct netlbl_audit *audit_info) { struct netlbl_af6list *list_entry; struct netlbl_unlhsh_addr6 *entry; struct audit_buffer *audit_buf; struct net_device *dev; struct lsm_context ctx; spin_lock(&netlbl_unlhsh_lock); list_entry = netlbl_af6list_remove(addr, mask, &iface->addr6_list); spin_unlock(&netlbl_unlhsh_lock); if (list_entry != NULL) entry = netlbl_unlhsh_addr6_entry(list_entry); else entry = NULL; audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, audit_info); if (audit_buf != NULL) { dev = dev_get_by_index(net, iface->ifindex); netlbl_af6list_audit_addr(audit_buf, 1, (dev != NULL ? dev->name : NULL), addr, mask); dev_put(dev); if (entry != NULL && security_secid_to_secctx(entry->secid, &ctx) >= 0) { audit_log_format(audit_buf, " sec_obj=%s", ctx.context); security_release_secctx(&ctx); } audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0); audit_log_end(audit_buf); } if (entry == NULL) return -ENOENT; kfree_rcu(entry, rcu); return 0; } #endif /* IPv6 */ /** * netlbl_unlhsh_condremove_iface - Remove an interface entry * @iface: the interface entry * * Description: * Remove an interface entry from the unlabeled connection hash table if it is * empty. An interface entry is considered to be empty if there are no * address entries assigned to it. * */ static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface) { struct netlbl_af4list *iter4; #if IS_ENABLED(CONFIG_IPV6) struct netlbl_af6list *iter6; #endif /* IPv6 */ spin_lock(&netlbl_unlhsh_lock); netlbl_af4list_foreach_rcu(iter4, &iface->addr4_list) goto unlhsh_condremove_failure; #if IS_ENABLED(CONFIG_IPV6) netlbl_af6list_foreach_rcu(iter6, &iface->addr6_list) goto unlhsh_condremove_failure; #endif /* IPv6 */ iface->valid = 0; if (iface->ifindex > 0) list_del_rcu(&iface->list); else RCU_INIT_POINTER(netlbl_unlhsh_def, NULL); spin_unlock(&netlbl_unlhsh_lock); call_rcu(&iface->rcu, netlbl_unlhsh_free_iface); return; unlhsh_condremove_failure: spin_unlock(&netlbl_unlhsh_lock); } /** * netlbl_unlhsh_remove - Remove an entry from the unlabeled hash table * @net: network namespace * @dev_name: interface name * @addr: IP address in network byte order * @mask: address mask in network byte order * @addr_len: length of address/mask (4 for IPv4, 16 for IPv6) * @audit_info: NetLabel audit information * * Description: * Removes an existing entry from the unlabeled connection hash table. * Returns zero on success, negative values on failure. 
* */ int netlbl_unlhsh_remove(struct net *net, const char *dev_name, const void *addr, const void *mask, u32 addr_len, struct netlbl_audit *audit_info) { int ret_val; struct net_device *dev; struct netlbl_unlhsh_iface *iface; if (addr_len != sizeof(struct in_addr) && addr_len != sizeof(struct in6_addr)) return -EINVAL; rcu_read_lock(); if (dev_name != NULL) { dev = dev_get_by_name_rcu(net, dev_name); if (dev == NULL) { ret_val = -ENODEV; goto unlhsh_remove_return; } iface = netlbl_unlhsh_search_iface(dev->ifindex); } else iface = rcu_dereference(netlbl_unlhsh_def); if (iface == NULL) { ret_val = -ENOENT; goto unlhsh_remove_return; } switch (addr_len) { case sizeof(struct in_addr): ret_val = netlbl_unlhsh_remove_addr4(net, iface, addr, mask, audit_info); break; #if IS_ENABLED(CONFIG_IPV6) case sizeof(struct in6_addr): ret_val = netlbl_unlhsh_remove_addr6(net, iface, addr, mask, audit_info); break; #endif /* IPv6 */ default: ret_val = -EINVAL; } if (ret_val == 0) { netlbl_unlhsh_condremove_iface(iface); atomic_dec(&netlabel_mgmt_protocount); } unlhsh_remove_return: rcu_read_unlock(); return ret_val; } /* * General Helper Functions */ /** * netlbl_unlhsh_netdev_handler - Network device notification handler * @this: notifier block * @event: the event * @ptr: the netdevice notifier info (cast to void) * * Description: * Handle network device events, although at present all we care about is a * network device going away. In the case of a device going away we clear any * related entries from the unlabeled connection hash table. * */ static int netlbl_unlhsh_netdev_handler(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct netlbl_unlhsh_iface *iface = NULL; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; /* XXX - should this be a check for NETDEV_DOWN or _UNREGISTER? */ if (event == NETDEV_DOWN) { spin_lock(&netlbl_unlhsh_lock); iface = netlbl_unlhsh_search_iface(dev->ifindex); if (iface != NULL && iface->valid) { iface->valid = 0; list_del_rcu(&iface->list); } else iface = NULL; spin_unlock(&netlbl_unlhsh_lock); } if (iface != NULL) call_rcu(&iface->rcu, netlbl_unlhsh_free_iface); return NOTIFY_DONE; } /** * netlbl_unlabel_acceptflg_set - Set the unlabeled accept flag * @value: desired value * @audit_info: NetLabel audit information * * Description: * Set the value of the unlabeled accept flag to @value. * */ static void netlbl_unlabel_acceptflg_set(u8 value, struct netlbl_audit *audit_info) { struct audit_buffer *audit_buf; u8 old_val; old_val = netlabel_unlabel_acceptflg; netlabel_unlabel_acceptflg = value; audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " unlbl_accept=%u old=%u", value, old_val); audit_log_end(audit_buf); } } /** * netlbl_unlabel_addrinfo_get - Get the IPv4/6 address information * @info: the Generic NETLINK info block * @addr: the IP address * @mask: the IP address mask * @len: the address length * * Description: * Examine the Generic NETLINK message and extract the IP address information. * Returns zero on success, negative values on failure. 
* */ static int netlbl_unlabel_addrinfo_get(struct genl_info *info, void **addr, void **mask, u32 *len) { u32 addr_len; if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] && info->attrs[NLBL_UNLABEL_A_IPV4MASK]) { addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]); if (addr_len != sizeof(struct in_addr) && addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK])) return -EINVAL; *len = addr_len; *addr = nla_data(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]); *mask = nla_data(info->attrs[NLBL_UNLABEL_A_IPV4MASK]); return 0; } else if (info->attrs[NLBL_UNLABEL_A_IPV6ADDR]) { addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV6ADDR]); if (addr_len != sizeof(struct in6_addr) && addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV6MASK])) return -EINVAL; *len = addr_len; *addr = nla_data(info->attrs[NLBL_UNLABEL_A_IPV6ADDR]); *mask = nla_data(info->attrs[NLBL_UNLABEL_A_IPV6MASK]); return 0; } return -EINVAL; } /* * NetLabel Command Handlers */ /** * netlbl_unlabel_accept - Handle an ACCEPT message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated ACCEPT message and set the accept flag accordingly. * Returns zero on success, negative values on failure. * */ static int netlbl_unlabel_accept(struct sk_buff *skb, struct genl_info *info) { u8 value; struct netlbl_audit audit_info; if (info->attrs[NLBL_UNLABEL_A_ACPTFLG]) { value = nla_get_u8(info->attrs[NLBL_UNLABEL_A_ACPTFLG]); if (value == 1 || value == 0) { netlbl_netlink_auditinfo(&audit_info); netlbl_unlabel_acceptflg_set(value, &audit_info); return 0; } } return -EINVAL; } /** * netlbl_unlabel_list - Handle a LIST message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated LIST message and respond with the current status. * Returns zero on success, negative values on failure. * */ static int netlbl_unlabel_list(struct sk_buff *skb, struct genl_info *info) { int ret_val = -EINVAL; struct sk_buff *ans_skb; void *data; ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (ans_skb == NULL) goto list_failure; data = genlmsg_put_reply(ans_skb, info, &netlbl_unlabel_gnl_family, 0, NLBL_UNLABEL_C_LIST); if (data == NULL) { ret_val = -ENOMEM; goto list_failure; } ret_val = nla_put_u8(ans_skb, NLBL_UNLABEL_A_ACPTFLG, netlabel_unlabel_acceptflg); if (ret_val != 0) goto list_failure; genlmsg_end(ans_skb, data); return genlmsg_reply(ans_skb, info); list_failure: kfree_skb(ans_skb); return ret_val; } /** * netlbl_unlabel_staticadd - Handle a STATICADD message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated STATICADD message and add a new unlabeled * connection entry to the hash table. Returns zero on success, negative * values on failure. * */ static int netlbl_unlabel_staticadd(struct sk_buff *skb, struct genl_info *info) { int ret_val; char *dev_name; void *addr; void *mask; u32 addr_len; u32 secid; struct netlbl_audit audit_info; /* Don't allow users to add both IPv4 and IPv6 addresses for a * single entry. However, allow users to create two entries, one each * for IPv4 and IPv6, with the same LSM security context which should * achieve the same result. 
*/ if (!info->attrs[NLBL_UNLABEL_A_SECCTX] || !info->attrs[NLBL_UNLABEL_A_IFACE] || !((!info->attrs[NLBL_UNLABEL_A_IPV4ADDR] || !info->attrs[NLBL_UNLABEL_A_IPV4MASK]) ^ (!info->attrs[NLBL_UNLABEL_A_IPV6ADDR] || !info->attrs[NLBL_UNLABEL_A_IPV6MASK]))) return -EINVAL; netlbl_netlink_auditinfo(&audit_info); ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len); if (ret_val != 0) return ret_val; dev_name = nla_data(info->attrs[NLBL_UNLABEL_A_IFACE]); ret_val = security_secctx_to_secid( nla_data(info->attrs[NLBL_UNLABEL_A_SECCTX]), nla_len(info->attrs[NLBL_UNLABEL_A_SECCTX]), &secid); if (ret_val != 0) return ret_val; return netlbl_unlhsh_add(&init_net, dev_name, addr, mask, addr_len, secid, &audit_info); } /** * netlbl_unlabel_staticadddef - Handle a STATICADDDEF message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated STATICADDDEF message and add a new default * unlabeled connection entry. Returns zero on success, negative values on * failure. * */ static int netlbl_unlabel_staticadddef(struct sk_buff *skb, struct genl_info *info) { int ret_val; void *addr; void *mask; u32 addr_len; u32 secid; struct netlbl_audit audit_info; /* Don't allow users to add both IPv4 and IPv6 addresses for a * single entry. However, allow users to create two entries, one each * for IPv4 and IPv6, with the same LSM security context which should * achieve the same result. */ if (!info->attrs[NLBL_UNLABEL_A_SECCTX] || !((!info->attrs[NLBL_UNLABEL_A_IPV4ADDR] || !info->attrs[NLBL_UNLABEL_A_IPV4MASK]) ^ (!info->attrs[NLBL_UNLABEL_A_IPV6ADDR] || !info->attrs[NLBL_UNLABEL_A_IPV6MASK]))) return -EINVAL; netlbl_netlink_auditinfo(&audit_info); ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len); if (ret_val != 0) return ret_val; ret_val = security_secctx_to_secid( nla_data(info->attrs[NLBL_UNLABEL_A_SECCTX]), nla_len(info->attrs[NLBL_UNLABEL_A_SECCTX]), &secid); if (ret_val != 0) return ret_val; return netlbl_unlhsh_add(&init_net, NULL, addr, mask, addr_len, secid, &audit_info); } /** * netlbl_unlabel_staticremove - Handle a STATICREMOVE message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated STATICREMOVE message and remove the specified * unlabeled connection entry. Returns zero on success, negative values on * failure. * */ static int netlbl_unlabel_staticremove(struct sk_buff *skb, struct genl_info *info) { int ret_val; char *dev_name; void *addr; void *mask; u32 addr_len; struct netlbl_audit audit_info; /* See the note in netlbl_unlabel_staticadd() about not allowing both * IPv4 and IPv6 in the same entry. */ if (!info->attrs[NLBL_UNLABEL_A_IFACE] || !((!info->attrs[NLBL_UNLABEL_A_IPV4ADDR] || !info->attrs[NLBL_UNLABEL_A_IPV4MASK]) ^ (!info->attrs[NLBL_UNLABEL_A_IPV6ADDR] || !info->attrs[NLBL_UNLABEL_A_IPV6MASK]))) return -EINVAL; netlbl_netlink_auditinfo(&audit_info); ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len); if (ret_val != 0) return ret_val; dev_name = nla_data(info->attrs[NLBL_UNLABEL_A_IFACE]); return netlbl_unlhsh_remove(&init_net, dev_name, addr, mask, addr_len, &audit_info); } /** * netlbl_unlabel_staticremovedef - Handle a STATICREMOVEDEF message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated STATICREMOVEDEF message and remove the default * unlabeled connection entry. Returns zero on success, negative values on * failure. 
* */ static int netlbl_unlabel_staticremovedef(struct sk_buff *skb, struct genl_info *info) { int ret_val; void *addr; void *mask; u32 addr_len; struct netlbl_audit audit_info; /* See the note in netlbl_unlabel_staticadd() about not allowing both * IPv4 and IPv6 in the same entry. */ if (!((!info->attrs[NLBL_UNLABEL_A_IPV4ADDR] || !info->attrs[NLBL_UNLABEL_A_IPV4MASK]) ^ (!info->attrs[NLBL_UNLABEL_A_IPV6ADDR] || !info->attrs[NLBL_UNLABEL_A_IPV6MASK]))) return -EINVAL; netlbl_netlink_auditinfo(&audit_info); ret_val = netlbl_unlabel_addrinfo_get(info, &addr, &mask, &addr_len); if (ret_val != 0) return ret_val; return netlbl_unlhsh_remove(&init_net, NULL, addr, mask, addr_len, &audit_info); } /** * netlbl_unlabel_staticlist_gen - Generate messages for STATICLIST[DEF] * @cmd: command/message * @iface: the interface entry * @addr4: the IPv4 address entry * @addr6: the IPv6 address entry * @arg: the netlbl_unlhsh_walk_arg structure * * Description: * This function is designed to be used to generate a response for a * STATICLIST or STATICLISTDEF message. When called either @addr4 or @addr6 * can be specified, not both, the other unspecified entry should be set to * NULL by the caller. Returns the size of the message on success, negative * values on failure. * */ static int netlbl_unlabel_staticlist_gen(u32 cmd, const struct netlbl_unlhsh_iface *iface, const struct netlbl_unlhsh_addr4 *addr4, const struct netlbl_unlhsh_addr6 *addr6, void *arg) { int ret_val = -ENOMEM; struct netlbl_unlhsh_walk_arg *cb_arg = arg; struct net_device *dev; struct lsm_context ctx; void *data; u32 secid; data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid, cb_arg->seq, &netlbl_unlabel_gnl_family, NLM_F_MULTI, cmd); if (data == NULL) goto list_cb_failure; if (iface->ifindex > 0) { dev = dev_get_by_index(&init_net, iface->ifindex); if (!dev) { ret_val = -ENODEV; goto list_cb_failure; } ret_val = nla_put_string(cb_arg->skb, NLBL_UNLABEL_A_IFACE, dev->name); dev_put(dev); if (ret_val != 0) goto list_cb_failure; } if (addr4) { struct in_addr addr_struct; addr_struct.s_addr = addr4->list.addr; ret_val = nla_put_in_addr(cb_arg->skb, NLBL_UNLABEL_A_IPV4ADDR, addr_struct.s_addr); if (ret_val != 0) goto list_cb_failure; addr_struct.s_addr = addr4->list.mask; ret_val = nla_put_in_addr(cb_arg->skb, NLBL_UNLABEL_A_IPV4MASK, addr_struct.s_addr); if (ret_val != 0) goto list_cb_failure; secid = addr4->secid; } else { ret_val = nla_put_in6_addr(cb_arg->skb, NLBL_UNLABEL_A_IPV6ADDR, &addr6->list.addr); if (ret_val != 0) goto list_cb_failure; ret_val = nla_put_in6_addr(cb_arg->skb, NLBL_UNLABEL_A_IPV6MASK, &addr6->list.mask); if (ret_val != 0) goto list_cb_failure; secid = addr6->secid; } ret_val = security_secid_to_secctx(secid, &ctx); if (ret_val < 0) goto list_cb_failure; ret_val = nla_put(cb_arg->skb, NLBL_UNLABEL_A_SECCTX, ctx.len, ctx.context); security_release_secctx(&ctx); if (ret_val != 0) goto list_cb_failure; cb_arg->seq++; genlmsg_end(cb_arg->skb, data); return 0; list_cb_failure: genlmsg_cancel(cb_arg->skb, data); return ret_val; } /** * netlbl_unlabel_staticlist - Handle a STATICLIST message * @skb: the NETLINK buffer * @cb: the NETLINK callback * * Description: * Process a user generated STATICLIST message and dump the unlabeled * connection hash table in a form suitable for use in a kernel generated * STATICLIST message. Returns the length of @skb. 
/**
 * netlbl_unlabel_staticlist - Handle a STATICLIST message
 * @skb: the NETLINK buffer
 * @cb: the NETLINK callback
 *
 * Description:
 * Process a user generated STATICLIST message and dump the unlabeled
 * connection hash table in a form suitable for use in a kernel generated
 * STATICLIST message. Returns the length of @skb.
 *
 */
static int netlbl_unlabel_staticlist(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	struct netlbl_unlhsh_walk_arg cb_arg;
	u32 skip_bkt = cb->args[0];
	u32 skip_chain = cb->args[1];
	u32 skip_addr4 = cb->args[2];
	u32 iter_bkt, iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;
	struct netlbl_unlhsh_iface *iface;
	struct list_head *iter_list;
	struct netlbl_af4list *addr4;
#if IS_ENABLED(CONFIG_IPV6)
	u32 skip_addr6 = cb->args[3];
	struct netlbl_af6list *addr6;
#endif

	cb_arg.nl_cb = cb;
	cb_arg.skb = skb;
	cb_arg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	for (iter_bkt = skip_bkt;
	     iter_bkt < rcu_dereference(netlbl_unlhsh)->size;
	     iter_bkt++) {
		iter_list = &rcu_dereference(netlbl_unlhsh)->tbl[iter_bkt];
		list_for_each_entry_rcu(iface, iter_list, list) {
			if (!iface->valid ||
			    iter_chain++ < skip_chain)
				continue;
			netlbl_af4list_foreach_rcu(addr4,
						   &iface->addr4_list) {
				if (iter_addr4++ < skip_addr4)
					continue;
				if (netlbl_unlabel_staticlist_gen(
					      NLBL_UNLABEL_C_STATICLIST,
					      iface,
					      netlbl_unlhsh_addr4_entry(addr4),
					      NULL,
					      &cb_arg) < 0) {
					iter_addr4--;
					iter_chain--;
					goto unlabel_staticlist_return;
				}
			}
			iter_addr4 = 0;
			skip_addr4 = 0;
#if IS_ENABLED(CONFIG_IPV6)
			netlbl_af6list_foreach_rcu(addr6,
						   &iface->addr6_list) {
				if (iter_addr6++ < skip_addr6)
					continue;
				if (netlbl_unlabel_staticlist_gen(
					      NLBL_UNLABEL_C_STATICLIST,
					      iface,
					      NULL,
					      netlbl_unlhsh_addr6_entry(addr6),
					      &cb_arg) < 0) {
					iter_addr6--;
					iter_chain--;
					goto unlabel_staticlist_return;
				}
			}
			iter_addr6 = 0;
			skip_addr6 = 0;
#endif /* IPv6 */
		}
		iter_chain = 0;
		skip_chain = 0;
	}

unlabel_staticlist_return:
	rcu_read_unlock();
	cb->args[0] = iter_bkt;
	cb->args[1] = iter_chain;
	cb->args[2] = iter_addr4;
	cb->args[3] = iter_addr6;
	return skb->len;
}
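/*
 * Illustrative sketch, not part of the original file: the dump-resume
 * pattern used above in its simplest form. A netlink dump callback may be
 * invoked repeatedly, once per skb, and the iteration cursor is carried
 * across invocations in cb->args[]. The emitter and the table size below
 * are hypothetical.
 */
static int netlbl_emit_item_sketch(struct sk_buff *skb, u32 idx)
{
	/* hypothetical per-entry emitter: < 0 means the skb is full */
	return nla_put_u32(skb, 1 /* hypothetical attribute type */, idx);
}

static int netlbl_dump_resume_sketch(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	u32 iter, nitems = 128;	/* hypothetical table size */

	for (iter = cb->args[0]; iter < nitems; iter++)
		if (netlbl_emit_item_sketch(skb, iter) < 0)
			break;			/* skb full, stop here */
	cb->args[0] = iter;			/* next invocation resumes here */
	return skb->len;
}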
/**
 * netlbl_unlabel_staticlistdef - Handle a STATICLISTDEF message
 * @skb: the NETLINK buffer
 * @cb: the NETLINK callback
 *
 * Description:
 * Process a user generated STATICLISTDEF message and dump the default
 * unlabeled connection entry in a form suitable for use in a kernel generated
 * STATICLISTDEF message. Returns the length of @skb.
 *
 */
static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
					struct netlink_callback *cb)
{
	struct netlbl_unlhsh_walk_arg cb_arg;
	struct netlbl_unlhsh_iface *iface;
	u32 iter_addr4 = 0, iter_addr6 = 0;
	struct netlbl_af4list *addr4;
#if IS_ENABLED(CONFIG_IPV6)
	struct netlbl_af6list *addr6;
#endif

	cb_arg.nl_cb = cb;
	cb_arg.skb = skb;
	cb_arg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	iface = rcu_dereference(netlbl_unlhsh_def);
	if (iface == NULL || !iface->valid)
		goto unlabel_staticlistdef_return;

	netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) {
		if (iter_addr4++ < cb->args[0])
			continue;
		if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
					      iface,
					      netlbl_unlhsh_addr4_entry(addr4),
					      NULL,
					      &cb_arg) < 0) {
			iter_addr4--;
			goto unlabel_staticlistdef_return;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
		if (iter_addr6++ < cb->args[1])
			continue;
		if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
					      iface,
					      NULL,
					      netlbl_unlhsh_addr6_entry(addr6),
					      &cb_arg) < 0) {
			iter_addr6--;
			goto unlabel_staticlistdef_return;
		}
	}
#endif /* IPv6 */

unlabel_staticlistdef_return:
	rcu_read_unlock();
	cb->args[0] = iter_addr4;
	cb->args[1] = iter_addr6;
	return skb->len;
}

/*
 * NetLabel Generic NETLINK Command Definitions
 */

static const struct genl_small_ops netlbl_unlabel_genl_ops[] = {
	{
	.cmd = NLBL_UNLABEL_C_STATICADD,
	.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	.flags = GENL_ADMIN_PERM,
	.doit = netlbl_unlabel_staticadd,
	.dumpit = NULL,
	},
	{
	.cmd = NLBL_UNLABEL_C_STATICREMOVE,
	.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	.flags = GENL_ADMIN_PERM,
	.doit = netlbl_unlabel_staticremove,
	.dumpit = NULL,
	},
	{
	.cmd = NLBL_UNLABEL_C_STATICLIST,
	.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	.flags = 0,
	.doit = NULL,
	.dumpit = netlbl_unlabel_staticlist,
	},
	{
	.cmd = NLBL_UNLABEL_C_STATICADDDEF,
	.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	.flags = GENL_ADMIN_PERM,
	.doit = netlbl_unlabel_staticadddef,
	.dumpit = NULL,
	},
	{
	.cmd = NLBL_UNLABEL_C_STATICREMOVEDEF,
	.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	.flags = GENL_ADMIN_PERM,
	.doit = netlbl_unlabel_staticremovedef,
	.dumpit = NULL,
	},
	{
	.cmd = NLBL_UNLABEL_C_STATICLISTDEF,
	.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	.flags = 0,
	.doit = NULL,
	.dumpit = netlbl_unlabel_staticlistdef,
	},
	{
	.cmd = NLBL_UNLABEL_C_ACCEPT,
	.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	.flags = GENL_ADMIN_PERM,
	.doit = netlbl_unlabel_accept,
	.dumpit = NULL,
	},
	{
	.cmd = NLBL_UNLABEL_C_LIST,
	.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	.flags = 0,
	.doit = netlbl_unlabel_list,
	.dumpit = NULL,
	},
};

static struct genl_family netlbl_unlabel_gnl_family __ro_after_init = {
	.hdrsize = 0,
	.name = NETLBL_NLTYPE_UNLABELED_NAME,
	.version = NETLBL_PROTO_VERSION,
	.maxattr = NLBL_UNLABEL_A_MAX,
	.policy = netlbl_unlabel_genl_policy,
	.module = THIS_MODULE,
	.small_ops = netlbl_unlabel_genl_ops,
	.n_small_ops = ARRAY_SIZE(netlbl_unlabel_genl_ops),
	.resv_start_op = NLBL_UNLABEL_C_STATICLISTDEF + 1,
};

/*
 * NetLabel Generic NETLINK Protocol Functions
 */

/**
 * netlbl_unlabel_genl_init - Register the Unlabeled NetLabel component
 *
 * Description:
 * Register the unlabeled packet NetLabel component with the Generic NETLINK
 * mechanism. Returns zero on success, negative values on failure.
 *
 */
int __init netlbl_unlabel_genl_init(void)
{
	return genl_register_family(&netlbl_unlabel_gnl_family);
}

/*
 * NetLabel KAPI Hooks
 */

static struct notifier_block netlbl_unlhsh_netdev_notifier = {
	.notifier_call = netlbl_unlhsh_netdev_handler,
};

/**
 * netlbl_unlabel_init - Initialize the unlabeled connection hash table
 * @size: the number of bits to use for the hash buckets
 *
 * Description:
 * Initializes the unlabeled connection hash table and registers a network
 * device notification handler. This function should only be called by the
 * NetLabel subsystem itself during initialization. Returns zero on success,
 * non-zero values on error.
 *
 */
int __init netlbl_unlabel_init(u32 size)
{
	u32 iter;
	struct netlbl_unlhsh_tbl *hsh_tbl;

	if (size == 0)
		return -EINVAL;

	hsh_tbl = kmalloc(sizeof(*hsh_tbl), GFP_KERNEL);
	if (hsh_tbl == NULL)
		return -ENOMEM;
	hsh_tbl->size = 1 << size;
	hsh_tbl->tbl = kcalloc(hsh_tbl->size,
			       sizeof(struct list_head),
			       GFP_KERNEL);
	if (hsh_tbl->tbl == NULL) {
		kfree(hsh_tbl);
		return -ENOMEM;
	}
	for (iter = 0; iter < hsh_tbl->size; iter++)
		INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);

	spin_lock(&netlbl_unlhsh_lock);
	rcu_assign_pointer(netlbl_unlhsh, hsh_tbl);
	spin_unlock(&netlbl_unlhsh_lock);

	register_netdevice_notifier(&netlbl_unlhsh_netdev_notifier);

	return 0;
}

/**
 * netlbl_unlabel_getattr - Get the security attributes for an unlabeled packet
 * @skb: the packet
 * @family: protocol family
 * @secattr: the security attributes
 *
 * Description:
 * Determine the security attributes, if any, for an unlabeled packet and
 * return them in @secattr. Returns zero on success and negative values on
 * failure.
 *
 */
int netlbl_unlabel_getattr(const struct sk_buff *skb,
			   u16 family,
			   struct netlbl_lsm_secattr *secattr)
{
	struct netlbl_unlhsh_iface *iface;

	rcu_read_lock();
	iface = netlbl_unlhsh_search_iface(skb->skb_iif);
	if (iface == NULL)
		iface = rcu_dereference(netlbl_unlhsh_def);
	if (iface == NULL || !iface->valid)
		goto unlabel_getattr_nolabel;

#if IS_ENABLED(CONFIG_IPV6)
	/* When resolving a fallback label, check the sk_buff version as
	 * it is possible (e.g. SCTP) to have family = PF_INET6 while
	 * receiving ip_hdr(skb)->version = 4.
	 */
	if (family == PF_INET6 && ip_hdr(skb)->version == 4)
		family = PF_INET;
#endif /* IPv6 */

	switch (family) {
	case PF_INET: {
		struct iphdr *hdr4;
		struct netlbl_af4list *addr4;

		hdr4 = ip_hdr(skb);
		addr4 = netlbl_af4list_search(hdr4->saddr,
					      &iface->addr4_list);
		if (addr4 == NULL)
			goto unlabel_getattr_nolabel;
		secattr->attr.secid = netlbl_unlhsh_addr4_entry(addr4)->secid;
		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6: {
		struct ipv6hdr *hdr6;
		struct netlbl_af6list *addr6;

		hdr6 = ipv6_hdr(skb);
		addr6 = netlbl_af6list_search(&hdr6->saddr,
					      &iface->addr6_list);
		if (addr6 == NULL)
			goto unlabel_getattr_nolabel;
		secattr->attr.secid = netlbl_unlhsh_addr6_entry(addr6)->secid;
		break;
	}
#endif /* IPv6 */
	default:
		goto unlabel_getattr_nolabel;
	}
	rcu_read_unlock();

	secattr->flags |= NETLBL_SECATTR_SECID;
	secattr->type = NETLBL_NLTYPE_UNLABELED;
	return 0;

unlabel_getattr_nolabel:
	rcu_read_unlock();
	if (netlabel_unlabel_acceptflg == 0)
		return -ENOMSG;
	secattr->type = NETLBL_NLTYPE_UNLABELED;
	return 0;
}
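/*
 * Illustrative note, not part of the original file: the @size argument to
 * netlbl_unlabel_init() above is the log2 of the bucket count, so a call
 * such as netlbl_unlabel_init(7) allocates 1 << 7 == 128 hash buckets,
 * each an empty list head that is later traversed under RCU protection.
 */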
/**
 * netlbl_unlabel_defconf - Set the default config to allow unlabeled packets
 *
 * Description:
 * Set the default NetLabel configuration to allow incoming unlabeled packets
 * and to send unlabeled network traffic by default.
 *
 */
int __init netlbl_unlabel_defconf(void)
{
	int ret_val;
	struct netlbl_dom_map *entry;
	struct netlbl_audit audit_info;

	/* Only the kernel is allowed to call this function and the only time
	 * it is called is at bootup before the audit subsystem is reporting
	 * messages so don't worry too much about these values. */
	security_current_getlsmprop_subj(&audit_info.prop);
	audit_info.loginuid = GLOBAL_ROOT_UID;
	audit_info.sessionid = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (entry == NULL)
		return -ENOMEM;
	entry->family = AF_UNSPEC;
	entry->def.type = NETLBL_NLTYPE_UNLABELED;
	ret_val = netlbl_domhsh_add_default(entry, &audit_info);
	if (ret_val != 0)
		return ret_val;

	netlbl_unlabel_acceptflg_set(1, &audit_info);

	return 0;
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/wait.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @word: word holding free bits
	 */
	unsigned long word;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;

	/**
	 * @swap_lock: serializes simultaneous updates of ->word and ->cleared
	 */
	raw_spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;

	/*
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/*
	 * @ws_active: count of currently active ws waitqueues
	 */
	atomic_t ws_active;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow()
	 */
	unsigned int min_shallow_depth;

	/**
	 * @completion_cnt: Number of bits cleared passed to the
	 * wakeup function.
	 */
	atomic_t completion_cnt;

	/**
	 * @wakeup_cnt: Number of thread wake ups issued.
	 */
	atomic_t wakeup_cnt;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 * @alloc_hint: If true, apply percpu hint for where to start searching for
 *              a free bit.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint);

/* sbitmap internal helper */
static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
{
	if (index == sb->map_nr - 1)
		return sb->depth - (index << sb->shift);

	return 1U << sb->shift;
}

/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	free_percpu(sb->alloc_hint);
	kvfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
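/*
 * Illustrative note, not part of the original header: with depth == 100
 * and shift == 6 (64 bits per word), the map spans map_nr == 2 words;
 * __map_depth() above returns 64 for index 0 and 100 - 64 == 36 for the
 * final, partially used word at index 1.
 */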
/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb);

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);

/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))

typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @start: Where to start the iteration.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   __map_depth(sb, index) - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}
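/*
 * Illustrative usage sketch, not part of the original header: initialize a
 * small map, allocate one bit, return it, and tear the map down. Note that
 * sbitmap_put() is declared further below in this header.
 */
static inline int sbitmap_usage_sketch(void)
{
	struct sbitmap sb;
	int nr, ret;

	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
				false, true);
	if (ret)
		return ret;

	nr = sbitmap_get(&sb);		/* -1 if no bit is free */
	if (nr >= 0)
		sbitmap_put(&sb, nr);	/* deferred clear + alloc hint update */

	sbitmap_free(&sb);
	return 0;
}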
/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}

static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special, since it doesn't actually clear the bit, rather it
 * sets the corresponding bit in the ->cleared mask instead. Paired with
 * the caller doing sbitmap_deferred_clear() if a given index is full, which
 * will clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb,
					      unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}

/*
 * Pairs with sbitmap_get(); this one applies both the cleared bit and the
 * allocation hint.
 */
static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
	sbitmap_deferred_clear_bit(sb, bitnr);

	if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
		*raw_cpu_ptr(sb->alloc_hint) = bitnr;
}

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline int sbitmap_calculate_shift(unsigned int depth)
{
	int shift = ilog2(BITS_PER_LONG);

	/*
	 * If the bitmap is small, shrink the number of bits per word so
	 * we spread over a few cachelines, at least. If less than 4
	 * bits, just forget about it, it's not going to work optimally
	 * anyway.
	 */
	if (depth >= 4) {
		while ((4U << shift) > depth)
			shift--;
	}

	return shift;
}

/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_weight() - Return how many set and not cleared bits in a &struct
 * sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: How many bits are set and not cleared.
 */
unsigned int sbitmap_weight(const struct sbitmap *sb);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_get().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags,
			    int node);
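/*
 * Illustrative note, not part of the original header: for depth == 16,
 * sbitmap_calculate_shift() above starts at ilog2(BITS_PER_LONG) (6 on a
 * 64-bit kernel) and shrinks the shift while (4U << shift) > 16, ending
 * at shift == 2, i.e. four bits per word spread across four cachelines.
 */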
/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 *
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch
 * @sbq: Bitmap queue to recalculate wake batch.
 * @users: Number of shares.
 *
 * Like sbitmap_queue_update_wake_batch(), this will calculate wake batch
 * by depth. This interface is for HCTX shared tags or queue shared tags.
 */
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					  unsigned int users);

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
 * @sbq: Bitmap queue to allocate from.
 * @nr_tags: number of tags requested
 * @offset: offset to add to returned bits
 *
 * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
 * a bit in the mask returned, and the caller must add @offset to the value to
 * get the absolute tag value.
 */
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset);

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}
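/*
 * Illustrative usage sketch, not part of the original header: a get/clear
 * round trip on a queue. sbitmap_queue_clear() is declared further below;
 * on failure a real caller would typically sleep on one of the queue's
 * wait queues rather than give up.
 */
static inline void sbq_usage_sketch(struct sbitmap_queue *sbq)
{
	unsigned int cpu;
	int nr;

	nr = sbitmap_queue_get(sbq, &cpu);
	if (nr < 0)
		return;		/* no free bit; a real user would wait */

	/* ... use @nr as a tag ... */

	sbitmap_queue_clear(sbq, nr, cpu);	/* frees the bit, may wake waiters */
}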
/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);

/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

/**
 * sbitmap_queue_clear_batch() - Free a batch of allocated bits on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @offset: offset for each tag in array
 * @tags: array of tags
 * @nr_tags: number of tags in array
 */
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
			       int *tags, int nr_tags);

static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 * @nr: Number of bits cleared.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)							\
	struct sbq_wait name = {						\
		.sbq = NULL,							\
		.wait = {							\
			.private	= current,				\
			.func		= autoremove_wake_function,		\
			.entry		= LIST_HEAD_INIT((name).wait.entry),	\
		}								\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait);

/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */
9 9 9 8 8 8 9 9 9 9 9 9 9 9 9 9 9 9 9 9 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 
906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 
1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 
2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 
3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 
3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 
4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 
5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 
// SPDX-License-Identifier: GPL-2.0
/*
 * BlueZ - Bluetooth protocol stack for Linux
 *
 * Copyright (C) 2021 Intel Corporation
 * Copyright 2023 NXP
 */

#include <linux/property.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_codec.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "eir.h"
#include "msft.h"
#include "aosp.h"
#include "leds.h"

static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status != HCI_REQ_PEND)
		return;

	hdev->req_result = result;
	hdev->req_status = HCI_REQ_DONE;

	/* Free the request command so it is not used as response */
	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;

	if (skb) {
		struct sock *sk = hci_skb_sk(skb);

		/* Drop sk reference if set */
		if (sk)
			sock_put(sk);

		hdev->req_rsp = skb_get(skb);
	}

	wake_up_interruptible(&hdev->req_wait_q);
}

struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
				   const void *param, struct sock *sk)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	/* Grab a reference if command needs to be associated with a sock (e.g.
	 * likely mgmt socket that initiated the command).
	 */
	if (sk) {
		hci_skb_sk(skb) = sk;
		sock_hold(sk);
	}

	return skb;
}

static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
			     const void *param, u8 event, struct sock *sk)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	hci_skb_event(skb) = event;

	skb_queue_tail(&req->cmd_q, skb);
}

static int hci_req_sync_run(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static void hci_request_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout,
				  struct sock *sk)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);

	hci_request_init(&req, hdev);

	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_sync_run(&req);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = 0;
	hdev->req_result = 0;
	skb = hdev->req_rsp;
	hdev->req_rsp = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* If the command returns a status event, skb will be set to NULL as
	 * there are no parameters.
	 */
	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_sk);

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
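/* Editor's note: an illustrative sketch, not part of the original file. A
 * caller (e.g. a vendor driver) might use hci_cmd_sync() to send a command
 * and inspect the response; the opcode 0xfc01 and one-byte parameter below
 * are hypothetical.
 */
static int __maybe_unused example_send_vendor_cmd(struct hci_dev *hdev)
{
	u8 param = 0x01;	/* hypothetical mode parameter */
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* -ENETDOWN, -ETIMEDOUT, ... */

	/* First byte of a Command Complete payload is typically the status */
	bt_dev_dbg(hdev, "vendor rsp status 0x%2.2x", skb->data[0]);
	kfree_skb(skb);

	return 0;
}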
/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
				 NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

/* This function requires the caller holds hdev->req_lock. */
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u8 event, u32 timeout,
			     struct sock *sk)
{
	struct sk_buff *skb;
	u8 status;

	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);

	/* If the command returns a status event, skb will be set to -ENODATA */
	if (skb == ERR_PTR(-ENODATA))
		return 0;

	if (IS_ERR(skb)) {
		if (!event)
			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
				   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	status = skb->data[0];

	kfree_skb(skb);

	return status;
}
EXPORT_SYMBOL(__hci_cmd_sync_status_sk);

int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			  const void *param, u32 timeout)
{
	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
					NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_status);

int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			const void *param, u32 timeout)
{
	int err;

	hci_req_sync_lock(hdev);
	err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return err;
}
EXPORT_SYMBOL(hci_cmd_sync_status);
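/* Editor's note: an illustrative sketch, not part of the original file. Most
 * call sites below follow this pattern: build a command parameter struct and
 * let __hci_cmd_sync_status() collapse the response to a status byte (0 on
 * success). The opcode and struct are real; the helper is hypothetical and
 * the caller must already hold hdev->req_lock.
 */
static int __maybe_unused example_write_eir(struct hci_dev *hdev,
					    struct hci_cp_write_eir *cp)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(*cp), cp,
				     HCI_CMD_TIMEOUT);
}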
static void hci_cmd_sync_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);

	bt_dev_dbg(hdev, "");

	/* Dequeue all entries and run them */
	while (1) {
		struct hci_cmd_sync_work_entry *entry;

		mutex_lock(&hdev->cmd_sync_work_lock);
		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
						 struct hci_cmd_sync_work_entry,
						 list);
		if (entry)
			list_del(&entry->list);
		mutex_unlock(&hdev->cmd_sync_work_lock);

		if (!entry)
			break;

		bt_dev_dbg(hdev, "entry %p", entry);

		if (entry->func) {
			int err;

			hci_req_sync_lock(hdev);
			err = entry->func(hdev, entry->data);
			if (entry->destroy)
				entry->destroy(hdev, entry->data, err);
			hci_req_sync_unlock(hdev);
		}

		kfree(entry);
	}
}

static void hci_cmd_sync_cancel_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_sync_cancel_work);

	cancel_delayed_work_sync(&hdev->cmd_timer);
	cancel_delayed_work_sync(&hdev->ncmd_timer);
	atomic_set(&hdev->cmd_cnt, 1);

	wake_up_interruptible(&hdev->req_wait_q);
}

static int hci_scan_disable_sync(struct hci_dev *hdev);
static int scan_disable_sync(struct hci_dev *hdev, void *data)
{
	return hci_scan_disable_sync(hdev);
}

static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
{
	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0);
}

static void le_scan_disable(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	int status;

	bt_dev_dbg(hdev, "");
	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		goto _return;

	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
		goto _return;
	}

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */
	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		goto _return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		goto _return;
	}

	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status %d", status);
		goto discov_stopped;
	}

	goto _return;

discov_stopped:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

_return:
	hci_dev_unlock(hdev);
}

static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
				       u8 filter_dup);

static int reenable_adv_sync(struct hci_dev *hdev, void *data)
{
	bt_dev_dbg(hdev, "");

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return 0;

	if (hdev->cur_adv_instance) {
		return hci_schedule_adv_instance_sync(hdev,
						      hdev->cur_adv_instance,
						      true);
	} else {
		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	}

	return 0;
}

static void reenable_adv(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    reenable_adv_work);
	int status;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
	if (status)
		bt_dev_err(hdev, "failed to reenable ADV: %d", status);

	hci_dev_unlock(hdev);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n,
					 &hdev->adv_instances, list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance.
			 */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return 0;

	if (next_instance && !ext_adv_capable(hdev))
		return hci_schedule_adv_instance_sync(hdev,
						      next_instance->instance,
						      false);

	return 0;
}

static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = *(u8 *)data;

	kfree(data);

	hci_clear_adv_instance_sync(hdev, NULL, instance, false);

	if (list_empty(&hdev->adv_instances))
		return hci_disable_advertising_sync(hdev);

	return 0;
}

static void adv_timeout_expire(struct work_struct *work)
{
	u8 *inst_ptr;
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	if (hdev->cur_adv_instance == 0x00)
		goto unlock;

	inst_ptr = kmalloc(1, GFP_KERNEL);
	if (!inst_ptr)
		goto unlock;

	*inst_ptr = hdev->cur_adv_instance;
	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);

unlock:
	hci_dev_unlock(hdev);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static int hci_passive_scan_sync(struct hci_dev *hdev);

static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_passive_scan_sync(hdev);

	hci_dev_lock(hdev);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		bt_dev_err(hdev, "unexpected error");
	}

	hci_dev_unlock(hdev);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}

void hci_cmd_sync_init(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
	mutex_init(&hdev->cmd_sync_work_lock);
	mutex_init(&hdev->unregister_lock);

	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
				       struct hci_cmd_sync_work_entry *entry,
				       int err)
{
	if (entry->destroy)
		entry->destroy(hdev, entry->data, err);

	list_del(&entry->list);
	kfree(entry);
}

void hci_cmd_sync_clear(struct hci_dev *hdev)
{
	struct hci_cmd_sync_work_entry *entry, *tmp;

	cancel_work_sync(&hdev->cmd_sync_work);
	cancel_work_sync(&hdev->reenable_adv_work);

	mutex_lock(&hdev->cmd_sync_work_lock);
	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
	mutex_unlock(&hdev->cmd_sync_work_lock);
}
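/* Editor's note: an illustrative timeline, not part of the original file.
 * interleave_scan_work() above flips between two passive-scan phases, so
 * with e.g. advmon_allowlist_duration = 300 and advmon_no_filter_duration =
 * 500 (both in ms, values hypothetical) the schedule looks like:
 *
 *	ALLOWLIST (300 ms) -> NO_FILTER (500 ms) -> ALLOWLIST (300 ms) -> ...
 *
 * until cancel_interleave_scan() resets the state to INTERLEAVE_SCAN_NONE.
 */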
void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;

		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
	}
}
EXPORT_SYMBOL(hci_cmd_sync_cancel);

/* Cancel ongoing command request synchronously:
 *
 * - Set result and mark status to HCI_REQ_CANCELED
 * - Wakeup command sync thread
 */
void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		/* req_result is __u32 so error must be positive to be properly
		 * propagated.
		 */
		hdev->req_result = err < 0 ? -err : err;
		hdev->req_status = HCI_REQ_CANCELED;

		wake_up_interruptible(&hdev->req_wait_q);
	}
}
EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);

/* Submit HCI command to be run as cmd_sync_work:
 *
 * - hdev must _not_ be unregistered
 */
int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;
	int err = 0;

	mutex_lock(&hdev->unregister_lock);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		err = -ENODEV;
		goto unlock;
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto unlock;
	}
	entry->func = func;
	entry->data = data;
	entry->destroy = destroy;

	mutex_lock(&hdev->cmd_sync_work_lock);
	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
	mutex_unlock(&hdev->cmd_sync_work_lock);

	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);

unlock:
	mutex_unlock(&hdev->unregister_lock);
	return err;
}
EXPORT_SYMBOL(hci_cmd_sync_submit);

/* Queue HCI command:
 *
 * - hdev must be running
 */
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
		       void *data, hci_cmd_sync_work_destroy_t destroy)
{
	/* Only queue command if hdev is running which means it had been opened
	 * and is either on init phase or is already up.
	 */
	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -ENETDOWN;

	return hci_cmd_sync_submit(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_queue);
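/* Editor's note: an illustrative sketch, not part of the original file. The
 * adv_timeout_expire() pattern above generalizes: heap-allocate the payload,
 * queue it, and supply a destroy callback so the allocation is freed whether
 * the callback runs or the entry is cancelled. All names here are
 * hypothetical.
 */
static int __maybe_unused example_sync(struct hci_dev *hdev, void *data)
{
	if (data)
		bt_dev_dbg(hdev, "instance %u", *(u8 *)data);

	return 0;
}

static void __maybe_unused example_destroy(struct hci_dev *hdev, void *data,
					   int err)
{
	kfree(data);	/* runs on completion, error and -ECANCELED alike */
}

static int __maybe_unused example_queue_instance(struct hci_dev *hdev, u8 inst)
{
	u8 *data = kmalloc(1, GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	*data = inst;
	return hci_cmd_sync_queue(hdev, example_sync, data, example_destroy);
}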
static struct hci_cmd_sync_work_entry *
_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			   void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
		if (func && entry->func != func)
			continue;

		if (data && entry->data != data)
			continue;

		if (destroy && entry->destroy != destroy)
			continue;

		return entry;
	}

	return NULL;
}

/* Queue HCI command entry once:
 *
 * - Look up whether an entry already exists and, only if it doesn't, create a
 *   new entry and queue it.
 */
int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			    void *data, hci_cmd_sync_work_destroy_t destroy)
{
	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
		return 0;

	return hci_cmd_sync_queue(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_queue_once);

/* Run HCI command:
 *
 * - hdev must be running
 * - if on cmd_sync_work then run immediately otherwise queue
 */
int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
		     void *data, hci_cmd_sync_work_destroy_t destroy)
{
	/* Only queue command if hdev is running which means it had been opened
	 * and is either on init phase or is already up.
	 */
	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -ENETDOWN;

	/* If on cmd_sync_work then run immediately otherwise queue */
	if (current_work() == &hdev->cmd_sync_work)
		return func(hdev, data);

	return hci_cmd_sync_submit(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_run);

/* Run HCI command entry once:
 *
 * - Look up whether an entry already exists and, only if it doesn't, create a
 *   new entry and run it.
 * - if on cmd_sync_work then run immediately otherwise queue
 */
int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
		return 0;

	return hci_cmd_sync_run(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_run_once);

/* Lookup HCI command entry:
 *
 * - Return first entry that matches by function callback or data or
 *   destroy callback.
 */
struct hci_cmd_sync_work_entry *
hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;

	mutex_lock(&hdev->cmd_sync_work_lock);
	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
	mutex_unlock(&hdev->cmd_sync_work_lock);

	return entry;
}
EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);

/* Cancel HCI command entry */
void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
			       struct hci_cmd_sync_work_entry *entry)
{
	mutex_lock(&hdev->cmd_sync_work_lock);
	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
	mutex_unlock(&hdev->cmd_sync_work_lock);
}
EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);

/* Dequeue one HCI command entry:
 *
 * - Lookup and cancel first entry that matches.
 */
bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
			       hci_cmd_sync_work_func_t func, void *data,
			       hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;

	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
	if (!entry)
		return false;

	hci_cmd_sync_cancel_entry(hdev, entry);

	return true;
}
EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
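/* Editor's note: an illustrative sketch, not part of the original file.
 * hci_cmd_sync_queue_once()/hci_cmd_sync_dequeue_once() pair up naturally
 * for debounced work: queue on demand, drop the entry again if the trigger
 * goes away before the worker runs. example_sync is the hypothetical
 * callback from the earlier sketch.
 */
static void __maybe_unused example_trigger(struct hci_dev *hdev, bool on)
{
	if (on)
		hci_cmd_sync_queue_once(hdev, example_sync, NULL, NULL);
	else
		hci_cmd_sync_dequeue_once(hdev, example_sync, NULL, NULL);
}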
/* Dequeue HCI command entry:
 *
 * - Lookup and cancel any entry that matches by function callback or data or
 *   destroy callback.
 */
bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;
	bool ret = false;

	mutex_lock(&hdev->cmd_sync_work_lock);
	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
						   destroy))) {
		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
		ret = true;
	}
	mutex_unlock(&hdev->cmd_sync_work_lock);

	return ret;
}
EXPORT_SYMBOL(hci_cmd_sync_dequeue);

int hci_update_eir_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_eir cp;

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!lmp_ext_inq_capable(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return 0;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

int hci_update_class_sync(struct hci_dev *hdev)
{
	u8 cod[3];

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode
		 * bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}
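/* Editor's note: an illustrative sketch, not part of the original file. The
 * le_states checks above index the LE Supported States bitmap returned by
 * HCI Read LE Supported States: counting bits from zero, bit N lives in byte
 * N / 8 under mask 1 << (N % 8), so e.g. bit 20 is le_states[2] & 0x10 and
 * bit 38 is le_states[4] & 0x40 (not every comment above appears to follow
 * the same numbering convention). A generic accessor could look like:
 */
static bool __maybe_unused le_states_bit_set(const u8 *le_states,
					     unsigned int bit)
{
	return le_states[bit / 8] & (1 << (bit % 8));
}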
static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
{
	/* If a random address has been set and we're advertising or initiating
	 * an LE connection, we can't go ahead and change the random address at
	 * this time. This is because the eventual initiator address used for
	 * the subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (bacmp(&hdev->random_addr, BDADDR_ANY) &&
	    (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	     hci_lookup_le_connect(hdev))) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return 0;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
				     6, rpa, HCI_CMD_TIMEOUT);
}

int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
				   bool rpa, u8 *own_addr_type)
{
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (rpa) {
		/* If Controller supports LL Privacy use own address type
		 * 0x03
		 */
		if (ll_privacy_capable(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Check if RPA is valid */
		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
		if (err)
			return err;

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		return hci_set_random_addr_sync(hdev, &nrpa);
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			return hci_set_random_addr_sync(hdev,
							&hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
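/* Editor's note: a summary added for clarity, not part of the original file.
 * hci_update_random_address_sync() resolves own_addr_type roughly as:
 *
 *	rpa == true        -> RANDOM_RESOLVED (LL Privacy) or RANDOM, with the
 *	                      RPA programmed only if the current one expired
 *	require_privacy    -> RANDOM, with a freshly generated NRPA
 *	static addr forced,
 *	or no public addr  -> RANDOM, programmed from hdev->static_addr
 *	otherwise          -> PUBLIC, no HCI command needed
 */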
static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	u8 size;
	struct adv_info *adv = NULL;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;

		/* If not enabled there is nothing to do */
		if (!adv->enabled)
			return 0;
	}

	memset(data, 0, sizeof(data));

	cp = (void *)data;
	set = (void *)cp->data;

	/* Instance 0x00 indicates all advertising instances will be disabled */
	cp->num_of_sets = !!instance;
	cp->enable = 0x00;

	set->handle = adv ? adv->handle : instance;

	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     size, data, HCI_CMD_TIMEOUT);
}

static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
					    bdaddr_t *random_addr)
{
	struct hci_cp_le_set_adv_set_rand_addr cp;
	int err;

	if (!instance) {
		/* Instance 0x00 doesn't have an adv_info, instead it uses
		 * hdev->random_addr to track its address so whenever it needs
		 * to be updated this also sets the random address since
		 * hdev->random_addr is shared with scan state machine.
		 */
		err = hci_set_random_addr_sync(hdev, random_addr);
		if (err)
			return err;
	}

	memset(&cp, 0, sizeof(cp));

	cp.handle = instance;
	bacpy(&cp.bdaddr, random_addr);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv;
	bool secondary_adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
	} else {
		adv = NULL;
	}

	/* Updating parameters of an active instance will return a
	 * Command Disallowed error, so we must first disable the
	 * instance if it is active.
	 */
	if (adv && !adv->pending) {
		err = hci_disable_ext_adv_instance_sync(hdev, instance);
		if (err)
			return err;
	}

	flags = hci_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	if (adv) {
		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
		cp.tx_power = adv->tx_power;
	} else {
		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
	}

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
	 * contains the peer’s Identity Address and the Peer_Address_Type
	 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
	 * These parameters are used to locate the corresponding local IRK in
	 * the resolving list; this IRK is used to generate their own address
	 * used in the advertisement.
	 */
	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
		hci_copy_identity_address(hdev, &cp.peer_addr,
					  &cp.peer_addr_type);

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.handle = adv ? adv->handle : instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err)
		return err;

	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		/* Check if random address need to be updated */
		if (adv) {
			if (!bacmp(&random_addr, &adv->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		return hci_set_adv_set_random_addr_sync(hdev, instance,
							&random_addr);
	}

	return 0;
}

static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length,
		    HCI_MAX_EXT_AD_LENGTH);
	u8 len;
	struct adv_info *adv = NULL;
	int err;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->scan_rsp_changed)
			return 0;
	}

	len = eir_create_scan_rsp(hdev, instance, pdu->data);
	pdu->handle = adv ? adv->handle : instance;
	pdu->length = len;
	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
				    struct_size(pdu, data, len), pdu,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	if (adv) {
		adv->scan_rsp_changed = false;
	} else {
		memcpy(hdev->scan_rsp_data, pdu->data, len);
		hdev->scan_rsp_data_len = len;
	}

	return 0;
}

static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	memset(&cp, 0, sizeof(cp));

	len = eir_create_scan_rsp(hdev, instance, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return 0;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_set_ext_scan_rsp_data_sync(hdev, instance);

	return __hci_set_scan_rsp_data_sync(hdev, instance);
}

int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	struct adv_info *adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;

		/* If already enabled there is nothing to do */
		if (adv->enabled)
			return 0;
	} else {
		adv = NULL;
	}

	cp = (void *)data;
	set = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(set, 0, sizeof(*set));

	set->handle = adv ? adv->handle : instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv && adv->timeout) {
		u16 duration = adv->timeout * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		set->duration = cpu_to_le16(duration / 10);
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     sizeof(*cp) +
				     sizeof(*set) * cp->num_of_sets,
				     data, HCI_CMD_TIMEOUT);
}

int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	err = hci_setup_ext_adv_instance_sync(hdev, instance);
	if (err)
		return err;

	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_ext_advertising_sync(hdev, instance);
}
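/* Editor's note: a worked example added for clarity, not part of the original
 * file. In hci_enable_ext_advertising_sync() above, an adv->timeout of 5
 * (seconds) becomes duration = 5 * MSEC_PER_SEC = 5000 ms, so the controller
 * is given N = 5000 / 10 = 500, i.e. 500 * 10 ms = 5 s of advertising before
 * it stops the set on its own.
 */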
int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_per_adv_enable cp;
	struct adv_info *adv = NULL;

	/* If periodic advertising already disabled there is nothing to do. */
	adv = hci_find_adv_instance(hdev, instance);
	if (!adv || !adv->periodic || !adv->enabled)
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x00;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
				       u16 min_interval, u16 max_interval)
{
	struct hci_cp_le_set_per_adv_params cp;

	memset(&cp, 0, sizeof(cp));

	if (!min_interval)
		min_interval = DISCOV_LE_PER_ADV_INT_MIN;

	if (!max_interval)
		max_interval = DISCOV_LE_PER_ADV_INT_MAX;

	cp.handle = instance;
	cp.min_interval = cpu_to_le16(min_interval);
	cp.max_interval = cpu_to_le16(max_interval);
	cp.periodic_properties = 0x0000;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length,
		    HCI_MAX_PER_AD_LENGTH);
	u8 len;
	struct adv_info *adv = NULL;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->periodic)
			return 0;
	}

	len = eir_create_per_adv_data(hdev, instance, pdu->data);

	pdu->length = len;
	pdu->handle = adv ? adv->handle : instance;
	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
				     struct_size(pdu, data, len), pdu,
				     HCI_CMD_TIMEOUT);
}

static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_per_adv_enable cp;
	struct adv_info *adv = NULL;

	/* If periodic advertising already enabled there is nothing to do. */
	adv = hci_find_adv_instance(hdev, instance);
	if (adv && adv->periodic && adv->enabled)
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x01;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Checks if periodic advertising data contains a Basic Announcement and if it
 * does generates a Broadcast ID and adds a Broadcast Announcement.
 */
static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
{
	u8 bid[3];
	u8 ad[4 + 3];

	/* Skip if NULL adv as instance 0x00 is used for general purpose
	 * advertising so it cannot be used for the likes of Broadcast
	 * Announcement as it can be overwritten at any point.
	 */
	if (!adv)
		return 0;

	/* If the PA data doesn't contain a Basic Audio Announcement there
	 * is nothing to do.
	 */
	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
				  0x1851, NULL))
		return 0;

	/* Check if advertising data already has a Broadcast Announcement since
	 * the process may want to control the Broadcast ID directly and in
	 * that case the kernel shall not interfere.
	 */
	if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
				 NULL))
		return 0;

	/* Generate Broadcast ID */
	get_random_bytes(bid, sizeof(bid));
	eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
	hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL);

	return hci_update_adv_data_sync(hdev, adv->instance);
}

int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
			   u8 *data, u32 flags, u16 min_interval,
			   u16 max_interval, u16 sync_interval)
{
	struct adv_info *adv = NULL;
	int err;
	bool added = false;

	hci_disable_per_advertising_sync(hdev, instance);

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		/* Create an instance if one could not be found */
		if (!adv) {
			adv = hci_add_per_instance(hdev, instance, flags,
						   data_len, data,
						   sync_interval,
						   sync_interval);
			if (IS_ERR(adv))
				return PTR_ERR(adv);
			adv->pending = false;
			added = true;
		}
	}

	/* Start advertising */
	err = hci_start_ext_adv_sync(hdev, instance);
	if (err < 0)
		goto fail;

	err = hci_adv_bcast_annoucement(hdev, adv);
	if (err < 0)
		goto fail;

	err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
					  max_interval);
	if (err < 0)
		goto fail;

	err = hci_set_per_adv_data_sync(hdev, instance);
	if (err < 0)
		goto fail;

	err = hci_enable_per_advertising_sync(hdev, instance);
	if (err < 0)
		goto fail;

	return 0;

fail:
	if (added)
		hci_remove_adv_instance(hdev, instance);

	return err;
}

static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, instance);

	err = hci_update_adv_data_sync(hdev, instance);
	if (err)
		return err;

	err = hci_update_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_advertising_sync(hdev);
}
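/* Editor's note: an illustrative sketch, not part of the original file. A
 * broadcast-source bringup via hci_start_per_adv_sync() might look like the
 * call below; the instance number, payload and sync interval are
 * hypothetical, and intervals passed as 0 fall back to
 * DISCOV_LE_PER_ADV_INT_MIN/MAX above.
 */
static int __maybe_unused example_start_broadcast(struct hci_dev *hdev)
{
	u8 per_adv_data[] = { 0x02, 0x01, 0x06 };	/* hypothetical AD */

	return hci_start_per_adv_sync(hdev, 0x01, sizeof(per_adv_data),
				      per_adv_data, 0, 0, 0, 0x0640);
}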
int hci_enable_advertising_sync(struct hci_dev *hdev)
{
	struct adv_info *adv_instance;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;
	u8 status;

	if (ext_adv_capable(hdev))
		return hci_enable_ext_advertising_sync(hdev,
						       hdev->cur_adv_instance);

	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EINVAL;

	status = hci_disable_advertising_sync(hdev);
	if (status)
		return status;

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	status = hci_update_random_address_sync(hdev, !connectable,
						adv_use_rpa(hdev, flags),
						&own_addr_type);
	if (status)
		return status;

	memset(&cp, 0, sizeof(cp));

	if (adv_instance) {
		adv_min_interval = adv_instance->min_interval;
		adv_max_interval = adv_instance->max_interval;
	} else {
		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	}

	if (connectable) {
		cp.type = LE_ADV_IND;
	} else {
		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (status)
		return status;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable,
				     HCI_CMD_TIMEOUT);
}

static int enable_advertising_sync(struct hci_dev *hdev, void *data)
{
	return hci_enable_advertising_sync(hdev);
}

int hci_enable_advertising(struct hci_dev *hdev)
{
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return 0;

	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
}

int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
				     struct sock *sk)
{
	int err;

	if (!ext_adv_capable(hdev))
		return 0;

	err = hci_disable_ext_adv_instance_sync(hdev, instance);
	if (err)
		return err;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
					sizeof(instance), &instance, 0,
					HCI_CMD_TIMEOUT, sk);
}

int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
{
	struct hci_cp_le_term_big cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
		    HCI_MAX_EXT_AD_LENGTH);
	u8 len;
	struct adv_info *adv = NULL;
	int err;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->adv_data_changed)
			return 0;
	}

	len = eir_create_adv_data(hdev, instance, pdu->data);
	pdu->handle = adv ? adv->handle : instance;
	pdu->length = len;
	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
				    struct_size(pdu, data, len), pdu,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	/* Update data if the command succeeded */
	if (adv) {
		adv->adv_data_changed = false;
	} else {
		memcpy(hdev->adv_data, pdu->data, len);
		hdev->adv_data_len = len;
	}

	return 0;
}

static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	memset(&cp, 0, sizeof(cp));

	len = eir_create_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return 0;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_set_ext_adv_data_sync(hdev, instance);

	return hci_set_adv_data_sync(hdev, instance);
}

int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
				   bool force)
{
	struct adv_info *adv = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a
	 * timeout in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
		timeout = adv->duration;
	else
		timeout = adv->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv->timeout)
		adv->remaining_time = adv->remaining_time - timeout;

	/* Only use work for scheduling instances with legacy advertising */
	if (!ext_adv_capable(hdev)) {
		hdev->adv_instance_timeout = timeout;
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->adv_instance_expire,
				   secs_to_jiffies(timeout));
	}

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;

	return hci_start_adv_sync(hdev, instance);
}

static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
{
	int err;

	if (!ext_adv_capable(hdev))
		return 0;

	/* Disable instance 0x00 to disable all instances */
	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
	if (err)
		return err;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
}
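/* Editor's note: a worked example added for clarity, not part of the original
 * file. In hci_schedule_adv_instance_sync() above, take a hypothetical
 * instance with duration = 2 s and timeout = 5 s (so remaining_time starts
 * at 5): the first pass picks timeout = 2 and leaves remaining_time = 3, the
 * second does the same (remaining 1), and the third picks the remaining 1 s,
 * after which the expiry path removes the instance since its remaining
 * lifetime has reached zero.
 */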
static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk,
			      bool force)
{
	struct adv_info *adv, *n;
	int err = 0;

	if (ext_adv_capable(hdev))
		/* Remove all existing sets */
		err = hci_clear_adv_sets_sync(hdev, sk);
	if (ext_adv_capable(hdev))
		return err;

	/* This is safe as long as there is no command sent while the lock is
	 * held.
	 */
	hci_dev_lock(hdev);

	/* Cleanup non-ext instances */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;
		int err;

		if (!(force || adv->timeout))
			continue;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
			       struct sock *sk)
{
	int err = 0;

	/* If we use extended advertising, instance has to be removed first. */
	if (ext_adv_capable(hdev))
		err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
	if (ext_adv_capable(hdev))
		return err;

	/* This is safe as long as there is no command sent while the lock is
	 * held.
	 */
	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, instance);
	if (!err)
		mgmt_advertising_removed(sk, hdev, instance);

	hci_dev_unlock(hdev);

	return err;
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
				u8 instance, bool force)
{
	struct adv_info *next = NULL;
	int err;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (hdev->cur_adv_instance == instance)
		next = hci_get_next_instance(hdev, instance);

	if (!instance) {
		err = hci_clear_adv_sync(hdev, sk, force);
		if (err)
			return err;
	} else {
		struct adv_info *adv = hci_find_adv_instance(hdev, instance);

		if (force || (adv && adv->timeout && !adv->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next && next->instance == instance)
				next = NULL;

			err = hci_remove_adv_sync(hdev, instance, sk);
			if (err)
				return err;
		}
	}

	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return 0;

	if (next && !ext_adv_capable(hdev))
		hci_schedule_adv_instance_sync(hdev, next->instance, false);

	return 0;
}

int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_read_rssi cp;

	cp.handle = handle;
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
				     sizeof(*cp), cp, HCI_CMD_TIMEOUT);
}

int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
{
	struct hci_cp_read_tx_power cp;

	cp.handle = handle;
	cp.type = type;
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
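/* Editor's note: an illustrative sketch, not part of the original file. The
 * read helpers above take the connection handle already in little-endian
 * form, so a caller converts with cpu_to_le16(); conn here is a hypothetical
 * struct hci_conn the caller already holds a reference to.
 */
static int __maybe_unused example_read_rssi(struct hci_dev *hdev,
					    struct hci_conn *conn)
{
	return hci_read_rssi_sync(hdev, cpu_to_le16(conn->handle));
}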
int hci_disable_advertising_sync(struct hci_dev *hdev)
{
	u8 enable = 0x00;
	int err = 0;

	/* If controller is not advertising we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	if (ext_adv_capable(hdev))
		err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
	if (ext_adv_capable(hdev))
		return err;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable,
				     HCI_CMD_TIMEOUT);
}

static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
					   u8 filter_dup)
{
	struct hci_cp_le_set_ext_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = val;

	if (hci_dev_test_flag(hdev, HCI_MESH))
		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	else
		cp.filter_dup = filter_dup;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
				       u8 filter_dup)
{
	struct hci_cp_le_set_scan_enable cp;

	if (use_ext_scan(hdev))
		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);

	memset(&cp, 0, sizeof(cp));
	cp.enable = val;

	if (val && hci_dev_test_flag(hdev, HCI_MESH))
		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	else
		cp.filter_dup = filter_dup;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
{
	if (!ll_privacy_capable(hdev))
		return 0;

	/* If controller is not/already resolving we are done. */
	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
				     sizeof(val), &val, HCI_CMD_TIMEOUT);
}

static int hci_scan_disable_sync(struct hci_dev *hdev)
{
	int err;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
	if (err) {
		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
		return err;
	}

	return err;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if interleave scanning was started by this call, otherwise
 * return false.
 */
static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
*/ bool use_interleaving = hci_is_adv_monitoring(hdev) && !(list_empty(&hdev->pend_le_conns) && list_empty(&hdev->pend_le_reports)) && hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE; bool is_interleaving = is_interleave_scanning(hdev); if (use_interleaving && !is_interleaving) { hci_start_interleave_scan(hdev); bt_dev_dbg(hdev, "starting interleave scan"); return true; } if (!use_interleaving && is_interleaving) cancel_interleave_scan(hdev); return false; } /* Removes connection from the resolve list if needed. */ static int hci_le_del_resolve_list_sync(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct hci_cp_le_del_from_resolv_list cp; struct bdaddr_list_with_irk *entry; if (!ll_privacy_capable(hdev)) return 0; /* Check if the IRK has been programmed */ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr, bdaddr_type); if (!entry) return 0; cp.bdaddr_type = bdaddr_type; bacpy(&cp.bdaddr, bdaddr); return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_le_del_accept_list_sync(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct hci_cp_le_del_from_accept_list cp; int err; /* Check if device is on accept list before removing it */ if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type)) return 0; cp.bdaddr_type = bdaddr_type; bacpy(&cp.bdaddr, bdaddr); /* Ignore errors when removing from the resolving list, as it is likely * that the device was never added. */ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type); err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (err) { bt_dev_err(hdev, "Unable to remove from allow list: %d", err); return err; } bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr, cp.bdaddr_type); return 0; } struct conn_params { bdaddr_t addr; u8 addr_type; hci_conn_flags_t flags; u8 privacy_mode; }; /* Adds connection to the resolve list if needed. * Setting params to NULL programs the local hdev->irk */ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev, struct conn_params *params) { struct hci_cp_le_add_to_resolv_list cp; struct smp_irk *irk; struct bdaddr_list_with_irk *entry; struct hci_conn_params *p; if (!ll_privacy_capable(hdev)) return 0; /* Attempt to program local identity address, type and irk if params is * NULL. */ if (!params) { if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) return 0; hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type); memcpy(cp.peer_irk, hdev->irk, 16); goto done; } else if (!(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION)) return 0; irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type); if (!irk) return 0; /* Check if the IRK has _not_ been programmed yet.
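 *
 * Editor's note (illustrative usage, not from the original source): per
 * the comment on this function, a NULL params programs the local identity
 * and IRK instead of a peer entry, e.g.
 *
 *	hci_le_add_resolve_list_sync(hdev, NULL);	// local hdev->irk
 *
 * while a non-NULL params programs the peer's address, type and IRK.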
*/ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, &params->addr, params->addr_type); if (entry) return 0; cp.bdaddr_type = params->addr_type; bacpy(&cp.bdaddr, &params->addr); memcpy(cp.peer_irk, irk->val, 16); /* Default privacy mode is always Network */ params->privacy_mode = HCI_NETWORK_PRIVACY; rcu_read_lock(); p = hci_pend_le_action_lookup(&hdev->pend_le_conns, &params->addr, params->addr_type); if (!p) p = hci_pend_le_action_lookup(&hdev->pend_le_reports, &params->addr, params->addr_type); if (p) WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY); rcu_read_unlock(); done: if (hci_dev_test_flag(hdev, HCI_PRIVACY)) memcpy(cp.local_irk, hdev->irk, 16); else memset(cp.local_irk, 0, 16); return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* Set Device Privacy Mode. */ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, struct conn_params *params) { struct hci_cp_le_set_privacy_mode cp; struct smp_irk *irk; if (!ll_privacy_capable(hdev) || !(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION)) return 0; /* If device privacy mode has already been set there is nothing to do */ if (params->privacy_mode == HCI_DEVICE_PRIVACY) return 0; /* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also * indicates that LL Privacy has been enabled and * HCI_OP_LE_SET_PRIVACY_MODE is supported. */ if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)) return 0; irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type); if (!irk) return 0; memset(&cp, 0, sizeof(cp)); cp.bdaddr_type = irk->addr_type; bacpy(&cp.bdaddr, &irk->bdaddr); cp.mode = HCI_DEVICE_PRIVACY; /* Note: params->privacy_mode is not updated since it is a copy */ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* Adds connection to the allow list if needed. If the device uses RPA (has an * IRK) this attempts to program the device in the resolving list as well and * properly sets the privacy mode. */ static int hci_le_add_accept_list_sync(struct hci_dev *hdev, struct conn_params *params, u8 *num_entries) { struct hci_cp_le_add_to_accept_list cp; int err; /* During suspend, only wakeable devices can be in the acceptlist */ if (hdev->suspended && !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) { hci_le_del_accept_list_sync(hdev, &params->addr, params->addr_type); return 0; } /* Select filter policy to accept all advertising */ if (*num_entries >= hdev->le_accept_list_size) return -ENOSPC; /* Attempt to program the device in the resolving list first to avoid * having to roll back in case it fails; since the resolving list is * dynamic, it can probably be smaller than the accept list.
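 *
 * Editor's sketch (illustrative, not from the original source) of the
 * ordering this function implements, including the rollback path:
 *
 *	hci_le_add_resolve_list_sync(hdev, params)
 *	  -> hci_le_set_privacy_mode_sync(hdev, params)
 *	    -> HCI_OP_LE_ADD_TO_ACCEPT_LIST
 *	       on failure: hci_le_del_resolve_list_sync() rolls the
 *	       resolving list entry back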
*/ err = hci_le_add_resolve_list_sync(hdev, params); if (err) { bt_dev_err(hdev, "Unable to add to resolve list: %d", err); return err; } /* Set Privacy Mode */ err = hci_le_set_privacy_mode_sync(hdev, params); if (err) { bt_dev_err(hdev, "Unable to set privacy mode: %d", err); return err; } /* Check if already in accept list */ if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr, params->addr_type)) return 0; *num_entries += 1; cp.bdaddr_type = params->addr_type; bacpy(&cp.bdaddr, &params->addr); err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (err) { bt_dev_err(hdev, "Unable to add to allow list: %d", err); /* Rollback the device from the resolving list */ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type); return err; } bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr, cp.bdaddr_type); return 0; } /* This function disables/pauses all advertising instances */ static int hci_pause_advertising_sync(struct hci_dev *hdev) { int err; int old_state; /* If advertising has already been paused there is nothing to do. */ if (hdev->advertising_paused) return 0; bt_dev_dbg(hdev, "Pausing directed advertising"); /* Stop directed advertising */ old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING); if (old_state) { /* When discoverable timeout triggers, then just make sure * the limited discoverable flag is cleared. Even in the case * of a timeout triggered from general discoverable, it is * safe to unconditionally clear the flag. */ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); hdev->discov_timeout = 0; } bt_dev_dbg(hdev, "Pausing advertising instances"); /* Call to disable any advertisements active on the controller. * This will succeed even if no advertisements are configured. */ err = hci_disable_advertising_sync(hdev); if (err) return err; /* If we are using software rotation, pause the loop */ if (!ext_adv_capable(hdev)) cancel_adv_timeout(hdev); hdev->advertising_paused = true; hdev->advertising_old_state = old_state; return 0; } /* This function enables all user advertising instances */ static int hci_resume_advertising_sync(struct hci_dev *hdev) { struct adv_info *adv, *tmp; int err = 0; /* If advertising has not been paused there is nothing to do. */ if (!hdev->advertising_paused) return 0; /* Resume directed advertising */ hdev->advertising_paused = false; if (hdev->advertising_old_state) { hci_dev_set_flag(hdev, HCI_ADVERTISING); hdev->advertising_old_state = 0; } bt_dev_dbg(hdev, "Resuming advertising instances"); if (ext_adv_capable(hdev)) { /* Call for each tracked instance to be re-enabled */ list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) { err = hci_enable_ext_advertising_sync(hdev, adv->instance); if (!err) continue; /* If the instance cannot be resumed remove it */ hci_remove_ext_adv_instance_sync(hdev, adv->instance, NULL); } } else { /* Schedule the most recent instance to be restarted and begin * the software rotation loop */ err = hci_schedule_adv_instance_sync(hdev, hdev->cur_adv_instance, true); } hdev->advertising_paused = false; return err; } static int hci_pause_addr_resolution(struct hci_dev *hdev) { int err; if (!ll_privacy_capable(hdev)) return 0; if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) return 0; /* Cannot disable addr resolution if scanning is enabled or * when initiating an LE connection.
*/ if (hci_dev_test_flag(hdev, HCI_LE_SCAN) || hci_lookup_le_connect(hdev)) { bt_dev_err(hdev, "Command not allowed when scan/LE connect"); return -EPERM; } /* Cannot disable addr resolution if advertising is enabled. */ err = hci_pause_advertising_sync(hdev); if (err) { bt_dev_err(hdev, "Pause advertising failed: %d", err); return err; } err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); if (err) bt_dev_err(hdev, "Unable to disable Address Resolution: %d", err); /* Return if address resolution is disabled and RPA is not used. */ if (!err && scan_use_rpa(hdev)) return 0; hci_resume_advertising_sync(hdev); return err; } struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, bool extended, struct sock *sk) { u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA : HCI_OP_READ_LOCAL_OOB_DATA; return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk); } static struct conn_params *conn_params_copy(struct list_head *list, size_t *n) { struct hci_conn_params *params; struct conn_params *p; size_t i; rcu_read_lock(); i = 0; list_for_each_entry_rcu(params, list, action) ++i; *n = i; rcu_read_unlock(); p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL); if (!p) return NULL; rcu_read_lock(); i = 0; list_for_each_entry_rcu(params, list, action) { /* Racing adds are handled in next scan update */ if (i >= *n) break; /* No hdev->lock, but: addr, addr_type are immutable. * privacy_mode is only written by us or in * hci_cc_le_set_privacy_mode that we wait for. * We should be idempotent so MGMT updating flags * while we are processing is OK. */ bacpy(&p[i].addr, &params->addr); p[i].addr_type = params->addr_type; p[i].flags = READ_ONCE(params->flags); p[i].privacy_mode = READ_ONCE(params->privacy_mode); ++i; } rcu_read_unlock(); *n = i; return p; } /* Clear LE Accept List */ static int hci_le_clear_accept_list_sync(struct hci_dev *hdev) { if (!(hdev->commands[26] & 0x80)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL, HCI_CMD_TIMEOUT); } /* Device must not be scanning when updating the accept list. * * Update is done using the following sequence: * * ll_privacy_capable((Disable Advertising) -> Disable Resolving List) -> * Remove Devices From Accept List -> * (has IRK && ll_privacy_capable(Remove Devices From Resolving List)) -> * Add Devices to Accept List -> * (has IRK && ll_privacy_capable(Add Devices To Resolving List)) -> * ll_privacy_capable(Enable Resolving List -> (Enable Advertising)) -> * Enable Scanning * * In case of failure advertising shall be restored to its original state and * the returned filter policy will disable the accept list, since either the * accept list or the resolving list could not be programmed. * */ static u8 hci_update_accept_list_sync(struct hci_dev *hdev) { struct conn_params *params; struct bdaddr_list *b, *t; u8 num_entries = 0; bool pend_conn, pend_report; u8 filter_policy; size_t i, n; int err; /* Pause advertising if resolving list can be used as controllers * cannot accept resolving list modifications while advertising. */ if (ll_privacy_capable(hdev)) { err = hci_pause_advertising_sync(hdev); if (err) { bt_dev_err(hdev, "pause advertising failed: %d", err); return 0x00; } } /* Disable address resolution while reprogramming accept list since * devices that do have an IRK will be programmed in the resolving list * when LL Privacy is enabled.
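 *
 * Editor's note (illustrative, not from the original source): the
 * conn_params_copy() snapshot defined above is what allows this function
 * to iterate without holding hdev->lock across HCI commands, e.g.
 *
 *	params = conn_params_copy(&hdev->pend_le_conns, &n);
 *	for (i = 0; i < n; ++i)
 *		err = hci_le_add_accept_list_sync(hdev, &params[i],
 *						  &num_entries);
 *	kvfree(params);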
*/ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); if (err) { bt_dev_err(hdev, "Unable to disable LL privacy: %d", err); goto done; } /* Force address filtering if PA Sync is in progress */ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { struct hci_cp_le_pa_create_sync *sent; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC); if (sent) { struct conn_params pa; memset(&pa, 0, sizeof(pa)); bacpy(&pa.addr, &sent->addr); pa.addr_type = sent->addr_type; /* Clear first since there could be addresses left * behind. */ hci_le_clear_accept_list_sync(hdev); num_entries = 1; err = hci_le_add_accept_list_sync(hdev, &pa, &num_entries); goto done; } } /* Go through the current accept list programmed into the * controller one by one and check if that address is connected or is * still in the list of pending connections or list of devices to * report. If not present in either list, then remove it from * the controller. */ list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) { if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type)) continue; /* Pointers not dereferenced, no locks needed */ pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, &b->bdaddr, b->bdaddr_type); pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, &b->bdaddr, b->bdaddr_type); /* If the device is not likely to connect or report, * remove it from the acceptlist. */ if (!pend_conn && !pend_report) { hci_le_del_accept_list_sync(hdev, &b->bdaddr, b->bdaddr_type); continue; } num_entries++; } /* Since all no longer valid accept list entries have been * removed, walk through the list of pending connections * and ensure that any new device gets programmed into * the controller. * * If the list of devices is larger than the list of * available accept list entries in the controller, then * just abort and return a filter policy value that does not use the * accept list. * * The list and params may be mutated while we wait for events, * so make a copy and iterate it. */ params = conn_params_copy(&hdev->pend_le_conns, &n); if (!params) { err = -ENOMEM; goto done; } for (i = 0; i < n; ++i) { err = hci_le_add_accept_list_sync(hdev, &params[i], &num_entries); if (err) { kvfree(params); goto done; } } kvfree(params); /* After adding all new pending connections, walk through * the list of pending reports and also add these to the * accept list if there is still space. Abort if space runs out. */ params = conn_params_copy(&hdev->pend_le_reports, &n); if (!params) { err = -ENOMEM; goto done; } for (i = 0; i < n; ++i) { err = hci_le_add_accept_list_sync(hdev, &params[i], &num_entries); if (err) { kvfree(params); goto done; } } kvfree(params); /* Use the allowlist unless the following conditions are all true: * - We are not currently suspending * - There are 1 or more ADV monitors registered and it's not offloaded * - Interleaved scanning is not currently using the allowlist */ if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE && hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) err = -EINVAL; done: filter_policy = err ? 0x00 : 0x01; /* Enable address resolution when LL Privacy is enabled.
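 *
 * Editor's note (illustrative): the filter_policy computed at the done:
 * label above becomes this function's return value --
 *
 *	0x00	accept list unusable, the host filters all reports
 *	0x01	accept list in use by the controller
 *
 * hci_passive_scan_sync() may later OR in 0x02 when the controller
 * supports Extended Scanner Filter Policies (see further below).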
*/ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01); if (err) bt_dev_err(hdev, "Unable to enable LL privacy: %d", err); /* Resume advertising if it was paused */ if (ll_privacy_capable(hdev)) hci_resume_advertising_sync(hdev); /* Select filter policy to use accept list */ return filter_policy; } static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp, u8 type, u16 interval, u16 window) { cp->type = type; cp->interval = cpu_to_le16(interval); cp->window = cpu_to_le16(window); } static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, u16 interval, u16 window, u8 own_addr_type, u8 filter_policy) { struct hci_cp_le_set_ext_scan_params *cp; struct hci_cp_le_scan_phy_params *phy; u8 data[sizeof(*cp) + sizeof(*phy) * 2]; u8 num_phy = 0x00; cp = (void *)data; phy = (void *)cp->data; memset(data, 0, sizeof(data)); cp->own_addr_type = own_addr_type; cp->filter_policy = filter_policy; /* Check if PA Sync is in progress then select the PHY based on the * hci_conn.iso_qos. */ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { struct hci_cp_le_add_to_accept_list *sent; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); if (sent) { struct hci_conn *conn; conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK, &sent->bdaddr); if (conn) { struct bt_iso_qos *qos = &conn->iso_qos; if (qos->bcast.in.phy & BT_ISO_PHY_1M || qos->bcast.in.phy & BT_ISO_PHY_2M) { cp->scanning_phys |= LE_SCAN_PHY_1M; hci_le_scan_phy_params(phy, type, interval, window); num_phy++; phy++; } if (qos->bcast.in.phy & BT_ISO_PHY_CODED) { cp->scanning_phys |= LE_SCAN_PHY_CODED; hci_le_scan_phy_params(phy, type, interval * 3, window * 3); num_phy++; phy++; } if (num_phy) goto done; } } } if (scan_1m(hdev) || scan_2m(hdev)) { cp->scanning_phys |= LE_SCAN_PHY_1M; hci_le_scan_phy_params(phy, type, interval, window); num_phy++; phy++; } if (scan_coded(hdev)) { cp->scanning_phys |= LE_SCAN_PHY_CODED; hci_le_scan_phy_params(phy, type, interval * 3, window * 3); num_phy++; phy++; } done: if (!num_phy) return -EINVAL; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS, sizeof(*cp) + sizeof(*phy) * num_phy, data, HCI_CMD_TIMEOUT); } static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type, u16 interval, u16 window, u8 own_addr_type, u8 filter_policy) { struct hci_cp_le_set_scan_param cp; if (use_ext_scan(hdev)) return hci_le_set_ext_scan_param_sync(hdev, type, interval, window, own_addr_type, filter_policy); memset(&cp, 0, sizeof(cp)); cp.type = type; cp.interval = cpu_to_le16(interval); cp.window = cpu_to_le16(window); cp.own_address_type = own_addr_type; cp.filter_policy = filter_policy; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval, u16 window, u8 own_addr_type, u8 filter_policy, u8 filter_dup) { int err; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return 0; } err = hci_le_set_scan_param_sync(hdev, type, interval, window, own_addr_type, filter_policy); if (err) return err; return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup); } static int hci_passive_scan_sync(struct hci_dev *hdev) { u8 own_addr_type; u8 filter_policy; u16 window, interval; u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE; int err; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return 0; } err = hci_scan_disable_sync(hdev); if (err) { bt_dev_err(hdev, "disable scanning failed: %d", err); return err; } /* Set 
require_privacy to false since no SCAN_REQ are sent * during passive scanning. Not using a non-resolvable address * here is important so that peer devices using direct * advertising with our address will be correctly reported * by the controller. */ if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev), &own_addr_type)) return 0; if (hdev->enable_advmon_interleave_scan && hci_update_interleaved_scan_sync(hdev)) return 0; bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); /* Adding or removing entries from the accept list must * happen before enabling scanning. The controller does * not allow accept list modification while scanning. */ filter_policy = hci_update_accept_list_sync(hdev); /* If suspended and filter_policy set to 0x00 (no acceptlist) then * passive scanning cannot be started since that would require the host * to be woken up to process the reports. */ if (hdev->suspended && !filter_policy) { /* If the accept list is empty there is no need to scan * while suspended. */ if (list_empty(&hdev->le_accept_list)) return 0; /* If there are devices in the accept_list it means some * devices could not be programmed, which in the non-suspended * case means filter_policy needs to be set to 0x00 so the host * filters, but since we are handling the suspended case we can * ignore devices needing host filtering so that devices in the * acceptlist are still able to wake up the system. */ filter_policy = 0x01; } /* When the controller is using random resolvable addresses and * thus has LE privacy enabled, controllers with Extended Scanner * Filter Policies support can enable handling of directed * advertising. * * So instead of using filter policies 0x00 (no acceptlist) * and 0x01 (acceptlist enabled) use the new filter policies * 0x02 (no acceptlist) and 0x03 (acceptlist enabled). */ if (hci_dev_test_flag(hdev, HCI_PRIVACY) && (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) filter_policy |= 0x02; if (hdev->suspended) { window = hdev->le_scan_window_suspend; interval = hdev->le_scan_int_suspend; } else if (hci_is_le_conn_scanning(hdev)) { window = hdev->le_scan_window_connect; interval = hdev->le_scan_int_connect; } else if (hci_is_adv_monitoring(hdev)) { window = hdev->le_scan_window_adv_monitor; interval = hdev->le_scan_int_adv_monitor; /* Disable the duplicates filter when scanning for an advertisement * monitor for the following reasons. * * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm * controllers ignore RSSI_Sampling_Period when the duplicates * filter is enabled. * * For SW pattern filtering, when we're not doing interleaved * scanning, it is necessary to disable the duplicates filter, * otherwise hosts can only receive one advertisement and it's * impossible to know if a peer is still in range. */ filter_dups = LE_SCAN_FILTER_DUP_DISABLE; } else { window = hdev->le_scan_window; interval = hdev->le_scan_interval; } /* Disable all filtering for Mesh */ if (hci_dev_test_flag(hdev, HCI_MESH)) { filter_policy = 0; filter_dups = LE_SCAN_FILTER_DUP_DISABLE; } bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy); return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window, own_addr_type, filter_policy, filter_dups); } /* This function controls the passive scanning based on hdev->pend_le_conns * list.
If there are pending LE connections we start the background scanning, * otherwise we stop it in the following sequence: * * If there are devices to scan: * * Disable Scanning -> Update Accept List -> * ll_privacy_capable((Disable Advertising) -> Disable Resolving List -> * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) -> * Enable Scanning * * Otherwise: * * Disable Scanning */ int hci_update_passive_scan_sync(struct hci_dev *hdev) { int err; if (!test_bit(HCI_UP, &hdev->flags) || test_bit(HCI_INIT, &hdev->flags) || hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG) || hci_dev_test_flag(hdev, HCI_AUTO_OFF) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; /* No point in doing scanning if LE support hasn't been enabled */ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return 0; /* If discovery is active don't interfere with it */ if (hdev->discovery.state != DISCOVERY_STOPPED) return 0; /* Reset RSSI and UUID filters when starting background scanning * since these filters are meant for service discovery only. * * The Start Discovery and Start Service Discovery operations * ensure that proper values are set for the RSSI threshold and UUID * filter list. So it is safe to just reset them here. */ hci_discovery_filter_clear(hdev); bt_dev_dbg(hdev, "ADV monitoring is %s", hci_is_adv_monitoring(hdev) ? "on" : "off"); if (!hci_dev_test_flag(hdev, HCI_MESH) && list_empty(&hdev->pend_le_conns) && list_empty(&hdev->pend_le_reports) && !hci_is_adv_monitoring(hdev) && !hci_dev_test_flag(hdev, HCI_PA_SYNC)) { /* If there are no pending LE connections, no devices * to be scanned for and no ADV monitors, we should stop the * background scanning. */ bt_dev_dbg(hdev, "stopping background scanning"); err = hci_scan_disable_sync(hdev); if (err) bt_dev_err(hdev, "stop background scanning failed: %d", err); } else { /* If there is at least one pending LE connection, we should * keep the background scan running. */ /* If controller is connecting, we should not start scanning * since some controllers are not able to scan and connect at * the same time.
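 *
 * Editor's note (illustrative, not from the original source): other
 * contexts do not call this helper directly; they queue it once on the
 * cmd_sync machinery via the wrapper defined below, i.e.
 *
 *	hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL, NULL);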
*/ if (hci_lookup_le_connect(hdev)) return 0; bt_dev_dbg(hdev, "start background scanning"); err = hci_passive_scan_sync(hdev); if (err) bt_dev_err(hdev, "start background scanning failed: %d", err); } return err; } static int update_scan_sync(struct hci_dev *hdev, void *data) { return hci_update_scan_sync(hdev); } int hci_update_scan(struct hci_dev *hdev) { return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL); } static int update_passive_scan_sync(struct hci_dev *hdev, void *data) { return hci_update_passive_scan_sync(hdev); } int hci_update_passive_scan(struct hci_dev *hdev) { /* Only queue if it would have any effect */ if (!test_bit(HCI_UP, &hdev->flags) || test_bit(HCI_INIT, &hdev->flags) || hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG) || hci_dev_test_flag(hdev, HCI_AUTO_OFF) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL, NULL); } int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val) { int err; if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev)) return 0; err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, sizeof(val), &val, HCI_CMD_TIMEOUT); if (!err) { if (val) { hdev->features[1][0] |= LMP_HOST_SC; hci_dev_set_flag(hdev, HCI_SC_ENABLED); } else { hdev->features[1][0] &= ~LMP_HOST_SC; hci_dev_clear_flag(hdev, HCI_SC_ENABLED); } } return err; } int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode) { int err; if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || lmp_host_ssp_capable(hdev)) return 0; if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode), &mode, HCI_CMD_TIMEOUT); } err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode, HCI_CMD_TIMEOUT); if (err) return err; return hci_write_sc_support_sync(hdev, 0x01); } int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul) { struct hci_cp_write_le_host_supported cp; if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) || !lmp_bredr_capable(hdev)) return 0; /* Check first if we already have the right host state * (host features set) */ if (le == lmp_host_le_capable(hdev) && simul == lmp_host_le_br_capable(hdev)) return 0; memset(&cp, 0, sizeof(cp)); cp.le = le; cp.simul = simul; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_powered_update_adv_sync(struct hci_dev *hdev) { struct adv_info *adv, *tmp; int err; if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return 0; /* If RPA Resolution has not been enabled yet it means the * resolving list is empty and we should attempt to program the * local IRK in order to support using own_addr_type * ADDR_LE_DEV_RANDOM_RESOLVED (0x03). */ if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { hci_le_add_resolve_list_sync(hdev, NULL); hci_le_set_addr_resolution_enable_sync(hdev, 0x01); } /* Make sure the controller has a good default for * advertising data. This also applies to the case * where BR/EDR was toggled during the AUTO_OFF phase.
*/ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || list_empty(&hdev->adv_instances)) { if (ext_adv_capable(hdev)) { err = hci_setup_ext_adv_instance_sync(hdev, 0x00); if (!err) hci_update_scan_rsp_data_sync(hdev, 0x00); } else { err = hci_update_adv_data_sync(hdev, 0x00); if (!err) hci_update_scan_rsp_data_sync(hdev, 0x00); } if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) hci_enable_advertising_sync(hdev); } /* Call for each tracked instance to be scheduled */ list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) hci_schedule_adv_instance_sync(hdev, adv->instance, true); return 0; } static int hci_write_auth_enable_sync(struct hci_dev *hdev) { u8 link_sec; link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); if (link_sec == test_bit(HCI_AUTH, &hdev->flags)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(link_sec), &link_sec, HCI_CMD_TIMEOUT); } int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable) { struct hci_cp_write_page_scan_activity cp; u8 type; int err = 0; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; if (hdev->hci_ver < BLUETOOTH_VER_1_2) return 0; memset(&cp, 0, sizeof(cp)); if (enable) { type = PAGE_SCAN_TYPE_INTERLACED; /* 160 msec page scan interval */ cp.interval = cpu_to_le16(0x0100); } else { type = hdev->def_page_scan_type; cp.interval = cpu_to_le16(hdev->def_page_scan_int); } cp.window = cpu_to_le16(hdev->def_page_scan_window); if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval || __cpu_to_le16(hdev->page_scan_window) != cp.window) { err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (err) return err; } if (hdev->page_scan_type != type) err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, sizeof(type), &type, HCI_CMD_TIMEOUT); return err; } static bool disconnected_accept_list_entries(struct hci_dev *hdev) { struct bdaddr_list *b; list_for_each_entry(b, &hdev->accept_list, list) { struct hci_conn *conn; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); if (!conn) return true; if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) return true; } return false; } static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val) { return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(val), &val, HCI_CMD_TIMEOUT); } int hci_update_scan_sync(struct hci_dev *hdev) { u8 scan; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; if (!hdev_is_powered(hdev)) return 0; if (mgmt_powering_down(hdev)) return 0; if (hdev->scanning_paused) return 0; if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || disconnected_accept_list_entries(hdev)) scan = SCAN_PAGE; else scan = SCAN_DISABLED; if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) scan |= SCAN_INQUIRY; if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) return 0; return hci_write_scan_enable_sync(hdev, scan); } int hci_update_name_sync(struct hci_dev *hdev) { struct hci_cp_write_local_name cp; memset(&cp, 0, sizeof(cp)); memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* This function performs the powered update HCI command sequence after the HCI * init sequence, which ends up resetting all states; the sequence is as * follows: * * HCI_SSP_ENABLED(Enable SSP) * HCI_LE_ENABLED(Enable LE) * HCI_LE_ENABLED(ll_privacy_capable(Add local IRK to Resolving List) -> * Update adv data) * Enable Authentication * 
lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class -> * Set Name -> Set EIR) * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address) */ int hci_powered_update_sync(struct hci_dev *hdev) { int err; /* Register the available SMP channels (BR/EDR and LE) only when * successfully powering on the controller. This late * registration is required so that LE SMP can clearly decide if * the public address or static address is used. */ smp_register(hdev); err = hci_write_ssp_mode_sync(hdev, 0x01); if (err) return err; err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00); if (err) return err; err = hci_powered_update_adv_sync(hdev); if (err) return err; err = hci_write_auth_enable_sync(hdev); if (err) return err; if (lmp_bredr_capable(hdev)) { if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) hci_write_fast_connectable_sync(hdev, true); else hci_write_fast_connectable_sync(hdev, false); hci_update_scan_sync(hdev); hci_update_class_sync(hdev); hci_update_name_sync(hdev); hci_update_eir_sync(hdev); } /* If forcing static address is in use or there is no public * address use the static address as random address (but skip * the HCI command if the current random address is already the * static one). * * In case BR/EDR has been disabled on a dual-mode controller * and a static address has been configured, then use that * address instead of the public BR/EDR address. */ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || (!bacmp(&hdev->bdaddr, BDADDR_ANY) && !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) { if (bacmp(&hdev->static_addr, BDADDR_ANY)) return hci_set_random_addr_sync(hdev, &hdev->static_addr); } return 0; } /** * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address * (BD_ADDR) for a HCI device from * a firmware node property. * @hdev: The HCI device * * Search the firmware node for 'local-bd-address'. * * All-zero BD addresses are rejected, because those could be properties * that exist in the firmware tables, but were not updated by the firmware. For * example, the DTS could define 'local-bd-address', with zero BD addresses.
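 *
 * Editor's illustration (hypothetical devicetree fragment, address bytes
 * made up):
 *
 *	bluetooth {
 *		local-bd-address = [00 11 22 33 44 55];
 *	};
 *
 * The bytes are copied as-is unless HCI_QUIRK_BDADDR_PROPERTY_BROKEN is
 * set, in which case the helper below byte-swaps them first.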
*/ static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) { struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); bdaddr_t ba; int ret; ret = fwnode_property_read_u8_array(fwnode, "local-bd-address", (u8 *)&ba, sizeof(ba)); if (ret < 0 || !bacmp(&ba, BDADDR_ANY)) return; if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks)) baswap(&hdev->public_addr, &ba); else bacpy(&hdev->public_addr, &ba); } struct hci_init_stage { int (*func)(struct hci_dev *hdev); }; /* Run init stage NULL terminated function table */ static int hci_init_stage_sync(struct hci_dev *hdev, const struct hci_init_stage *stage) { size_t i; for (i = 0; stage[i].func; i++) { int err; err = stage[i].func(hdev); if (err) return err; } return 0; } /* Read Local Version */ static int hci_read_local_version_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, HCI_CMD_TIMEOUT); } /* Read BD Address */ static int hci_read_bd_addr_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, HCI_CMD_TIMEOUT); } #define HCI_INIT(_func) \ { \ .func = _func, \ } static const struct hci_init_stage hci_init0[] = { /* HCI_OP_READ_LOCAL_VERSION */ HCI_INIT(hci_read_local_version_sync), /* HCI_OP_READ_BD_ADDR */ HCI_INIT(hci_read_bd_addr_sync), {} }; int hci_reset_sync(struct hci_dev *hdev) { int err; set_bit(HCI_RESET, &hdev->flags); err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT); if (err) return err; return 0; } static int hci_init0_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); /* Reset */ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { err = hci_reset_sync(hdev); if (err) return err; } return hci_init_stage_sync(hdev, hci_init0); } static int hci_unconf_init_sync(struct hci_dev *hdev) { int err; if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return 0; err = hci_init0_sync(hdev); if (err < 0) return err; if (hci_dev_test_flag(hdev, HCI_SETUP)) hci_debugfs_create_basic(hdev); return 0; } /* Read Local Supported Features. */ static int hci_read_local_features_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL, HCI_CMD_TIMEOUT); } /* BR Controller init stage 1 command sequence */ static const struct hci_init_stage br_init1[] = { /* HCI_OP_READ_LOCAL_FEATURES */ HCI_INIT(hci_read_local_features_sync), /* HCI_OP_READ_LOCAL_VERSION */ HCI_INIT(hci_read_local_version_sync), /* HCI_OP_READ_BD_ADDR */ HCI_INIT(hci_read_bd_addr_sync), {} }; /* Read Local Commands */ static int hci_read_local_cmds_sync(struct hci_dev *hdev) { /* All Bluetooth 1.2 and later controllers should support the * HCI command for reading the local supported commands. * * Unfortunately some controllers indicate Bluetooth 1.2 support, * but do not have support for this command. If that is the case, * the driver can quirk the behavior and skip reading the local * supported commands. */ if (hdev->hci_ver > BLUETOOTH_VER_1_1 && !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL, HCI_CMD_TIMEOUT); return 0; } static int hci_init1_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); /* Reset */ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { err = hci_reset_sync(hdev); if (err) return err; } return hci_init_stage_sync(hdev, br_init1); } /* Read Buffer Size (ACL mtu, max pkt, etc.) 
*/ static int hci_read_buffer_size_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Class of Device */ static int hci_read_dev_class_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Local Name */ static int hci_read_local_name_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Voice Setting */ static int hci_read_voice_setting_sync(struct hci_dev *hdev) { if (!read_voice_setting_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Number of Supported IAC */ static int hci_read_num_supported_iac_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Current IAC LAP */ static int hci_read_current_iac_lap_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type, u8 cond_type, bdaddr_t *bdaddr, u8 auto_accept) { struct hci_cp_set_event_filter cp; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) return 0; memset(&cp, 0, sizeof(cp)); cp.flt_type = flt_type; if (flt_type != HCI_FLT_CLEAR_ALL) { cp.cond_type = cond_type; bacpy(&cp.addr_conn_flt.bdaddr, bdaddr); cp.addr_conn_flt.auto_accept = auto_accept; } return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT, flt_type == HCI_FLT_CLEAR_ALL ? sizeof(cp.flt_type) : sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_clear_event_filter_sync(struct hci_dev *hdev) { if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED)) return 0; /* In theory the state machine should not reach here unless * a hci_set_event_filter_sync() call succeeds, but we do * the check both for parity and as a future reminder. 
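 *
 * Editor's sketch (hypothetical bdaddr, not from the original source):
 * filters are installed through the helper above, for example
 *
 *	hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
 *				  HCI_CONN_SETUP_ALLOW_BDADDR, &bdaddr,
 *				  HCI_CONN_SETUP_AUTO_ON);
 *
 * and the HCI_FLT_CLEAR_ALL call below drops every installed filter at
 * once.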
*/ if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) return 0; return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00, BDADDR_ANY, 0x00); } /* Connection accept timeout ~20 secs */ static int hci_write_ca_timeout_sync(struct hci_dev *hdev) { __le16 param = cpu_to_le16(0x7d00); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT, sizeof(param), &param, HCI_CMD_TIMEOUT); } /* Enable SCO flow control if supported */ static int hci_write_sync_flowctl_sync(struct hci_dev *hdev) { struct hci_cp_write_sync_flowctl cp; int err; /* Check if the controller supports SCO and HCI_OP_WRITE_SYNC_FLOWCTL */ if (!lmp_sco_capable(hdev) || !(hdev->commands[10] & BIT(4)) || !test_bit(HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, &hdev->quirks)) return 0; memset(&cp, 0, sizeof(cp)); cp.enable = 0x01; err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SYNC_FLOWCTL, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (!err) hci_dev_set_flag(hdev, HCI_SCO_FLOWCTL); return err; } /* BR Controller init stage 2 command sequence */ static const struct hci_init_stage br_init2[] = { /* HCI_OP_READ_BUFFER_SIZE */ HCI_INIT(hci_read_buffer_size_sync), /* HCI_OP_READ_CLASS_OF_DEV */ HCI_INIT(hci_read_dev_class_sync), /* HCI_OP_READ_LOCAL_NAME */ HCI_INIT(hci_read_local_name_sync), /* HCI_OP_READ_VOICE_SETTING */ HCI_INIT(hci_read_voice_setting_sync), /* HCI_OP_READ_NUM_SUPPORTED_IAC */ HCI_INIT(hci_read_num_supported_iac_sync), /* HCI_OP_READ_CURRENT_IAC_LAP */ HCI_INIT(hci_read_current_iac_lap_sync), /* HCI_OP_SET_EVENT_FLT */ HCI_INIT(hci_clear_event_filter_sync), /* HCI_OP_WRITE_CA_TIMEOUT */ HCI_INIT(hci_write_ca_timeout_sync), /* HCI_OP_WRITE_SYNC_FLOWCTL */ HCI_INIT(hci_write_sync_flowctl_sync), {} }; static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev) { u8 mode = 0x01; if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return 0; /* When SSP is available, then the host features page * should also be available as well. However some * controllers list the max_page as 0 as long as SSP * has not been enabled. To achieve proper debugging * output, force the minimum max_page to 1 at least. */ hdev->max_page = 0x01; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode, HCI_CMD_TIMEOUT); } static int hci_write_eir_sync(struct hci_dev *hdev) { struct hci_cp_write_eir cp; if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return 0; memset(hdev->eir, 0, sizeof(hdev->eir)); memset(&cp, 0, sizeof(cp)); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_write_inquiry_mode_sync(struct hci_dev *hdev) { u8 mode; if (!lmp_inq_rssi_capable(hdev) && !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) return 0; /* If Extended Inquiry Result events are supported, then * they are clearly preferred over Inquiry Result with RSSI * events. */ mode = lmp_ext_inq_capable(hdev) ? 
0x02 : 0x01; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE, sizeof(mode), &mode, HCI_CMD_TIMEOUT); } static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev) { if (!lmp_inq_tx_pwr_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page) { struct hci_cp_read_local_ext_features cp; if (!lmp_ext_feat_capable(hdev)) return 0; memset(&cp, 0, sizeof(cp)); cp.page = page; return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev) { return hci_read_local_ext_features_sync(hdev, 0x01); } /* HCI Controller init stage 2 command sequence */ static const struct hci_init_stage hci_init2[] = { /* HCI_OP_READ_LOCAL_COMMANDS */ HCI_INIT(hci_read_local_cmds_sync), /* HCI_OP_WRITE_SSP_MODE */ HCI_INIT(hci_write_ssp_mode_1_sync), /* HCI_OP_WRITE_EIR */ HCI_INIT(hci_write_eir_sync), /* HCI_OP_WRITE_INQUIRY_MODE */ HCI_INIT(hci_write_inquiry_mode_sync), /* HCI_OP_READ_INQ_RSP_TX_POWER */ HCI_INIT(hci_read_inq_rsp_tx_power_sync), /* HCI_OP_READ_LOCAL_EXT_FEATURES */ HCI_INIT(hci_read_local_ext_features_1_sync), /* HCI_OP_WRITE_AUTH_ENABLE */ HCI_INIT(hci_write_auth_enable_sync), {} }; /* Read LE Buffer Size */ static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) { /* Use Read LE Buffer Size V2 if supported */ if (iso_capable(hdev) && hdev->commands[41] & 0x20) return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE_V2, 0, NULL, HCI_CMD_TIMEOUT); return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Local Supported Features */ static int hci_le_read_local_features_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Supported States */ static int hci_le_read_supported_states_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL, HCI_CMD_TIMEOUT); } /* LE Controller init stage 2 command sequence */ static const struct hci_init_stage le_init2[] = { /* HCI_OP_LE_READ_LOCAL_FEATURES */ HCI_INIT(hci_le_read_local_features_sync), /* HCI_OP_LE_READ_BUFFER_SIZE */ HCI_INIT(hci_le_read_buffer_size_sync), /* HCI_OP_LE_READ_SUPPORTED_STATES */ HCI_INIT(hci_le_read_supported_states_sync), {} }; static int hci_init2_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); err = hci_init_stage_sync(hdev, hci_init2); if (err) return err; if (lmp_bredr_capable(hdev)) { err = hci_init_stage_sync(hdev, br_init2); if (err) return err; } else { hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); } if (lmp_le_capable(hdev)) { err = hci_init_stage_sync(hdev, le_init2); if (err) return err; /* LE-only controllers have LE implicitly enabled */ if (!lmp_bredr_capable(hdev)) hci_dev_set_flag(hdev, HCI_LE_ENABLED); } return 0; } static int hci_set_event_mask_sync(struct hci_dev *hdev) { /* The second byte is 0xff instead of 0x9f (two reserved bits * disabled) since a Broadcom 1.2 dongle doesn't respond to the * command otherwise. */ u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; /* CSR 1.1 dongles do not accept any bitfield so don't try to set * any event mask for pre 1.2 devices.
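 *
 * Editor's note (illustrative): event code N maps to byte (N - 1) / 8,
 * bit (N - 1) % 8 of events[]. For example the Disconnection Complete
 * event (code 0x05) is byte 0, bit 4:
 *
 *	events[0] |= 0x10;	// Disconnection Complete
 *
 * which is exactly the bit cleared below (events[0] &= 0xef) while
 * suspended.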
*/ if (hdev->hci_ver < BLUETOOTH_VER_1_2) return 0; if (lmp_bredr_capable(hdev)) { events[4] |= 0x01; /* Flow Specification Complete */ /* Don't set Disconnect Complete and mode change when * suspended as that would wakeup the host when disconnecting * due to suspend. */ if (hdev->suspended) { events[0] &= 0xef; events[2] &= 0xf7; } } else { /* Use a different default for LE-only devices */ memset(events, 0, sizeof(events)); events[1] |= 0x20; /* Command Complete */ events[1] |= 0x40; /* Command Status */ events[1] |= 0x80; /* Hardware Error */ /* If the controller supports the Disconnect command, enable * the corresponding event. In addition enable packet flow * control related events. */ if (hdev->commands[0] & 0x20) { /* Don't set Disconnect Complete when suspended as that * would wakeup the host when disconnecting due to * suspend. */ if (!hdev->suspended) events[0] |= 0x10; /* Disconnection Complete */ events[2] |= 0x04; /* Number of Completed Packets */ events[3] |= 0x02; /* Data Buffer Overflow */ } /* If the controller supports the Read Remote Version * Information command, enable the corresponding event. */ if (hdev->commands[2] & 0x80) events[1] |= 0x08; /* Read Remote Version Information * Complete */ if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { events[0] |= 0x80; /* Encryption Change */ events[5] |= 0x80; /* Encryption Key Refresh Complete */ } } if (lmp_inq_rssi_capable(hdev) || test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) events[4] |= 0x02; /* Inquiry Result with RSSI */ if (lmp_ext_feat_capable(hdev)) events[4] |= 0x04; /* Read Remote Extended Features Complete */ if (lmp_esco_capable(hdev)) { events[5] |= 0x08; /* Synchronous Connection Complete */ events[5] |= 0x10; /* Synchronous Connection Changed */ } if (lmp_sniffsubr_capable(hdev)) events[5] |= 0x20; /* Sniff Subrating */ if (lmp_pause_enc_capable(hdev)) events[5] |= 0x80; /* Encryption Key Refresh Complete */ if (lmp_ext_inq_capable(hdev)) events[5] |= 0x40; /* Extended Inquiry Result */ if (lmp_no_flush_capable(hdev)) events[7] |= 0x01; /* Enhanced Flush Complete */ if (lmp_lsto_capable(hdev)) events[6] |= 0x80; /* Link Supervision Timeout Changed */ if (lmp_ssp_capable(hdev)) { events[6] |= 0x01; /* IO Capability Request */ events[6] |= 0x02; /* IO Capability Response */ events[6] |= 0x04; /* User Confirmation Request */ events[6] |= 0x08; /* User Passkey Request */ events[6] |= 0x10; /* Remote OOB Data Request */ events[6] |= 0x20; /* Simple Pairing Complete */ events[7] |= 0x04; /* User Passkey Notification */ events[7] |= 0x08; /* Keypress Notification */ events[7] |= 0x10; /* Remote Host Supported * Features Notification */ } if (lmp_le_capable(hdev)) events[7] |= 0x20; /* LE Meta-Event */ return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events, HCI_CMD_TIMEOUT); } static int hci_read_stored_link_key_sync(struct hci_dev *hdev) { struct hci_cp_read_stored_link_key cp; if (!(hdev->commands[6] & 0x20) || test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) return 0; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, BDADDR_ANY); cp.read_all = 0x01; return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_setup_link_policy_sync(struct hci_dev *hdev) { struct hci_cp_write_def_link_policy cp; u16 link_policy = 0; if (!(hdev->commands[5] & 0x10)) return 0; memset(&cp, 0, sizeof(cp)); if (lmp_rswitch_capable(hdev)) link_policy |= HCI_LP_RSWITCH; if (lmp_hold_capable(hdev)) link_policy |= HCI_LP_HOLD; if 
(lmp_sniff_capable(hdev)) link_policy |= HCI_LP_SNIFF; if (lmp_park_capable(hdev)) link_policy |= HCI_LP_PARK; cp.policy = cpu_to_le16(link_policy); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_read_page_scan_activity_sync(struct hci_dev *hdev) { if (!(hdev->commands[8] & 0x01)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev) { if (!(hdev->commands[18] & 0x04) || !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_read_page_scan_type_sync(struct hci_dev *hdev) { /* Some older Broadcom based Bluetooth 1.2 controllers do not * support the Read Page Scan Type command. Check support for * this command in the bit mask of supported commands. */ if (!(hdev->commands[13] & 0x01) || test_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL, HCI_CMD_TIMEOUT); } /* Read features beyond page 1 if available */ static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev) { u8 page; int err; if (!lmp_ext_feat_capable(hdev)) return 0; for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page; page++) { err = hci_read_local_ext_features_sync(hdev, page); if (err) return err; } return 0; } /* HCI Controller init stage 3 command sequence */ static const struct hci_init_stage hci_init3[] = { /* HCI_OP_SET_EVENT_MASK */ HCI_INIT(hci_set_event_mask_sync), /* HCI_OP_READ_STORED_LINK_KEY */ HCI_INIT(hci_read_stored_link_key_sync), /* HCI_OP_WRITE_DEF_LINK_POLICY */ HCI_INIT(hci_setup_link_policy_sync), /* HCI_OP_READ_PAGE_SCAN_ACTIVITY */ HCI_INIT(hci_read_page_scan_activity_sync), /* HCI_OP_READ_DEF_ERR_DATA_REPORTING */ HCI_INIT(hci_read_def_err_data_reporting_sync), /* HCI_OP_READ_PAGE_SCAN_TYPE */ HCI_INIT(hci_read_page_scan_type_sync), /* HCI_OP_READ_LOCAL_EXT_FEATURES */ HCI_INIT(hci_read_local_ext_features_all_sync), {} }; static int hci_le_set_event_mask_sync(struct hci_dev *hdev) { u8 events[8]; if (!lmp_le_capable(hdev)) return 0; memset(events, 0, sizeof(events)); if (hdev->le_features[0] & HCI_LE_ENCRYPTION) events[0] |= 0x10; /* LE Long Term Key Request */ /* If controller supports the Connection Parameters Request * Link Layer Procedure, enable the corresponding event. */ if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) /* LE Remote Connection Parameter Request */ events[0] |= 0x20; /* If the controller supports the Data Length Extension * feature, enable the corresponding event. */ if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) events[0] |= 0x40; /* LE Data Length Change */ /* If the controller supports LL Privacy feature or LE Extended Adv, * enable the corresponding event. */ if (use_enhanced_conn_complete(hdev)) events[1] |= 0x02; /* LE Enhanced Connection Complete */ /* Mark Device Privacy if Privacy Mode is supported */ if (privacy_mode_capable(hdev)) hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY; /* Mark Address Resolution if LL Privacy is supported */ if (ll_privacy_capable(hdev)) hdev->conn_flags |= HCI_CONN_FLAG_ADDRESS_RESOLUTION; /* If the controller supports Extended Scanner Filter * Policies, enable the corresponding event. 
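 *
 * Editor's note (illustrative): both hdev->le_features[] and
 * hdev->commands[] are byte arrays caching controller bit masks; octet N
 * bit B is tested as
 *
 *	hdev->commands[N] & (1 << B)
 *
 * so the (hdev->commands[26] & 0x08) check further below is octet 26
 * bit 3, i.e. LE Set Scan Enable support.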
*/ if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) events[1] |= 0x04; /* LE Direct Advertising Report */ /* If the controller supports Channel Selection Algorithm #2 * feature, enable the corresponding event. */ if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) events[2] |= 0x08; /* LE Channel Selection Algorithm */ /* If the controller supports the LE Set Scan Enable command, * enable the corresponding advertising report event. */ if (hdev->commands[26] & 0x08) events[0] |= 0x02; /* LE Advertising Report */ /* If the controller supports the LE Create Connection * command, enable the corresponding event. */ if (hdev->commands[26] & 0x10) events[0] |= 0x01; /* LE Connection Complete */ /* If the controller supports the LE Connection Update * command, enable the corresponding event. */ if (hdev->commands[27] & 0x04) events[0] |= 0x04; /* LE Connection Update Complete */ /* If the controller supports the LE Read Remote Used Features * command, enable the corresponding event. */ if (hdev->commands[27] & 0x20) /* LE Read Remote Used Features Complete */ events[0] |= 0x08; /* If the controller supports the LE Read Local P-256 * Public Key command, enable the corresponding event. */ if (hdev->commands[34] & 0x02) /* LE Read Local P-256 Public Key Complete */ events[0] |= 0x80; /* If the controller supports the LE Generate DHKey * command, enable the corresponding event. */ if (hdev->commands[34] & 0x04) events[1] |= 0x01; /* LE Generate DHKey Complete */ /* If the controller supports the LE Set Default PHY or * LE Set PHY commands, enable the corresponding event. */ if (hdev->commands[35] & (0x20 | 0x40)) events[1] |= 0x08; /* LE PHY Update Complete */ /* If the controller supports LE Set Extended Scan Parameters * and LE Set Extended Scan Enable commands, enable the * corresponding event. */ if (use_ext_scan(hdev)) events[1] |= 0x10; /* LE Extended Advertising Report */ /* If the controller supports the LE Extended Advertising * command, enable the corresponding event. */ if (ext_adv_capable(hdev)) events[2] |= 0x02; /* LE Advertising Set Terminated */ if (cis_capable(hdev)) { events[3] |= 0x01; /* LE CIS Established */ if (cis_peripheral_capable(hdev)) events[3] |= 0x02; /* LE CIS Request */ } if (bis_capable(hdev)) { events[1] |= 0x20; /* LE PA Report */ events[1] |= 0x40; /* LE PA Sync Established */ events[3] |= 0x04; /* LE Create BIG Complete */ events[3] |= 0x08; /* LE Terminate BIG Complete */ events[3] |= 0x10; /* LE BIG Sync Established */ events[3] |= 0x20; /* LE BIG Sync Loss */ events[4] |= 0x02; /* LE BIG Info Advertising Report */ } return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), events, HCI_CMD_TIMEOUT); } /* Read LE Advertising Channel TX Power */ static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev) { if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { /* HCI TS spec forbids mixing of legacy and extended * advertising commands wherein READ_ADV_TX_POWER is * also included. So do not call it if extended adv * is supported otherwise controller will return * COMMAND_DISALLOWED for extended commands. 
*/ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL, HCI_CMD_TIMEOUT); } return 0; } /* Read LE Min/Max Tx Power*/ static int hci_le_read_tx_power_sync(struct hci_dev *hdev) { if (!(hdev->commands[38] & 0x80) || test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Accept List Size */ static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev) { if (!(hdev->commands[26] & 0x40)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Resolving List Size */ static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev) { if (!(hdev->commands[34] & 0x40)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } /* Clear LE Resolving List */ static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev) { if (!(hdev->commands[34] & 0x20)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL, HCI_CMD_TIMEOUT); } /* Set RPA timeout */ static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev) { __le16 timeout = cpu_to_le16(hdev->rpa_timeout); if (!(hdev->commands[35] & 0x04) || test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT, sizeof(timeout), &timeout, HCI_CMD_TIMEOUT); } /* Read LE Maximum Data Length */ static int hci_le_read_max_data_len_sync(struct hci_dev *hdev) { if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Suggested Default Data Length */ static int hci_le_read_def_data_len_sync(struct hci_dev *hdev) { if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Number of Supported Advertising Sets */ static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev) { if (!ext_adv_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 0, NULL, HCI_CMD_TIMEOUT); } /* Write LE Host Supported */ static int hci_set_le_support_sync(struct hci_dev *hdev) { struct hci_cp_write_le_host_supported cp; /* LE-only devices do not support explicit enablement */ if (!lmp_bredr_capable(hdev)) return 0; memset(&cp, 0, sizeof(cp)); if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { cp.le = 0x01; cp.simul = 0x00; } if (cp.le == lmp_host_le_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* LE Set Host Feature */ static int hci_le_set_host_feature_sync(struct hci_dev *hdev) { struct hci_cp_le_set_host_feature cp; if (!cis_capable(hdev)) return 0; memset(&cp, 0, sizeof(cp)); /* Connected Isochronous Channels (Host Support) */ cp.bit_number = 32; cp.bit_value = 1; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* LE Controller init stage 3 command sequence */ static const struct hci_init_stage le_init3[] = { /* HCI_OP_LE_SET_EVENT_MASK */ HCI_INIT(hci_le_set_event_mask_sync), /* HCI_OP_LE_READ_ADV_TX_POWER */ HCI_INIT(hci_le_read_adv_tx_power_sync), /* HCI_OP_LE_READ_TRANSMIT_POWER */ HCI_INIT(hci_le_read_tx_power_sync), /* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */ HCI_INIT(hci_le_read_accept_list_size_sync), /* HCI_OP_LE_CLEAR_ACCEPT_LIST */ 
	HCI_INIT(hci_le_clear_accept_list_sync),
	/* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
	HCI_INIT(hci_le_read_resolv_list_size_sync),
	/* HCI_OP_LE_CLEAR_RESOLV_LIST */
	HCI_INIT(hci_le_clear_resolv_list_sync),
	/* HCI_OP_LE_SET_RPA_TIMEOUT */
	HCI_INIT(hci_le_set_rpa_timeout_sync),
	/* HCI_OP_LE_READ_MAX_DATA_LEN */
	HCI_INIT(hci_le_read_max_data_len_sync),
	/* HCI_OP_LE_READ_DEF_DATA_LEN */
	HCI_INIT(hci_le_read_def_data_len_sync),
	/* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
	HCI_INIT(hci_le_read_num_support_adv_sets_sync),
	/* HCI_OP_WRITE_LE_HOST_SUPPORTED */
	HCI_INIT(hci_set_le_support_sync),
	/* HCI_OP_LE_SET_HOST_FEATURE */
	HCI_INIT(hci_le_set_host_feature_sync),
	{}
};

static int hci_init3_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_init_stage_sync(hdev, hci_init3);
	if (err)
		return err;

	if (lmp_le_capable(hdev))
		return hci_init_stage_sync(hdev, le_init3);

	return 0;
}
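/* Illustrative sketch (an assumption, not copied from this file): a stage
 * table such as le_init3 above is presumably walked entry by entry until
 * the terminating {} is reached, aborting on the first error, along these
 * lines:
 *
 *	for (i = 0; stage[i].func; i++) {
 *		int err = stage[i].func(hdev);
 *
 *		if (err)
 *			return err;
 *	}
 *
 * The actual hci_init_stage_sync() helper is defined earlier in the file.
 */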
static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (!(hdev->commands[6] & 0x80) ||
	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
		return 0;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
{
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (!(hdev->commands[22] & 0x04))
		return 0;

	/* If Connectionless Peripheral Broadcast central role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_cpb_central_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x08;	/* Truncated Page Complete */
		events[2] |= 0x20;	/* CPB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Peripheral Broadcast peripheral role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_cpb_peripheral_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CPB Receive */
		events[2] |= 0x04;	/* CPB Timeout */
		events[2] |= 0x10;	/* Peripheral Page Response Timeout */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (!changed)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
				     sizeof(events), events,
				     HCI_CMD_TIMEOUT);
}

/* Read local codec list if the HCI command is supported */
static int hci_read_local_codecs_sync(struct hci_dev *hdev)
{
	if (hdev->commands[45] & 0x04)
		hci_read_supported_codecs_v2(hdev);
	else if (hdev->commands[29] & 0x20)
		hci_read_supported_codecs(hdev);

	return 0;
}

/* Read local pairing options if the HCI command is supported */
static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[41] & 0x08))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Get MWS transport configuration if the HCI command is supported */
static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
{
	if (!mws_transport_config_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Check for Synchronization Train support */
static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
{
	if (!lmp_sync_train_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Enable Secure Connections if supported and configured */
static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
{
	u8 support = 0x01;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
	    !bredr_sc_enabled(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
				     sizeof(support), &support,
				     HCI_CMD_TIMEOUT);
}

/* Set erroneous data reporting, if supported, to the wideband speech
 * setting value
 */
static int hci_set_err_data_report_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_def_err_data_reporting cp;
	bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);

	if (!(hdev->commands[18] & 0x08) ||
	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
		return 0;

	if (enabled == hdev->err_data_reporting)
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
					  ERR_DATA_REPORTING_DISABLED;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static const struct hci_init_stage hci_init4[] = {
	/* HCI_OP_DELETE_STORED_LINK_KEY */
	HCI_INIT(hci_delete_stored_link_key_sync),
	/* HCI_OP_SET_EVENT_MASK_PAGE_2 */
	HCI_INIT(hci_set_event_mask_page_2_sync),
	/* HCI_OP_READ_LOCAL_CODECS */
	HCI_INIT(hci_read_local_codecs_sync),
	/* HCI_OP_READ_LOCAL_PAIRING_OPTS */
	HCI_INIT(hci_read_local_pairing_opts_sync),
	/* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
	HCI_INIT(hci_get_mws_transport_config_sync),
	/* HCI_OP_READ_SYNC_TRAIN_PARAMS */
	HCI_INIT(hci_read_sync_train_params_sync),
	/* HCI_OP_WRITE_SC_SUPPORT */
	HCI_INIT(hci_write_sc_support_1_sync),
	/* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
	HCI_INIT(hci_set_err_data_report_sync),
	{}
};

/* Set Suggested Default Data Length to maximum if supported */
static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_write_def_data_len cp;

	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
	cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Set Default PHY parameters if the command is supported; enable all
 * supported PHYs according to the LE Features bits.
 */
static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_set_default_phy cp;

	if (!(hdev->commands[35] & 0x20)) {
		/* If the command is not supported it means only 1M PHY is
		 * supported.
		 */
		hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
		hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
		return 0;
	}

	memset(&cp, 0, sizeof(cp));
	cp.all_phys = 0x00;
	cp.tx_phys = HCI_LE_SET_PHY_1M;
	cp.rx_phys = HCI_LE_SET_PHY_1M;

	/* Enables 2M PHY if supported */
	if (le_2m_capable(hdev)) {
		cp.tx_phys |= HCI_LE_SET_PHY_2M;
		cp.rx_phys |= HCI_LE_SET_PHY_2M;
	}

	/* Enables Coded PHY if supported */
	if (le_coded_capable(hdev)) {
		cp.tx_phys |= HCI_LE_SET_PHY_CODED;
		cp.rx_phys |= HCI_LE_SET_PHY_CODED;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static const struct hci_init_stage le_init4[] = {
	/* HCI_OP_LE_WRITE_DEF_DATA_LEN */
	HCI_INIT(hci_le_set_write_def_data_len_sync),
	/* HCI_OP_LE_SET_DEFAULT_PHY */
	HCI_INIT(hci_le_set_default_phy_sync),
	{}
};

static int hci_init4_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_init_stage_sync(hdev, hci_init4);
	if (err)
		return err;

	if (lmp_le_capable(hdev))
		return hci_init_stage_sync(hdev, le_init4);

	return 0;
}
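/* For reference (not from the original file): the PHY preference bits in
 * hci_le_set_default_phy_sync() OR together, so a controller supporting
 * all three PHYs ends up with
 *
 *	cp.tx_phys = cp.rx_phys = HCI_LE_SET_PHY_1M | HCI_LE_SET_PHY_2M |
 *				  HCI_LE_SET_PHY_CODED;
 *
 * i.e. 0x07 with the usual 0x01/0x02/0x04 bit values, while all_phys =
 * 0x00 tells the controller that the host expresses a preference for both
 * the TX and RX directions.
 */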
static int hci_init_sync(struct hci_dev *hdev)
{
	int err;

	err = hci_init1_sync(hdev);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = hci_init2_sync(hdev);
	if (err < 0)
		return err;

	err = hci_init3_sync(hdev);
	if (err < 0)
		return err;

	err = hci_init4_sync(hdev);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

#define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }

static const struct {
	unsigned long quirk;
	const char *desc;
} hci_broken_table[] = {
	HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
			 "HCI Read Local Supported Commands not supported"),
	HCI_QUIRK_BROKEN(STORED_LINK_KEY,
			 "HCI Delete Stored Link Key command is advertised, "
			 "but not supported."),
	HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
			 "HCI Read Default Erroneous Data Reporting command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
			 "HCI Read Transmit Power Level command is advertised, "
			 "but not supported."),
	HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
			 "HCI Set Event Filter command not supported."),
	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
			 "HCI Enhanced Setup Synchronous Connection command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
			 "HCI LE Set Random Private Address Timeout command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(EXT_CREATE_CONN,
			 "HCI LE Extended Create Connection command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(WRITE_AUTH_PAYLOAD_TIMEOUT,
			 "HCI WRITE AUTH PAYLOAD TIMEOUT command leads "
			 "to unexpected SMP errors when pairing "
			 "and will not be used."),
	HCI_QUIRK_BROKEN(LE_CODED,
			 "HCI LE Coded PHY feature bit is set, "
			 "but its usage is not supported.")
};
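/* For reference (not from the original file): each HCI_QUIRK_BROKEN()
 * entry above simply pairs a quirk bit with its warning string, e.g.
 *
 *	HCI_QUIRK_BROKEN(LOCAL_COMMANDS, "...")
 *
 * expands to
 *
 *	{ HCI_QUIRK_BROKEN_LOCAL_COMMANDS, "..." }
 *
 * which hci_dev_setup_sync() below checks with test_bit() to warn about
 * known-broken controller claims.
 */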
/* This function handles hdev setup stage:
 *
 * Calls hdev->setup
 * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
 */
static int hci_dev_setup_sync(struct hci_dev *hdev)
{
	int ret = 0;
	bool invalid_bdaddr;
	size_t i;

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks))
		return 0;

	bt_dev_dbg(hdev, "");

	hci_sock_dev_event(hdev, HCI_DEV_SETUP);

	if (hdev->setup)
		ret = hdev->setup(hdev);

	for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
		if (test_bit(hci_broken_table[i].quirk, &hdev->quirks))
			bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
	}

	/* The transport driver can set the quirk to mark the
	 * BD_ADDR invalid before creating the HCI device or in
	 * its setup callback.
	 */
	invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
			 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
	if (!ret) {
		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
		    !bacmp(&hdev->public_addr, BDADDR_ANY))
			hci_dev_get_bd_addr_from_property(hdev);

		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr) {
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
			if (!ret)
				invalid_bdaddr = false;
		}
	}

	/* The transport driver can set these quirks before
	 * creating the HCI device or in its setup callback.
	 *
	 * For the invalid BD_ADDR quirk it is possible that
	 * it becomes a valid address if the bootloader does
	 * provide it (see above).
	 *
	 * In case any of them is set, the controller has to
	 * start up as unconfigured.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    invalid_bdaddr)
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* For an unconfigured controller it is required to
	 * read at least the version information provided by
	 * the Read Local Version Information command.
	 *
	 * If the set_bdaddr driver callback is provided, then
	 * also the original Bluetooth public device address
	 * will be read using the Read BD Address command.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return hci_unconf_init_sync(hdev);

	return ret;
}

/* This function handles hdev init stage:
 *
 * Calls hci_dev_setup_sync to perform setup stage
 * Calls hci_init_sync to perform HCI command init sequence
 */
static int hci_dev_init_sync(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	ret = hci_dev_setup_sync(hdev);

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = hci_init_sync(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		msft_do_open(hdev);
		aosp_do_open(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	return ret;
}

int hci_dev_open_sync(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	hci_devcd_reset(hdev);

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	ret = hci_dev_init_sync(hdev);
	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT)) {
			ret = hci_powered_update_sync(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);

		/* Since hci_rx_work() may wake up new cmd_work, flush it
		 * first to avoid an unexpected call of hci_cmd_work().
		 */
		flush_work(&hdev->rx_work);
		flush_work(&hdev->cmd_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			cancel_delayed_work_sync(&hdev->cmd_timer);
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		if (hdev->req_skb) {
			kfree_skb(hdev->req_skb);
			hdev->req_skb = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	return ret;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		hci_pend_le_list_del_init(p);
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
	}

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_shutdown(struct hci_dev *hdev)
{
	int err = 0;

	/* Similar to how we first do setup and then set the exclusive access
	 * bit for userspace, we must first unset userchannel and then clean up.
	 * Otherwise, the kernel can't properly use the hci channel to clean up
	 * the controller (some shutdown routines require sending additional
	 * commands to the controller for example).
	 */
	bool was_userchannel =
		hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			err = hdev->shutdown(hdev);
	}

	if (was_userchannel)
		hci_dev_set_flag(hdev, HCI_USER_CHANNEL);

	return err;
}

int hci_dev_close_sync(struct hci_dev *hdev)
{
	bool auto_off;
	int err = 0;

	bt_dev_dbg(hdev, "");

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		disable_delayed_work(&hdev->power_off);
		disable_delayed_work(&hdev->ncmd_timer);
		disable_delayed_work(&hdev->le_scan_disable);
	} else {
		cancel_delayed_work(&hdev->power_off);
		cancel_delayed_work(&hdev->ncmd_timer);
		cancel_delayed_work(&hdev->le_scan_disable);
	}

	hci_cmd_sync_cancel_sync(hdev, ENODEV);

	cancel_interleave_scan(hdev);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	err = hci_dev_shutdown(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		return err;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);

	/* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
	smp_unregister(hdev);

	hci_dev_unlock(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		aosp_do_close(hdev);
		msft_do_close(hdev);
	}

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		hci_reset_sync(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* Drop last request */
	if (hdev->req_skb) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);
	hci_codec_list_clear(&hdev->local_codecs);

	hci_dev_put(hdev);
	return err;
}

/* This function performs the power-on HCI command sequence as follows:
 *
 * If the controller is already up (HCI_UP), perform the
 * hci_powered_update_sync sequence, otherwise run hci_dev_open_sync which
 * will follow with hci_powered_update_sync after the init sequence is
 * completed.
 */
static int hci_power_on_sync(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		return hci_powered_update_sync(hdev);
	}

	err = hci_dev_open_sync(hdev);
	if (err < 0)
		return err;

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to return the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_close_sync(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}

	return 0;
}

static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
{
	struct hci_cp_remote_name_req_cancel cp;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, addr);

	return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_stop_discovery_sync(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
						    0, NULL, HCI_CMD_TIMEOUT);
			if (err)
				return err;
		}

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);

			err = hci_scan_disable_sync(hdev);
			if (err)
				return err;
		}
	} else {
		err = hci_scan_disable_sync(hdev);
		if (err)
			return err;
	}

	/* Resume advertising if it was paused */
	if (ll_privacy_capable(hdev))
		hci_resume_advertising_sync(hdev);

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return 0;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return 0;

		/* Ignore cancel errors since they should not interfere with
		 * stopping the discovery.
		 */
		hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
	}

	return 0;
}

static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 reason)
{
	struct hci_cp_disconnect cp;

	if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
		/* This is a BIS connection, hci_conn_del will
		 * do the necessary cleanup.
		 */
		hci_dev_lock(hdev);
		hci_conn_failed(conn, reason);
		hci_dev_unlock(hdev);

		return 0;
	}

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;

	/* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
	 * used when suspending or powering off, where we don't want to wait
	 * for the peer's response.
	 */
	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
		return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
						sizeof(cp), &cp,
						HCI_EV_DISCONN_COMPLETE,
						HCI_CMD_TIMEOUT, NULL);

	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp),
				     &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
				      struct hci_conn *conn, u8 reason)
{
	/* Return reason if scanning since the connection shall probably be
	 * cleaned up directly.
	 */
	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
		return reason;

	if (conn->role == HCI_ROLE_SLAVE ||
	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
				     0, NULL, HCI_CMD_TIMEOUT);
}

static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
				   u8 reason)
{
	if (conn->type == LE_LINK)
		return hci_le_connect_cancel_sync(hdev, conn, reason);

	if (conn->type == ISO_LINK) {
		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
		 * page 1857:
		 *
		 * If this command is issued for a CIS on the Central and the
		 * CIS is successfully terminated before being established,
		 * then an HCI_LE_CIS_Established event shall also be sent for
		 * this CIS with the Status Operation Cancelled by Host (0x44).
		 */
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			return hci_disconnect_sync(hdev, conn, reason);

		/* A CIS with no Create CIS sent has nothing to cancel */
		if (bacmp(&conn->dst, BDADDR_ANY))
			return HCI_ERROR_LOCAL_HOST_TERM;

		/* There is no way to cancel a BIS without terminating the BIG
		 * which is done later on connection cleanup.
		 */
		return 0;
	}

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return 0;

	/* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
	 * used when suspending or powering off, where we don't want to wait
	 * for the peer's response.
	 */
	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
		return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
						6, &conn->dst,
						HCI_EV_CONN_COMPLETE,
						HCI_CMD_TIMEOUT, NULL);

	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
				     6, &conn->dst, HCI_CMD_TIMEOUT);
}

static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 reason)
{
	struct hci_cp_reject_sync_conn_req cp;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.reason = reason;

	/* SCO rejection has its own limited set of
	 * allowed error values (0x0D-0x0F).
	 */
	if (reason < 0x0d || reason > 0x0f)
		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
				  u8 reason)
{
	struct hci_cp_le_reject_cis cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
				u8 reason)
{
	struct hci_cp_reject_conn_req cp;

	if (conn->type == ISO_LINK)
		return hci_le_reject_cis_sync(hdev, conn, reason);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
		return hci_reject_sco_sync(hdev, conn, reason);

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
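/* For reference (not from the original file): the 0x0D-0x0F window checked
 * in hci_reject_sco_sync() corresponds to the only reasons the spec allows
 * for rejecting a synchronous connection request:
 *
 *	0x0D	Connection Rejected due to Limited Resources
 *	0x0E	Connection Rejected due to Security Reasons
 *	0x0F	Connection Rejected due to Unacceptable BD_ADDR
 *
 * Anything else is coerced to HCI_ERROR_REJ_LIMITED_RESOURCES (0x0D).
 */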
int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
{
	int err = 0;
	u16 handle = conn->handle;
	bool disconnect = false;
	struct hci_conn *c;

	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		err = hci_disconnect_sync(hdev, conn, reason);
		break;
	case BT_CONNECT:
		err = hci_connect_cancel_sync(hdev, conn, reason);
		break;
	case BT_CONNECT2:
		err = hci_reject_conn_sync(hdev, conn, reason);
		break;
	case BT_OPEN:
	case BT_BOUND:
		break;
	default:
		disconnect = true;
		break;
	}

	hci_dev_lock(hdev);

	/* Check if the connection has been cleaned up concurrently */
	c = hci_conn_hash_lookup_handle(hdev, handle);
	if (!c || c != conn) {
		err = 0;
		goto unlock;
	}

	/* Cleanup hci_conn object if it cannot be cancelled as it
	 * likely means the controller and host stack are out of sync
	 * or, in the LE case, it was still scanning, so it can be
	 * cleaned up safely.
	 */
	if (disconnect) {
		conn->state = BT_CLOSED;
		hci_disconn_cfm(conn, reason);
		hci_conn_del(conn);
	} else {
		hci_conn_failed(conn, reason);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
{
	struct list_head *head = &hdev->conn_hash.list;
	struct hci_conn *conn;

	rcu_read_lock();
	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
		/* Make sure the connection is not freed while unlocking */
		conn = hci_conn_get(conn);
		rcu_read_unlock();
		/* Disregard possible errors: hci_conn_del shall have been
		 * called even if an error occurred, since the error path
		 * causes hci_conn_failed to be called, which calls
		 * hci_conn_del internally.
		 */
		hci_abort_conn_sync(hdev, conn, reason);
		hci_conn_put(conn);
		rcu_read_lock();
	}
	rcu_read_unlock();

	return 0;
}

/* This function performs the power-off HCI command sequence as follows:
 *
 * Clear Advertising
 * Stop Discovery
 * Disconnect all connections
 * hci_dev_close_sync
 */
static int hci_power_off_sync(struct hci_dev *hdev)
{
	int err;

	/* If controller is already down there is nothing to do */
	if (!test_bit(HCI_UP, &hdev->flags))
		return 0;

	hci_dev_set_flag(hdev, HCI_POWERING_DOWN);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		err = hci_write_scan_enable_sync(hdev, 0x00);
		if (err)
			goto out;
	}

	err = hci_clear_adv_sync(hdev, NULL, false);
	if (err)
		goto out;

	err = hci_stop_discovery_sync(hdev);
	if (err)
		goto out;

	/* Terminated due to Power Off */
	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
	if (err)
		goto out;

	err = hci_dev_close_sync(hdev);

out:
	hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
	return err;
}

int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
{
	if (val)
		return hci_power_on_sync(hdev);

	return hci_power_off_sync(hdev);
}

static int hci_write_iac_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
				     (cp.num_iac * 3) + 1, &cp,
				     HCI_CMD_TIMEOUT);
}
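/* For reference (not from the original file): the iac_lap bytes above are
 * 3-byte inquiry access codes in little-endian order, so
 *
 *	{ 0x33, 0x8b, 0x9e }	is the GIAC LAP 0x9e8b33
 *	{ 0x00, 0x8b, 0x9e }	is the LIAC LAP 0x9e8b00
 *
 * and the written payload is num_iac followed by num_iac * 3 LAP bytes,
 * hence the (cp.num_iac * 3) + 1 length.
 */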
int hci_update_discoverable_sync(struct hci_dev *hdev)
{
	int err = 0;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = hci_write_iac_sync(hdev);
		if (err)
			return err;

		err = hci_update_scan_sync(hdev);
		if (err)
			return err;

		err = hci_update_class_sync(hdev);
		if (err)
			return err;
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		err = hci_update_adv_data_sync(hdev, 0x00);
		if (err)
			return err;

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				err = hci_start_ext_adv_sync(hdev, 0x00);
			else
				err = hci_enable_advertising_sync(hdev);
		}
	}

	return err;
}

static int update_discoverable_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_discoverable_sync(hdev);
}

int hci_update_discoverable(struct hci_dev *hdev)
{
	/* Only queue if it would have any effect */
	if (hdev_is_powered(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
					  NULL);

	return 0;
}

int hci_update_connectable_sync(struct hci_dev *hdev)
{
	int err;

	err = hci_update_scan_sync(hdev);
	if (err)
		return err;

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			err = hci_start_ext_adv_sync(hdev,
						     hdev->cur_adv_instance);
		else
			err = hci_enable_advertising_sync(hdev);

		if (err)
			return err;
	}

	return hci_update_passive_scan_sync(hdev);
}

int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp)
{
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	bt_dev_dbg(hdev, "");

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));

	if (hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;
	cp.num_rsp = num_rsp;

	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
{
	u8 own_addr_type;
	/* Accept list is not used for discovery */
	u8 filter_policy = 0x00;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	int err;

	bt_dev_dbg(hdev, "");

	/* If controller is scanning, it means the passive scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	err = hci_scan_disable_sync(hdev);
	if (err) {
		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
		return err;
	}

	cancel_interleave_scan(hdev);

	/* Pause address resolution for active scan and stop advertising if
	 * privacy is enabled.
	 */
	err = hci_pause_addr_resolution(hdev);
	if (err)
		goto failed;

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
					     &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	if (hci_is_adv_monitoring(hdev) ||
	    (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering)) {
		/* Duplicate filter should be disabled when some advertisement
		 * monitor is activated, otherwise AdvMon can only receive one
		 * advertisement for one peer(*) during active scanning, and
		 * might report loss to these peers.
		 *
		 * If the controller does strict duplicate filtering and the
		 * discovery requires result filtering, disable the
		 * controller-based filtering, since it can cause reports
		 * that would match the host filter to not be reported.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	}

	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
				  hdev->le_scan_window_discovery,
				  own_addr_type, filter_policy, filter_dup);
	if (!err)
		return err;

failed:
	/* Resume advertising if it was paused */
	if (ll_privacy_capable(hdev))
		hci_resume_advertising_sync(hdev);

	/* Resume passive scanning */
	hci_update_passive_scan_sync(hdev);
	return err;
}

static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
	if (err)
		return err;

	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
}

int hci_start_discovery_sync(struct hci_dev *hdev)
{
	unsigned long timeout;
	int err;

	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			err = hci_start_interleaved_discovery_sync(hdev);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
		break;
	default:
		return -EINVAL;
	}

	if (err)
		return err;

	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
	return 0;
}

static void hci_suspend_monitor_sync(struct hci_dev *hdev)
{
	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_suspend_sync(hdev);
		break;
	default:
		return;
	}
}

/* This function disables discovery and marks it as paused */
static int hci_pause_discovery_sync(struct hci_dev *hdev)
{
	int old_state = hdev->discovery.state;
	int err;

	/* If discovery is already stopped/stopping/paused there is nothing
	 * to do.
	 */
	if (old_state == DISCOVERY_STOPPED ||
	    old_state == DISCOVERY_STOPPING || hdev->discovery_paused)
		return 0;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	err = hci_stop_discovery_sync(hdev);
	if (err)
		return err;

	hdev->discovery_paused = true;
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	return 0;
}

static int hci_update_event_filter_sync(struct hci_dev *hdev)
{
	struct bdaddr_list_with_flags *b;
	u8 scan = SCAN_DISABLED;
	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
	int err;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	/* Some fake CSR controllers lock up after setting this type of
	 * filter, so avoid sending the request altogether.
	 */
	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
		return 0;

	/* Always clear event filter when starting */
	hci_clear_event_filter_sync(hdev);

	list_for_each_entry(b, &hdev->accept_list, list) {
		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
			continue;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);

		err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
						HCI_CONN_SETUP_ALLOW_BDADDR,
						&b->bdaddr,
						HCI_CONN_SETUP_AUTO_ON);
		if (err)
			bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
				   &b->bdaddr);
		else
			scan = SCAN_PAGE;
	}

	if (scan && !scanning)
		hci_write_scan_enable_sync(hdev, scan);
	else if (!scan && scanning)
		hci_write_scan_enable_sync(hdev, scan);

	return 0;
}

/* This function disables scan (BR and LE) and marks it as paused */
static int hci_pause_scan_sync(struct hci_dev *hdev)
{
	if (hdev->scanning_paused)
		return 0;

	/* Disable page scan if enabled */
	if (test_bit(HCI_PSCAN, &hdev->flags))
		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);

	hci_scan_disable_sync(hdev);

	hdev->scanning_paused = true;

	return 0;
}

/* This function performs the HCI suspend procedures in the following order:
 *
 * Pause discovery (active scanning/inquiry)
 * Pause Directed Advertising/Advertising
 * Pause Scanning (passive scanning in case discovery was not active)
 * Disconnect all connections
 * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
 * otherwise:
 * Update event mask (only set events that are allowed to wake up the host)
 * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
 * Update passive scanning (lower duty cycle)
 * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
 */
int hci_suspend_sync(struct hci_dev *hdev)
{
	int err;

	/* If marked as suspended there is nothing to do */
	if (hdev->suspended)
		return 0;

	/* Mark device as suspended */
	hdev->suspended = true;

	/* Pause discovery if not already stopped */
	hci_pause_discovery_sync(hdev);

	/* Pause other advertisements */
	hci_pause_advertising_sync(hdev);

	/* Suspend monitor filters */
	hci_suspend_monitor_sync(hdev);

	/* Prevent disconnects from causing scanning to be re-enabled */
	hci_pause_scan_sync(hdev);

	if (hci_conn_count(hdev)) {
		/* Soft disconnect everything (power off) */
		err = hci_disconnect_all_sync(hdev,
					      HCI_ERROR_REMOTE_POWER_OFF);
		if (err) {
			/* Set state to BT_RUNNING so resume doesn't notify */
			hdev->suspend_state = BT_RUNNING;
			hci_resume_sync(hdev);
			return err;
		}

		/* Update event mask so only the allowed events can wake up
		 * the host.
		 */
		hci_set_event_mask_sync(hdev);
	}

	/* Only configure accept list if disconnect succeeded and wake
	 * isn't being prevented.
	 */
	if (!hdev->wakeup || !hdev->wakeup(hdev)) {
		hdev->suspend_state = BT_SUSPEND_DISCONNECT;
		return 0;
	}

	/* Unpause to take care of updating scanning params */
	hdev->scanning_paused = false;

	/* Enable event filter for paired devices */
	hci_update_event_filter_sync(hdev);

	/* Update LE passive scan if enabled */
	hci_update_passive_scan_sync(hdev);

	/* Pause scan changes again. */
	hdev->scanning_paused = true;

	hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;

	return 0;
}
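/* For reference (not from the original file): hci_suspend_sync() ends up
 * in one of two suspend states:
 *
 *	BT_SUSPEND_DISCONNECT		the device cannot (or may not) wake
 *					the host, so everything is simply
 *					disconnected and scanning stays off
 *	BT_SUSPEND_CONFIGURE_WAKE	a wakeup-capable device keeps an
 *					event filter and a low duty cycle
 *					passive scan for wakeup-flagged peers
 *
 * hci_resume_sync() below undoes these steps in reverse order.
 */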
/* This function resumes discovery */
static int hci_resume_discovery_sync(struct hci_dev *hdev)
{
	int err;

	/* If discovery is not paused there is nothing to do */
	if (!hdev->discovery_paused)
		return 0;

	hdev->discovery_paused = false;

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	err = hci_start_discovery_sync(hdev);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
					    DISCOVERY_FINDING);

	return err;
}

static void hci_resume_monitor_sync(struct hci_dev *hdev)
{
	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_resume_sync(hdev);
		break;
	default:
		return;
	}
}

/* This function resumes scanning and resets the paused flag */
static int hci_resume_scan_sync(struct hci_dev *hdev)
{
	if (!hdev->scanning_paused)
		return 0;

	hdev->scanning_paused = false;

	hci_update_scan_sync(hdev);

	/* Reset passive scanning to normal */
	hci_update_passive_scan_sync(hdev);

	return 0;
}

/* This function performs the HCI resume procedures in the following order:
 *
 * Restore event mask
 * Clear event filter
 * Update passive scanning (normal duty cycle)
 * Resume Directed Advertising/Advertising
 * Resume discovery (active scanning/inquiry)
 */
int hci_resume_sync(struct hci_dev *hdev)
{
	/* If not marked as suspended there is nothing to do */
	if (!hdev->suspended)
		return 0;

	hdev->suspended = false;

	/* Restore event mask */
	hci_set_event_mask_sync(hdev);

	/* Clear any event filters and restore scan state */
	hci_clear_event_filter_sync(hdev);

	/* Resume scanning */
	hci_resume_scan_sync(hdev);

	/* Resume monitor filters */
	hci_resume_monitor_sync(hdev);

	/* Resume other advertisements */
	hci_resume_advertising_sync(hdev);

	/* Resume discovery */
	hci_resume_discovery_sync(hdev);

	return 0;
}

static bool conn_use_rpa(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
						struct hci_conn *conn)
{
	struct hci_cp_le_set_ext_adv_params cp;
	int err;
	bdaddr_t random_addr;
	u8 own_addr_type;

	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
					     &own_addr_type);
	if (err)
		return err;

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
				     &own_addr_type, &random_addr);
	if (err)
		return err;

	memset(&cp, 0, sizeof(cp));

	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = HCI_TX_POWER_INVALID;
	cp.primary_phy = HCI_ADV_PHY_1M;
	cp.secondary_phy = HCI_ADV_PHY_1M;
	cp.handle = 0x00;	/* Use instance 0 for directed adv */
	cp.own_addr_type = own_addr_type;
	cp.peer_addr_type = conn->dst_type;
	bacpy(&cp.peer_addr, &conn->dst);

	/* As per Core Spec 5.2 Vol 2, PART E, Sec 7.8.53, the
	 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
	 * does not support advertising data. When the advertising set
	 * already contains some, the controller shall return the error
	 * code 'Invalid HCI Command Parameters' (0x12).
	 * So it is required to remove the adv set for handle 0x00, since
	 * we use instance 0 for directed adv.
	 */
	err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
	if (err)
		return err;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err)
		return err;

	/* Check if random address needs to be updated */
	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY) &&
	    bacmp(&random_addr, &hdev->random_addr)) {
		err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
						       &random_addr);
		if (err)
			return err;
	}

	return hci_enable_ext_advertising_sync(hdev, 0x00);
}

static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
					    struct hci_conn *conn)
{
	struct hci_cp_le_set_adv_param cp;
	u8 status;
	u8 own_addr_type;
	u8 enable;

	if (ext_adv_capable(hdev))
		return hci_le_ext_directed_advertising_sync(hdev, conn);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	status = hci_update_random_address_sync(hdev, false,
						conn_use_rpa(conn),
						&own_addr_type);
	if (status)
		return status;

	memset(&cp, 0, sizeof(cp));

	/* Some controllers might reject command if intervals are not
	 * within range for undirected advertising.
	 * BCM20702A0 is known to be affected by this.
	 */
	cp.min_interval = cpu_to_le16(0x0020);
	cp.max_interval = cpu_to_le16(0x0020);

	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (status)
		return status;

	enable = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable,
				     HCI_CMD_TIMEOUT);
}

static void set_ext_conn_params(struct hci_conn *conn,
				struct hci_cp_le_ext_conn_param *p)
{
	struct hci_dev *hdev = conn->hdev;

	memset(p, 0, sizeof(*p));

	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	p->min_ce_len = cpu_to_le16(0x0000);
	p->max_ce_len = cpu_to_le16(0x0000);
}
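/* For reference (not from the original file): HCI_OP_LE_EXT_CREATE_CONN
 * below takes a variable-length parameter block: the base command is
 * followed by one hci_cp_le_ext_conn_param entry per PHY bit set in
 * cp->phys, in ascending bit order. For example, initiating on 1M and
 * Coded only means two entries, so
 *
 *	plen = sizeof(*cp) + 2 * sizeof(*p);
 *
 * which is why data[] is sized for the worst case of three PHYs and plen
 * is accumulated as the entries are filled in.
 */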
static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
				       struct hci_conn *conn, u8 own_addr_type)
{
	struct hci_cp_le_ext_create_conn *cp;
	struct hci_cp_le_ext_conn_param *p;
	u8 data[sizeof(*cp) + sizeof(*p) * 3];
	u32 plen;

	cp = (void *)data;
	p = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	bacpy(&cp->peer_addr, &conn->dst);
	cp->peer_addr_type = conn->dst_type;
	cp->own_addr_type = own_addr_type;

	plen = sizeof(*cp);

	if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
			      conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
		cp->phys |= LE_SCAN_PHY_1M;
		set_ext_conn_params(conn, p);

		p++;
		plen += sizeof(*p);
	}

	if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M ||
			      conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) {
		cp->phys |= LE_SCAN_PHY_2M;
		set_ext_conn_params(conn, p);

		p++;
		plen += sizeof(*p);
	}

	if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED ||
				 conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) {
		cp->phys |= LE_SCAN_PHY_CODED;
		set_ext_conn_params(conn, p);

		plen += sizeof(*p);
	}

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
					plen, data,
					HCI_EV_LE_ENHANCED_CONN_COMPLETE,
					conn->conn_timeout, NULL);
}

static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_cp_le_create_conn cp;
	struct hci_conn_params *params;
	u8 own_addr_type;
	int err;
	struct hci_conn *conn = data;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	bt_dev_dbg(hdev, "conn %p", conn);

	clear_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->state = BT_CONNECT;

	/* If requested to connect as peripheral use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're active scanning and simultaneous roles is not
		 * enabled simply reject the attempt.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
			hci_conn_del(conn);
			return -EBUSY;
		}

		/* Pause advertising while doing directed advertising. */
		hci_pause_advertising_sync(hdev);

		err = hci_le_directed_advertising_sync(hdev, conn);
		goto done;
	}

	/* Disable advertising if simultaneous roles is not in use. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
		hci_pause_advertising_sync(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If controller is scanning, we stop it since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_scan_disable_sync(hdev);
		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
	}

	/* Update random address, but set require_privacy to false so
	 * that we never connect with a non-resolvable address.
	 */
	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
					     &own_addr_type);
	if (err)
		goto done;

	/* Send command LE Extended Create Connection if supported */
	if (use_ext_conn(hdev)) {
		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
		goto done;
	}

	memset(&cp, 0, sizeof(cp));

	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);

	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);
	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
	 *
	 * If this event is unmasked and the HCI_LE_Connection_Complete event
	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
	 * sent when a new connection has been created.
	 */
	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
				       sizeof(cp), &cp,
				       use_enhanced_conn_complete(hdev) ?
				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
				       HCI_EV_LE_CONN_COMPLETE,
				       conn->conn_timeout, NULL);

done:
	if (err == -ETIMEDOUT)
		hci_le_connect_cancel_sync(hdev, conn, 0x00);

	/* Re-enable advertising after the connection attempt is finished. */
	hci_resume_advertising_sync(hdev);
	return err;
}

int hci_le_create_cis_sync(struct hci_dev *hdev)
{
	DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f);
	size_t aux_num_cis = 0;
	struct hci_conn *conn;
	u8 cig = BT_ISO_QOS_CIG_UNSET;

	/* The spec allows only one pending LE Create CIS command at a time. If
	 * the command is pending now, don't do anything. We check for pending
	 * connections after each CIS Established event.
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2566:
	 *
	 * If the Host issues this command before all the
	 * HCI_LE_CIS_Established events from the previous use of the
	 * command have been generated, the Controller shall return the
	 * error code Command Disallowed (0x0C).
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2567:
	 *
	 * When the Controller receives the HCI_LE_Create_CIS command, the
	 * Controller sends the HCI_Command_Status event to the Host. An
	 * HCI_LE_CIS_Established event will be generated for each CIS when it
	 * is established or if it is disconnected or considered lost before
	 * being established; until all the events are generated, the command
	 * remains pending.
	 */

	hci_dev_lock(hdev);

	rcu_read_lock();

	/* Wait until previous Create CIS has completed */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			goto done;
	}

	/* Find CIG with all CIS ready */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		struct hci_conn *link;

		if (hci_conn_check_create_cis(conn))
			continue;

		cig = conn->iso_qos.ucast.cig;

		list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
			if (hci_conn_check_create_cis(link) > 0 &&
			    link->iso_qos.ucast.cig == cig &&
			    link->state != BT_CONNECTED) {
				cig = BT_ISO_QOS_CIG_UNSET;
				break;
			}
		}

		if (cig != BT_ISO_QOS_CIG_UNSET)
			break;
	}

	if (cig == BT_ISO_QOS_CIG_UNSET)
		goto done;

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		struct hci_cis *cis = &cmd->cis[aux_num_cis];

		if (hci_conn_check_create_cis(conn) ||
		    conn->iso_qos.ucast.cig != cig)
			continue;

		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
		cis->acl_handle = cpu_to_le16(conn->parent->handle);
		cis->cis_handle = cpu_to_le16(conn->handle);
		aux_num_cis++;

		if (aux_num_cis >= cmd->num_cis)
			break;
	}
	cmd->num_cis = aux_num_cis;

done:
	rcu_read_unlock();

	hci_dev_unlock(hdev);

	if (!aux_num_cis)
		return 0;

	/* Wait for HCI_LE_CIS_Established */
	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
					struct_size(cmd, cis, cmd->num_cis),
					cmd, HCI_EVT_LE_CIS_ESTABLISHED,
					conn->conn_timeout, NULL);
}

int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_remove_cig cp;

	memset(&cp, 0, sizeof(cp));
	cp.cig_id = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
				     &cp, HCI_CMD_TIMEOUT);
}

int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_big_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
{
	struct hci_cp_le_pa_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(handle);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		/* If the controller supports LL Privacy, own address type
		 * 0x03 (random resolved) is used.
		 */
		if (ll_privacy_capable(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (adv_rpa_valid(adv_instance))
				return 0;
		} else {
			if (rpa_valid(hdev))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = PTR_UINT(data);

	return hci_update_adv_data_sync(hdev, instance);
}

int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
				  UINT_PTR(instance), NULL);
}
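/* For reference (not from the original file): the top two bits of the most
 * significant address byte (b[5], since bdaddr_t is little-endian) encode
 * the random address sub-type:
 *
 *	0b00	non-resolvable private address (hence nrpa.b[5] &= 0x3f)
 *	0b01	resolvable private address
 *	0b11	static random address
 */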
*/ if (test_bit(HCI_INQUIRY, &hdev->flags)) { err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL, HCI_CMD_TIMEOUT); if (err) bt_dev_warn(hdev, "Failed to cancel inquiry %d", err); } conn->state = BT_CONNECT; conn->out = true; conn->role = HCI_ROLE_MASTER; conn->attempt++; conn->link_policy = hdev->link_policy; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); cp.pscan_rep_mode = 0x02; ie = hci_inquiry_cache_lookup(hdev, &conn->dst); if (ie) { if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) { cp.pscan_rep_mode = ie->data.pscan_rep_mode; cp.pscan_mode = ie->data.pscan_mode; cp.clock_offset = ie->data.clock_offset | cpu_to_le16(0x8000); } memcpy(conn->dev_class, ie->data.dev_class, 3); } cp.pkt_type = cpu_to_le16(conn->pkt_type); if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) cp.role_switch = 0x01; else cp.role_switch = 0x00; return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp, HCI_EV_CONN_COMPLETE, conn->conn_timeout, NULL); } int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn) { return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn, NULL); } static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err) { struct hci_conn *conn = data; bt_dev_dbg(hdev, "err %d", err); if (err == -ECANCELED) return; hci_dev_lock(hdev); if (!hci_conn_valid(hdev, conn)) goto done; if (!err) { hci_connect_le_scan_cleanup(conn, 0x00); goto done; } /* Check if connection is still pending */ if (conn != hci_lookup_le_connect(hdev)) goto done; /* Flush to make sure we send create conn cancel command if needed */ flush_delayed_work(&conn->le_conn_timeout); hci_conn_failed(conn, bt_status(err)); done: hci_dev_unlock(hdev); } int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn) { return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn, create_le_conn_complete); } int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn) { if (conn->state != BT_OPEN) return -EINVAL; switch (conn->type) { case ACL_LINK: return !hci_cmd_sync_dequeue_once(hdev, hci_acl_create_conn_sync, conn, NULL); case LE_LINK: return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync, conn, create_le_conn_complete); } return -ENOENT; } int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn, struct hci_conn_params *params) { struct hci_cp_le_conn_update cp; memset(&cp, 0, sizeof(cp)); cp.handle = cpu_to_le16(conn->handle); cp.conn_interval_min = cpu_to_le16(params->conn_min_interval); cp.conn_interval_max = cpu_to_le16(params->conn_max_interval); cp.conn_latency = cpu_to_le16(params->conn_latency); cp.supervision_timeout = cpu_to_le16(params->supervision_timeout); cp.min_ce_len = cpu_to_le16(0x0000); cp.max_ce_len = cpu_to_le16(0x0000); return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); }
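/*
 * Illustrative sketch (editor's addition, not part of hci_sync.c): how a
 * caller would typically drive hci_le_conn_update_sync() above.  The work
 * function is queued on the command-sync machinery with hci_cmd_sync_queue()
 * so the HCI_OP_LE_CONN_UPDATE command is serialized with all other
 * synchronous HCI traffic.  conn_update_sync() and the parameter values
 * below are hypothetical, chosen only for illustration.
 */
static int conn_update_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct hci_conn_params params = {
		.conn_min_interval = 0x0018,	/* 30 ms, in 1.25 ms units */
		.conn_max_interval = 0x0028,	/* 50 ms, in 1.25 ms units */
		.conn_latency = 0x0000,
		.supervision_timeout = 0x01f4,	/* 5 s, in 10 ms units */
	};

	return hci_le_conn_update_sync(hdev, conn, &params);
}

/* Queued from process context, e.g.:
 *	hci_cmd_sync_queue(hdev, conn_update_sync, conn, NULL);
 */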
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __CGROUP_INTERNAL_H #define __CGROUP_INTERNAL_H #include <linux/cgroup.h> #include <linux/kernfs.h> #include <linux/workqueue.h> #include <linux/list.h> #include <linux/refcount.h> #include <linux/fs_parser.h> #define TRACE_CGROUP_PATH_LEN 1024 extern spinlock_t trace_cgroup_path_lock; extern char trace_cgroup_path[TRACE_CGROUP_PATH_LEN]; extern void __init enable_debug_cgroup(void); /* * cgroup_path() takes a spin lock. It is good practice not to take * spin locks within trace point handlers, as they are mostly hidden * from normal view. As cgroup_path() can take the kernfs_rename_lock * spin lock, it is best to not call that function from the trace event * handler. * * Note: trace_cgroup_##type##_enabled() is a static branch that will only * be set when the trace event is enabled. */ #define TRACE_CGROUP_PATH(type, cgrp, ...) \ do { \ if (trace_cgroup_##type##_enabled()) { \ unsigned long flags; \ spin_lock_irqsave(&trace_cgroup_path_lock, \ flags); \ cgroup_path(cgrp, trace_cgroup_path, \ TRACE_CGROUP_PATH_LEN); \ trace_cgroup_##type(cgrp, trace_cgroup_path, \ ##__VA_ARGS__); \ spin_unlock_irqrestore(&trace_cgroup_path_lock, \ flags); \ } \ } while (0) /* * The cgroup filesystem superblock creation/mount context. */ struct cgroup_fs_context { struct kernfs_fs_context kfc; struct cgroup_root *root; struct cgroup_namespace *ns; unsigned int flags; /* CGRP_ROOT_* flags */ /* cgroup1 bits */ bool cpuset_clone_children; bool none; /* User explicitly requested empty subsystem */ bool all_ss; /* Seen 'all' option */ u16 subsys_mask; /* Selected subsystems */ char *name; /* Hierarchy name */ char *release_agent; /* Path for release notifications */ }; static inline struct cgroup_fs_context *cgroup_fc2context(struct fs_context *fc) { struct kernfs_fs_context *kfc = fc->fs_private; return container_of(kfc, struct cgroup_fs_context, kfc); } struct cgroup_pidlist; struct cgroup_file_ctx { struct cgroup_namespace *ns; struct { void *trigger; } psi; struct { bool started; struct css_task_iter iter; } procs; struct { struct cgroup_pidlist *pidlist; } procs1; struct cgroup_of_peak peak; }; /* * A cgroup can be associated with multiple css_sets as different tasks may * belong to different cgroups on different hierarchies. In the other * direction, a css_set is naturally associated with multiple cgroups.
* This M:N relationship is represented by the following link structure * which exists for each association and allows traversing the associations * from both sides. */ struct cgrp_cset_link { /* the cgroup and css_set this link associates */ struct cgroup *cgrp; struct css_set *cset; /* list of cgrp_cset_links anchored at cgrp->cset_links */ struct list_head cset_link; /* list of cgrp_cset_links anchored at css_set->cgrp_links */ struct list_head cgrp_link; }; /* used to track tasks and csets during migration */ struct cgroup_taskset { /* the src and dst cset list running through cset->mg_node */ struct list_head src_csets; struct list_head dst_csets; /* the number of tasks in the set */ int nr_tasks; /* the subsys currently being processed */ int ssid; /* * Fields for cgroup_taskset_*() iteration. * * Before migration is committed, the target migration tasks are on * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of * the csets on ->dst_csets. ->csets point to either ->src_csets * or ->dst_csets depending on whether migration is committed. * * ->cur_csets and ->cur_task point to the current task position * during iteration. */ struct list_head *csets; struct css_set *cur_cset; struct task_struct *cur_task; }; /* migration context also tracks preloading */ struct cgroup_mgctx { /* * Preloaded source and destination csets. Used to guarantee * atomic success or failure on actual migration. */ struct list_head preloaded_src_csets; struct list_head preloaded_dst_csets; /* tasks and csets to migrate */ struct cgroup_taskset tset; /* subsystems affected by migration */ u16 ss_mask; }; #define CGROUP_TASKSET_INIT(tset) \ { \ .src_csets = LIST_HEAD_INIT(tset.src_csets), \ .dst_csets = LIST_HEAD_INIT(tset.dst_csets), \ .csets = &tset.src_csets, \ } #define CGROUP_MGCTX_INIT(name) \ { \ LIST_HEAD_INIT(name.preloaded_src_csets), \ LIST_HEAD_INIT(name.preloaded_dst_csets), \ CGROUP_TASKSET_INIT(name.tset), \ } #define DEFINE_CGROUP_MGCTX(name) \ struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name) extern struct cgroup_subsys *cgroup_subsys[]; extern struct list_head cgroup_roots; extern bool cgrp_dfl_visible; /* iterate across the hierarchies */ #define for_each_root(root) \ list_for_each_entry_rcu((root), &cgroup_roots, root_list, \ lockdep_is_held(&cgroup_mutex)) /** * for_each_subsys - iterate all enabled cgroup subsystems * @ss: the iteration cursor * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end */ #define for_each_subsys(ss, ssid) \ for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \ (((ss) = cgroup_subsys[ssid]) || true); (ssid)++) static inline bool cgroup_is_dead(const struct cgroup *cgrp) { return !(cgrp->self.flags & CSS_ONLINE); } static inline bool notify_on_release(const struct cgroup *cgrp) { return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); } void put_css_set_locked(struct css_set *cset); static inline void put_css_set(struct css_set *cset) { unsigned long flags; /* * Ensure that the refcount doesn't hit zero while any readers * can see it. 
Similar to atomic_dec_and_lock(), but for an * rwlock */ if (refcount_dec_not_one(&cset->refcount)) return; spin_lock_irqsave(&css_set_lock, flags); put_css_set_locked(cset); spin_unlock_irqrestore(&css_set_lock, flags); } /* * refcounted get/put for css_set objects */ static inline void get_css_set(struct css_set *cset) { refcount_inc(&cset->refcount); } bool cgroup_ssid_enabled(int ssid); bool cgroup_on_dfl(const struct cgroup *cgrp); struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root); struct cgroup *task_cgroup_from_root(struct task_struct *task, struct cgroup_root *root); struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline); void cgroup_kn_unlock(struct kernfs_node *kn); int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen, struct cgroup_namespace *ns); void cgroup_favor_dynmods(struct cgroup_root *root, bool favor); void cgroup_free_root(struct cgroup_root *root); void init_cgroup_root(struct cgroup_fs_context *ctx); int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask); int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask); int cgroup_do_get_tree(struct fs_context *fc); int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp); void cgroup_migrate_finish(struct cgroup_mgctx *mgctx); void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp, struct cgroup_mgctx *mgctx); int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx); int cgroup_migrate(struct task_struct *leader, bool threadgroup, struct cgroup_mgctx *mgctx); int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, bool threadgroup); void cgroup_attach_lock(bool lock_threadgroup); void cgroup_attach_unlock(bool lock_threadgroup); struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, bool *locked) __acquires(&cgroup_threadgroup_rwsem); void cgroup_procs_write_finish(struct task_struct *task, bool locked) __releases(&cgroup_threadgroup_rwsem); void cgroup_lock_and_drain_offline(struct cgroup *cgrp); int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode); int cgroup_rmdir(struct kernfs_node *kn); int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node, struct kernfs_root *kf_root); int __cgroup_task_count(const struct cgroup *cgrp); int cgroup_task_count(const struct cgroup *cgrp); /* * rstat.c */ int cgroup_rstat_init(struct cgroup *cgrp); void cgroup_rstat_exit(struct cgroup *cgrp); void cgroup_rstat_boot(void); void cgroup_base_stat_cputime_show(struct seq_file *seq); /* * namespace.c */ extern const struct proc_ns_operations cgroupns_operations; /* * cgroup-v1.c */ extern struct cftype cgroup1_base_files[]; extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops; extern const struct fs_parameter_spec cgroup1_fs_parameters[]; int proc_cgroupstats_show(struct seq_file *m, void *v); bool cgroup1_ssid_disabled(int ssid); void cgroup1_pidlist_destroy_all(struct cgroup *cgrp); void cgroup1_release_agent(struct work_struct *work); void cgroup1_check_for_release(struct cgroup *cgrp); int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param); int cgroup1_get_tree(struct fs_context *fc); int cgroup1_reconfigure(struct fs_context *ctx); #endif /* __CGROUP_INTERNAL_H */
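/*
 * Illustrative sketch (editor's addition, not part of this header): the
 * dec-and-lock pattern that put_css_set() above applies to css_set
 * refcounts, shown on a generic object.  refcount_dec_not_one() only drops
 * the count while it is greater than one, so the lock is taken solely for
 * the final 1 -> 0 transition and no reader can observe the object at
 * refcount zero outside the lock.  obj, obj_lock and obj_destroy() are
 * hypothetical names used only for this sketch.
 */
struct obj {
	refcount_t refcount;
};

static DEFINE_SPINLOCK(obj_lock);
static void obj_destroy(struct obj *o);

static void obj_put(struct obj *o)
{
	/* Fast path: the count was > 1, no lock needed. */
	if (refcount_dec_not_one(&o->refcount))
		return;

	/* Slow path: we may be dropping the last reference. */
	spin_lock(&obj_lock);
	if (refcount_dec_and_test(&o->refcount))
		obj_destroy(o);
	spin_unlock(&obj_lock);
}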
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies.
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/unaligned.h> #include "htc.h" MODULE_FIRMWARE(HTC_7010_MODULE_FW); MODULE_FIRMWARE(HTC_9271_MODULE_FW); static const struct usb_device_id ath9k_hif_usb_ids[] = { { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */ { USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */ { USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */ { USB_DEVICE(0x07b8, 0x9271) }, /* Altai WA1011N-GU */ { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */ { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */ { USB_DEVICE(0x13D3, 0x3348) }, /* Azurewave */ { USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */ { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */ { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ { USB_DEVICE(0x040D, 0x3801) }, /* VIA */ { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */ { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */ { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */ { USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */ { USB_DEVICE(0x1eda, 0x2315) }, /* AirTies */ { USB_DEVICE(0x0cf3, 0x7015), .driver_info = AR9287_USB }, /* Atheros */ { USB_DEVICE(0x0cf3, 0x7010), .driver_info = AR9280_USB }, /* Atheros */ { USB_DEVICE(0x0846, 0x9018), .driver_info = AR9280_USB }, /* Netgear WNDA3200 */ { USB_DEVICE(0x083A, 0xA704), .driver_info = AR9280_USB }, /* SMC Networks */ { USB_DEVICE(0x0411, 0x017f), .driver_info = AR9280_USB }, /* Sony UWA-BR100 */ { USB_DEVICE(0x0411, 0x0197), .driver_info = AR9280_USB }, /* Buffalo WLI-UV-AG300P */ { USB_DEVICE(0x04da, 0x3904), .driver_info = AR9280_USB }, { USB_DEVICE(0x0930, 0x0a08), .driver_info = AR9280_USB }, /* Toshiba WLM-20U2 and GN-1080 */ { USB_DEVICE(0x0cf3, 0x20ff), .driver_info = STORAGE_DEVICE }, { }, }; MODULE_DEVICE_TABLE(usb, ath9k_hif_usb_ids); static int __hif_usb_tx(struct hif_device_usb *hif_dev); static void hif_usb_regout_cb(struct urb *urb) { struct cmd_buf *cmd = urb->context; switch (urb->status) { case 0: break; case -ENOENT: case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: goto free; default: break; } if (cmd) { ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle, cmd->skb, true); kfree(cmd); } return; free: kfree_skb(cmd->skb); kfree(cmd); } static int hif_usb_send_regout(struct hif_device_usb *hif_dev, struct sk_buff *skb) { struct urb *urb; struct cmd_buf *cmd; int ret = 0; urb = usb_alloc_urb(0, GFP_KERNEL); if (urb == NULL) return -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) { usb_free_urb(urb); return -ENOMEM; } cmd->skb = skb; cmd->hif_dev = hif_dev; usb_fill_int_urb(urb, hif_dev->udev, usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE), skb->data, skb->len, hif_usb_regout_cb, cmd, 1); usb_anchor_urb(urb, &hif_dev->regout_submitted); ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { usb_unanchor_urb(urb); kfree(cmd); } usb_free_urb(urb); return ret; } static void hif_usb_mgmt_cb(struct urb *urb) { struct cmd_buf *cmd = urb->context; struct hif_device_usb *hif_dev; unsigned long flags; bool 
txok = true; if (!cmd || !cmd->skb || !cmd->hif_dev) return; hif_dev = cmd->hif_dev; switch (urb->status) { case 0: break; case -ENOENT: case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: txok = false; /* * If the URBs are being flushed, no need to complete * this packet. */ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) { spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); dev_kfree_skb_any(cmd->skb); kfree(cmd); return; } spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); break; default: txok = false; break; } skb_pull(cmd->skb, 4); ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle, cmd->skb, txok); kfree(cmd); } static int hif_usb_send_mgmt(struct hif_device_usb *hif_dev, struct sk_buff *skb) { struct urb *urb; struct cmd_buf *cmd; int ret = 0; __le16 *hdr; urb = usb_alloc_urb(0, GFP_ATOMIC); if (urb == NULL) return -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (cmd == NULL) { usb_free_urb(urb); return -ENOMEM; } cmd->skb = skb; cmd->hif_dev = hif_dev; hdr = skb_push(skb, 4); *hdr++ = cpu_to_le16(skb->len - 4); *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG); usb_fill_bulk_urb(urb, hif_dev->udev, usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE), skb->data, skb->len, hif_usb_mgmt_cb, cmd); usb_anchor_urb(urb, &hif_dev->mgmt_submitted); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { usb_unanchor_urb(urb); kfree(cmd); } usb_free_urb(urb); return ret; } static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev, struct sk_buff_head *list) { struct sk_buff *skb; while ((skb = __skb_dequeue(list)) != NULL) { dev_kfree_skb_any(skb); } } static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev, struct sk_buff_head *queue, bool txok) { struct sk_buff *skb; while ((skb = __skb_dequeue(queue)) != NULL) { #ifdef CONFIG_ATH9K_HTC_DEBUGFS int ln = skb->len; #endif ath9k_htc_txcompletion_cb(hif_dev->htc_handle, skb, txok); if (txok) { TX_STAT_INC(hif_dev, skb_success); TX_STAT_ADD(hif_dev, skb_success_bytes, ln); } else TX_STAT_INC(hif_dev, skb_failed); } } static void hif_usb_tx_cb(struct urb *urb) { struct tx_buf *tx_buf = urb->context; struct hif_device_usb *hif_dev; bool txok = true; if (!tx_buf || !tx_buf->hif_dev) return; hif_dev = tx_buf->hif_dev; switch (urb->status) { case 0: break; case -ENOENT: case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: txok = false; /* * If the URBs are being flushed, no need to add this * URB to the free list. 
*/ spin_lock(&hif_dev->tx.tx_lock); if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) { spin_unlock(&hif_dev->tx.tx_lock); ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue); return; } spin_unlock(&hif_dev->tx.tx_lock); break; default: txok = false; break; } ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, txok); /* Re-initialize the SKB queue */ tx_buf->len = tx_buf->offset = 0; __skb_queue_head_init(&tx_buf->skb_queue); /* Add this TX buffer to the free list */ spin_lock(&hif_dev->tx.tx_lock); list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf); hif_dev->tx.tx_buf_cnt++; if (!(hif_dev->tx.flags & HIF_USB_TX_STOP)) __hif_usb_tx(hif_dev); /* Check for pending SKBs */ TX_STAT_INC(hif_dev, buf_completed); spin_unlock(&hif_dev->tx.tx_lock); } /* TX lock has to be taken */ static int __hif_usb_tx(struct hif_device_usb *hif_dev) { struct tx_buf *tx_buf = NULL; struct sk_buff *nskb = NULL; int ret = 0, i; u16 tx_skb_cnt = 0; u8 *buf; __le16 *hdr; if (hif_dev->tx.tx_skb_cnt == 0) return 0; /* Check if a free TX buffer is available */ if (list_empty(&hif_dev->tx.tx_buf)) return 0; tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list); list_move_tail(&tx_buf->list, &hif_dev->tx.tx_pending); hif_dev->tx.tx_buf_cnt--; tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM); for (i = 0; i < tx_skb_cnt; i++) { nskb = __skb_dequeue(&hif_dev->tx.tx_skb_queue); /* Should never be NULL */ BUG_ON(!nskb); hif_dev->tx.tx_skb_cnt--; buf = tx_buf->buf; buf += tx_buf->offset; hdr = (__le16 *)buf; *hdr++ = cpu_to_le16(nskb->len); *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG); buf += 4; memcpy(buf, nskb->data, nskb->len); tx_buf->len = nskb->len + 4; if (i < (tx_skb_cnt - 1)) tx_buf->offset += (((tx_buf->len - 1) / 4) + 1) * 4; if (i == (tx_skb_cnt - 1)) tx_buf->len += tx_buf->offset; __skb_queue_tail(&tx_buf->skb_queue, nskb); TX_STAT_INC(hif_dev, skb_queued); } usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev, usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE), tx_buf->buf, tx_buf->len, hif_usb_tx_cb, tx_buf); ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC); if (ret) { tx_buf->len = tx_buf->offset = 0; ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, false); __skb_queue_head_init(&tx_buf->skb_queue); list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf); hif_dev->tx.tx_buf_cnt++; } else { TX_STAT_INC(hif_dev, buf_queued); } return ret; } static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb) { struct ath9k_htc_tx_ctl *tx_ctl; unsigned long flags; int ret = 0; spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); if (hif_dev->tx.flags & HIF_USB_TX_STOP) { spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); return -ENODEV; } /* Check if the max queue count has been reached */ if (hif_dev->tx.tx_skb_cnt > MAX_TX_BUF_NUM) { spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); return -ENOMEM; } spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); tx_ctl = HTC_SKB_CB(skb); /* Mgmt/Beacon frames don't use the TX buffer pool */ if ((tx_ctl->type == ATH9K_HTC_MGMT) || (tx_ctl->type == ATH9K_HTC_BEACON)) { ret = hif_usb_send_mgmt(hif_dev, skb); } spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); if ((tx_ctl->type == ATH9K_HTC_NORMAL) || (tx_ctl->type == ATH9K_HTC_AMPDU)) { __skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb); hif_dev->tx.tx_skb_cnt++; } /* Check if AMPDUs have to be sent immediately */ if ((hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) && (hif_dev->tx.tx_skb_cnt < 2)) { __hif_usb_tx(hif_dev); } spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); return ret; } static void 
hif_usb_start(void *hif_handle) { struct hif_device_usb *hif_dev = hif_handle; unsigned long flags; hif_dev->flags |= HIF_USB_START; spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); hif_dev->tx.flags &= ~HIF_USB_TX_STOP; spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); } static void hif_usb_stop(void *hif_handle) { struct hif_device_usb *hif_dev = hif_handle; struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL; unsigned long flags; spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); ath9k_skb_queue_complete(hif_dev, &hif_dev->tx.tx_skb_queue, false); hif_dev->tx.tx_skb_cnt = 0; hif_dev->tx.flags |= HIF_USB_TX_STOP; spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); /* The pending URBs have to be canceled. */ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); list_for_each_entry_safe(tx_buf, tx_buf_tmp, &hif_dev->tx.tx_pending, list) { usb_get_urb(tx_buf->urb); spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); usb_kill_urb(tx_buf->urb); list_del(&tx_buf->list); usb_free_urb(tx_buf->urb); kfree(tx_buf->buf); kfree(tx_buf); spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); } spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); usb_kill_anchored_urbs(&hif_dev->mgmt_submitted); } static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb) { struct hif_device_usb *hif_dev = hif_handle; int ret = 0; switch (pipe_id) { case USB_WLAN_TX_PIPE: ret = hif_usb_send_tx(hif_dev, skb); break; case USB_REG_OUT_PIPE: ret = hif_usb_send_regout(hif_dev, skb); break; default: dev_err(&hif_dev->udev->dev, "ath9k_htc: Invalid TX pipe: %d\n", pipe_id); ret = -EINVAL; break; } return ret; } static inline bool check_index(struct sk_buff *skb, u8 idx) { struct ath9k_htc_tx_ctl *tx_ctl; tx_ctl = HTC_SKB_CB(skb); if ((tx_ctl->type == ATH9K_HTC_AMPDU) && (tx_ctl->sta_idx == idx)) return true; return false; } static void hif_usb_sta_drain(void *hif_handle, u8 idx) { struct hif_device_usb *hif_dev = hif_handle; struct sk_buff *skb, *tmp; unsigned long flags; spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); skb_queue_walk_safe(&hif_dev->tx.tx_skb_queue, skb, tmp) { if (check_index(skb, idx)) { __skb_unlink(skb, &hif_dev->tx.tx_skb_queue); ath9k_htc_txcompletion_cb(hif_dev->htc_handle, skb, false); hif_dev->tx.tx_skb_cnt--; TX_STAT_INC(hif_dev, skb_failed); } } spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); } static struct ath9k_htc_hif hif_usb = { .transport = ATH9K_HIF_USB, .name = "ath9k_hif_usb", .control_ul_pipe = USB_REG_OUT_PIPE, .control_dl_pipe = USB_REG_IN_PIPE, .start = hif_usb_start, .stop = hif_usb_stop, .sta_drain = hif_usb_sta_drain, .send = hif_usb_send, }; /* Need to free remain_skb allocated in ath9k_hif_usb_rx_stream * in case ath9k_hif_usb_rx_stream wasn't called next time to * process the buffer and subsequently free it. 
*/ static void ath9k_hif_usb_free_rx_remain_skb(struct hif_device_usb *hif_dev) { unsigned long flags; spin_lock_irqsave(&hif_dev->rx_lock, flags); if (hif_dev->remain_skb) { dev_kfree_skb_any(hif_dev->remain_skb); hif_dev->remain_skb = NULL; hif_dev->rx_remain_len = 0; RX_STAT_INC(hif_dev, skb_dropped); } spin_unlock_irqrestore(&hif_dev->rx_lock, flags); } static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, struct sk_buff *skb) { struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER]; int index = 0, i, len = skb->len; int rx_remain_len, rx_pkt_len; u16 pool_index = 0; u8 *ptr; spin_lock(&hif_dev->rx_lock); rx_remain_len = hif_dev->rx_remain_len; rx_pkt_len = hif_dev->rx_transfer_len; if (rx_remain_len != 0) { struct sk_buff *remain_skb = hif_dev->remain_skb; if (remain_skb) { ptr = (u8 *) remain_skb->data; index = rx_remain_len; rx_remain_len -= hif_dev->rx_pad_len; ptr += rx_pkt_len; memcpy(ptr, skb->data, rx_remain_len); rx_pkt_len += rx_remain_len; skb_put(remain_skb, rx_pkt_len); skb_pool[pool_index++] = remain_skb; hif_dev->remain_skb = NULL; hif_dev->rx_remain_len = 0; } else { index = rx_remain_len; } } spin_unlock(&hif_dev->rx_lock); while (index < len) { u16 pkt_len; u16 pkt_tag; u16 pad_len; int chk_idx; ptr = (u8 *) skb->data; pkt_len = get_unaligned_le16(ptr + index); pkt_tag = get_unaligned_le16(ptr + index + 2); /* It is supposed that if we have an invalid pkt_tag or * pkt_len then the whole input SKB is considered invalid * and dropped; the associated packets already in skb_pool * are dropped, too. */ if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) { RX_STAT_INC(hif_dev, skb_dropped); goto invalid_pkt; } if (pkt_len > 2 * MAX_RX_BUF_SIZE) { dev_err(&hif_dev->udev->dev, "ath9k_htc: invalid pkt_len (%x)\n", pkt_len); RX_STAT_INC(hif_dev, skb_dropped); goto invalid_pkt; } pad_len = 4 - (pkt_len & 0x3); if (pad_len == 4) pad_len = 0; chk_idx = index; index = index + 4 + pkt_len + pad_len; if (index > MAX_RX_BUF_SIZE) { spin_lock(&hif_dev->rx_lock); nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC); if (!nskb) { dev_err(&hif_dev->udev->dev, "ath9k_htc: RX memory allocation error\n"); spin_unlock(&hif_dev->rx_lock); goto err; } hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE; hif_dev->rx_transfer_len = MAX_RX_BUF_SIZE - chk_idx - 4; hif_dev->rx_pad_len = pad_len; skb_reserve(nskb, 32); RX_STAT_INC(hif_dev, skb_allocated); memcpy(nskb->data, &(skb->data[chk_idx+4]), hif_dev->rx_transfer_len); /* Record the buffer pointer */ hif_dev->remain_skb = nskb; spin_unlock(&hif_dev->rx_lock); } else { if (pool_index == MAX_PKT_NUM_IN_TRANSFER) { dev_err(&hif_dev->udev->dev, "ath9k_htc: over RX MAX_PKT_NUM\n"); goto err; } nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC); if (!nskb) { dev_err(&hif_dev->udev->dev, "ath9k_htc: RX memory allocation error\n"); goto err; } skb_reserve(nskb, 32); RX_STAT_INC(hif_dev, skb_allocated); memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len); skb_put(nskb, pkt_len); skb_pool[pool_index++] = nskb; } } err: for (i = 0; i < pool_index; i++) { RX_STAT_ADD(hif_dev, skb_completed_bytes, skb_pool[i]->len); ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i], skb_pool[i]->len, USB_WLAN_RX_PIPE); RX_STAT_INC(hif_dev, skb_completed); } return; invalid_pkt: for (i = 0; i < pool_index; i++) { dev_kfree_skb_any(skb_pool[i]); RX_STAT_INC(hif_dev, skb_dropped); } return; } static void ath9k_hif_usb_rx_cb(struct urb *urb) { struct rx_buf *rx_buf = urb->context; struct hif_device_usb *hif_dev = rx_buf->hif_dev; struct sk_buff *skb = rx_buf->skb; int ret; if 
(!skb) return; if (!hif_dev) goto free; switch (urb->status) { case 0: break; case -ENOENT: case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: goto free; default: goto resubmit; } if (likely(urb->actual_length != 0)) { skb_put(skb, urb->actual_length); ath9k_hif_usb_rx_stream(hif_dev, skb); } resubmit: __skb_set_length(skb, 0); usb_anchor_urb(urb, &hif_dev->rx_submitted); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { usb_unanchor_urb(urb); goto free; } return; free: kfree_skb(skb); kfree(rx_buf); } static void ath9k_hif_usb_reg_in_cb(struct urb *urb) { struct rx_buf *rx_buf = urb->context; struct hif_device_usb *hif_dev = rx_buf->hif_dev; struct sk_buff *skb = rx_buf->skb; int ret; if (!skb) return; if (!hif_dev) goto free_skb; switch (urb->status) { case 0: break; case -ENOENT: case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: goto free_skb; default: __skb_set_length(skb, 0); goto resubmit; } if (likely(urb->actual_length != 0)) { skb_put(skb, urb->actual_length); /* * Process the command first. * skb is either freed here or passed to be * managed to another callback function. */ ath9k_htc_rx_msg(hif_dev->htc_handle, skb, skb->len, USB_REG_IN_PIPE); skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC); if (!skb) { dev_err(&hif_dev->udev->dev, "ath9k_htc: REG_IN memory allocation failure\n"); goto free_rx_buf; } rx_buf->skb = skb; usb_fill_int_urb(urb, hif_dev->udev, usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), skb->data, MAX_REG_IN_BUF_SIZE, ath9k_hif_usb_reg_in_cb, rx_buf, 1); } resubmit: usb_anchor_urb(urb, &hif_dev->reg_in_submitted); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { usb_unanchor_urb(urb); goto free_skb; } return; free_skb: kfree_skb(skb); free_rx_buf: kfree(rx_buf); urb->context = NULL; } static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev) { struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL; unsigned long flags; spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); list_for_each_entry_safe(tx_buf, tx_buf_tmp, &hif_dev->tx.tx_buf, list) { list_del(&tx_buf->list); usb_free_urb(tx_buf->urb); kfree(tx_buf->buf); kfree(tx_buf); } spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); hif_dev->tx.flags |= HIF_USB_TX_FLUSH; spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); list_for_each_entry_safe(tx_buf, tx_buf_tmp, &hif_dev->tx.tx_pending, list) { usb_get_urb(tx_buf->urb); spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); usb_kill_urb(tx_buf->urb); list_del(&tx_buf->list); usb_free_urb(tx_buf->urb); kfree(tx_buf->buf); kfree(tx_buf); spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); } spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); usb_kill_anchored_urbs(&hif_dev->mgmt_submitted); } static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev) { struct tx_buf *tx_buf; int i; INIT_LIST_HEAD(&hif_dev->tx.tx_buf); INIT_LIST_HEAD(&hif_dev->tx.tx_pending); spin_lock_init(&hif_dev->tx.tx_lock); __skb_queue_head_init(&hif_dev->tx.tx_skb_queue); init_usb_anchor(&hif_dev->mgmt_submitted); for (i = 0; i < MAX_TX_URB_NUM; i++) { tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL); if (!tx_buf) goto err; tx_buf->buf = kzalloc(MAX_TX_BUF_SIZE, GFP_KERNEL); if (!tx_buf->buf) goto err; tx_buf->urb = usb_alloc_urb(0, GFP_KERNEL); if (!tx_buf->urb) goto err; tx_buf->hif_dev = hif_dev; __skb_queue_head_init(&tx_buf->skb_queue); list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf); } hif_dev->tx.tx_buf_cnt = MAX_TX_URB_NUM; return 0; err: if (tx_buf) { kfree(tx_buf->buf); 
kfree(tx_buf); } ath9k_hif_usb_dealloc_tx_urbs(hif_dev); return -ENOMEM; } static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev) { usb_kill_anchored_urbs(&hif_dev->rx_submitted); ath9k_hif_usb_free_rx_remain_skb(hif_dev); } static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) { struct rx_buf *rx_buf = NULL; struct sk_buff *skb = NULL; struct urb *urb = NULL; int i, ret; init_usb_anchor(&hif_dev->rx_submitted); spin_lock_init(&hif_dev->rx_lock); for (i = 0; i < MAX_RX_URB_NUM; i++) { rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); if (!rx_buf) { ret = -ENOMEM; goto err_rxb; } /* Allocate URB */ urb = usb_alloc_urb(0, GFP_KERNEL); if (urb == NULL) { ret = -ENOMEM; goto err_urb; } /* Allocate buffer */ skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL); if (!skb) { ret = -ENOMEM; goto err_skb; } rx_buf->hif_dev = hif_dev; rx_buf->skb = skb; usb_fill_bulk_urb(urb, hif_dev->udev, usb_rcvbulkpipe(hif_dev->udev, USB_WLAN_RX_PIPE), skb->data, MAX_RX_BUF_SIZE, ath9k_hif_usb_rx_cb, rx_buf); /* Anchor URB */ usb_anchor_urb(urb, &hif_dev->rx_submitted); /* Submit URB */ ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { usb_unanchor_urb(urb); goto err_submit; } /* * Drop reference count. * This ensures that the URB is freed when killing them. */ usb_free_urb(urb); } return 0; err_submit: kfree_skb(skb); err_skb: usb_free_urb(urb); err_urb: kfree(rx_buf); err_rxb: ath9k_hif_usb_dealloc_rx_urbs(hif_dev); return ret; } static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev) { usb_kill_anchored_urbs(&hif_dev->reg_in_submitted); } static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) { struct rx_buf *rx_buf = NULL; struct sk_buff *skb = NULL; struct urb *urb = NULL; int i, ret; init_usb_anchor(&hif_dev->reg_in_submitted); for (i = 0; i < MAX_REG_IN_URB_NUM; i++) { rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); if (!rx_buf) { ret = -ENOMEM; goto err_rxb; } /* Allocate URB */ urb = usb_alloc_urb(0, GFP_KERNEL); if (urb == NULL) { ret = -ENOMEM; goto err_urb; } /* Allocate buffer */ skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL); if (!skb) { ret = -ENOMEM; goto err_skb; } rx_buf->hif_dev = hif_dev; rx_buf->skb = skb; usb_fill_int_urb(urb, hif_dev->udev, usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), skb->data, MAX_REG_IN_BUF_SIZE, ath9k_hif_usb_reg_in_cb, rx_buf, 1); /* Anchor URB */ usb_anchor_urb(urb, &hif_dev->reg_in_submitted); /* Submit URB */ ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { usb_unanchor_urb(urb); goto err_submit; } /* * Drop reference count. * This ensures that the URB is freed when killing them. 
*/ usb_free_urb(urb); } return 0; err_submit: kfree_skb(skb); err_skb: usb_free_urb(urb); err_urb: kfree(rx_buf); err_rxb: ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); return ret; } static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev) { /* Register Write */ init_usb_anchor(&hif_dev->regout_submitted); /* TX */ if (ath9k_hif_usb_alloc_tx_urbs(hif_dev) < 0) goto err; /* RX */ if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0) goto err_rx; /* Register Read */ if (ath9k_hif_usb_alloc_reg_in_urbs(hif_dev) < 0) goto err_reg; return 0; err_reg: ath9k_hif_usb_dealloc_rx_urbs(hif_dev); err_rx: ath9k_hif_usb_dealloc_tx_urbs(hif_dev); err: return -ENOMEM; } void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) { usb_kill_anchored_urbs(&hif_dev->regout_submitted); ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); ath9k_hif_usb_dealloc_tx_urbs(hif_dev); ath9k_hif_usb_dealloc_rx_urbs(hif_dev); } static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev) { int transfer, err; const void *data = hif_dev->fw_data; size_t len = hif_dev->fw_size; u32 addr = AR9271_FIRMWARE; u8 *buf = kzalloc(4096, GFP_KERNEL); u32 firm_offset; if (!buf) return -ENOMEM; while (len) { transfer = min_t(size_t, len, 4096); memcpy(buf, data, transfer); err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0), FIRMWARE_DOWNLOAD, 0x40 | USB_DIR_OUT, addr >> 8, 0, buf, transfer, USB_MSG_TIMEOUT); if (err < 0) { kfree(buf); return err; } len -= transfer; data += transfer; addr += transfer; } kfree(buf); if (IS_AR7010_DEVICE(hif_dev->usb_device_id->driver_info)) firm_offset = AR7010_FIRMWARE_TEXT; else firm_offset = AR9271_FIRMWARE_TEXT; /* * Issue FW download complete command to firmware. */ err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0), FIRMWARE_DOWNLOAD_COMP, 0x40 | USB_DIR_OUT, firm_offset >> 8, 0, NULL, 0, USB_MSG_TIMEOUT); if (err) return -EIO; dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n", hif_dev->fw_name, (unsigned long) hif_dev->fw_size); return 0; } static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev) { int ret; ret = ath9k_hif_usb_download_fw(hif_dev); if (ret) { dev_err(&hif_dev->udev->dev, "ath9k_htc: Firmware - %s download failed\n", hif_dev->fw_name); return ret; } /* Alloc URBs */ ret = ath9k_hif_usb_alloc_urbs(hif_dev); if (ret) { dev_err(&hif_dev->udev->dev, "ath9k_htc: Unable to allocate URBs\n"); return ret; } return 0; } static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev) { ath9k_hif_usb_dealloc_urbs(hif_dev); } /* * If initialization fails or the FW cannot be retrieved, * detach the device. 
*/ static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev) { struct device *dev = &hif_dev->udev->dev; struct device *parent = dev->parent; complete_all(&hif_dev->fw_done); if (parent) device_lock(parent); device_release_driver(dev); if (parent) device_unlock(parent); } static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context); /* taken from iwlwifi */ static int ath9k_hif_request_firmware(struct hif_device_usb *hif_dev, bool first) { char index[8], *chip; int ret; if (first) { if (htc_use_dev_fw) { hif_dev->fw_minor_index = FIRMWARE_MINOR_IDX_MAX + 1; sprintf(index, "%s", "dev"); } else { hif_dev->fw_minor_index = FIRMWARE_MINOR_IDX_MAX; sprintf(index, "%d", hif_dev->fw_minor_index); } } else { hif_dev->fw_minor_index--; sprintf(index, "%d", hif_dev->fw_minor_index); } /* test for FW 1.3 */ if (MAJOR_VERSION_REQ == 1 && hif_dev->fw_minor_index == 3) { const char *filename; if (IS_AR7010_DEVICE(hif_dev->usb_device_id->driver_info)) filename = FIRMWARE_AR7010_1_1; else filename = FIRMWARE_AR9271; /* expected fw locations: * - htc_9271.fw (stable version 1.3, deprecated) */ snprintf(hif_dev->fw_name, sizeof(hif_dev->fw_name), "%s", filename); } else if (hif_dev->fw_minor_index < FIRMWARE_MINOR_IDX_MIN) { dev_err(&hif_dev->udev->dev, "no suitable firmware found!\n"); return -ENOENT; } else { if (IS_AR7010_DEVICE(hif_dev->usb_device_id->driver_info)) chip = "7010"; else chip = "9271"; /* expected fw locations: * - ath9k_htc/htc_9271-1.dev.0.fw (development version) * - ath9k_htc/htc_9271-1.4.0.fw (stable version) */ snprintf(hif_dev->fw_name, sizeof(hif_dev->fw_name), "%s/htc_%s-%d.%s.0.fw", HTC_FW_PATH, chip, MAJOR_VERSION_REQ, index); } ret = request_firmware_nowait(THIS_MODULE, true, hif_dev->fw_name, &hif_dev->udev->dev, GFP_KERNEL, hif_dev, ath9k_hif_usb_firmware_cb); if (ret) { dev_err(&hif_dev->udev->dev, "ath9k_htc: Async request for firmware %s failed\n", hif_dev->fw_name); return ret; } dev_info(&hif_dev->udev->dev, "ath9k_htc: Firmware %s requested\n", hif_dev->fw_name); return ret; } static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context) { struct hif_device_usb *hif_dev = context; int ret; if (!fw) { ret = ath9k_hif_request_firmware(hif_dev, false); if (!ret) return; dev_err(&hif_dev->udev->dev, "ath9k_htc: Failed to get firmware %s\n", hif_dev->fw_name); goto err_fw; } hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb, &hif_dev->udev->dev); if (hif_dev->htc_handle == NULL) goto err_dev_alloc; hif_dev->fw_data = fw->data; hif_dev->fw_size = fw->size; /* Proceed with initialization */ ret = ath9k_hif_usb_dev_init(hif_dev); if (ret) goto err_dev_init; ret = ath9k_htc_hw_init(hif_dev->htc_handle, &hif_dev->interface->dev, hif_dev->usb_device_id->idProduct, hif_dev->udev->product, hif_dev->usb_device_id->driver_info); if (ret) { ret = -EINVAL; goto err_htc_hw_init; } release_firmware(fw); hif_dev->flags |= HIF_USB_READY; complete_all(&hif_dev->fw_done); return; err_htc_hw_init: ath9k_hif_usb_dev_deinit(hif_dev); err_dev_init: ath9k_htc_hw_free(hif_dev->htc_handle); err_dev_alloc: release_firmware(fw); err_fw: ath9k_hif_usb_firmware_fail(hif_dev); } /* * An exact copy of the function from zd1211rw. 
*/ static int send_eject_command(struct usb_interface *interface) { struct usb_device *udev = interface_to_usbdev(interface); struct usb_host_interface *iface_desc = interface->cur_altsetting; struct usb_endpoint_descriptor *endpoint; unsigned char *cmd; u8 bulk_out_ep; int r; if (iface_desc->desc.bNumEndpoints < 2) return -ENODEV; /* Find bulk out endpoint */ for (r = 1; r >= 0; r--) { endpoint = &iface_desc->endpoint[r].desc; if (usb_endpoint_dir_out(endpoint) && usb_endpoint_xfer_bulk(endpoint)) { bulk_out_ep = endpoint->bEndpointAddress; break; } } if (r == -1) { dev_err(&udev->dev, "ath9k_htc: Could not find bulk out endpoint\n"); return -ENODEV; } cmd = kzalloc(31, GFP_KERNEL); if (cmd == NULL) return -ENODEV; /* USB bulk command block */ cmd[0] = 0x55; /* bulk command signature */ cmd[1] = 0x53; /* bulk command signature */ cmd[2] = 0x42; /* bulk command signature */ cmd[3] = 0x43; /* bulk command signature */ cmd[14] = 6; /* command length */ cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */ cmd[19] = 0x2; /* eject disc */ dev_info(&udev->dev, "Ejecting storage device...\n"); r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep), cmd, 31, NULL, 2 * USB_MSG_TIMEOUT); kfree(cmd); if (r) return r; /* At this point, the device disconnects and reconnects with the real * ID numbers. */ usb_set_intfdata(interface, NULL); return 0; } static int ath9k_hif_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in, *int_out; struct usb_device *udev = interface_to_usbdev(interface); struct usb_host_interface *alt; struct hif_device_usb *hif_dev; int ret = 0; /* Verify the expected endpoints are present */ alt = interface->cur_altsetting; if (usb_find_common_endpoints(alt, &bulk_in, &bulk_out, &int_in, &int_out) < 0 || usb_endpoint_num(bulk_in) != USB_WLAN_RX_PIPE || usb_endpoint_num(bulk_out) != USB_WLAN_TX_PIPE || usb_endpoint_num(int_in) != USB_REG_IN_PIPE || usb_endpoint_num(int_out) != USB_REG_OUT_PIPE) { dev_err(&udev->dev, "ath9k_htc: Device endpoint numbers are not the expected ones\n"); return -ENODEV; } if (id->driver_info == STORAGE_DEVICE) return send_eject_command(interface); hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL); if (!hif_dev) { ret = -ENOMEM; goto err_alloc; } usb_get_dev(udev); hif_dev->udev = udev; hif_dev->interface = interface; hif_dev->usb_device_id = id; #ifdef CONFIG_PM udev->reset_resume = 1; #endif usb_set_intfdata(interface, hif_dev); init_completion(&hif_dev->fw_done); ret = ath9k_hif_request_firmware(hif_dev, true); if (ret) goto err_fw_req; return ret; err_fw_req: usb_set_intfdata(interface, NULL); kfree(hif_dev); usb_put_dev(udev); err_alloc: return ret; } static void ath9k_hif_usb_reboot(struct usb_device *udev) { u32 reboot_cmd = 0xffffffff; void *buf; int ret; buf = kmemdup(&reboot_cmd, 4, GFP_KERNEL); if (!buf) return; ret = usb_interrupt_msg(udev, usb_sndintpipe(udev, USB_REG_OUT_PIPE), buf, 4, NULL, USB_MSG_TIMEOUT); if (ret) dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n"); kfree(buf); } static void ath9k_hif_usb_disconnect(struct usb_interface *interface) { struct usb_device *udev = interface_to_usbdev(interface); struct hif_device_usb *hif_dev = usb_get_intfdata(interface); bool unplugged = udev->state == USB_STATE_NOTATTACHED; if (!hif_dev) return; wait_for_completion(&hif_dev->fw_done); if (hif_dev->flags & HIF_USB_READY) { ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); ath9k_htc_hw_free(hif_dev->htc_handle); } usb_set_intfdata(interface, 
NULL); /* If firmware was loaded we should drop it and * go back to the first stage bootloader. */ if (!unplugged && (hif_dev->flags & HIF_USB_READY)) ath9k_hif_usb_reboot(udev); kfree(hif_dev); dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n"); usb_put_dev(udev); } #ifdef CONFIG_PM static int ath9k_hif_usb_suspend(struct usb_interface *interface, pm_message_t message) { struct hif_device_usb *hif_dev = usb_get_intfdata(interface); /* * The device has to be set to FULLSLEEP mode in case no * interface is up. */ if (!(hif_dev->flags & HIF_USB_START)) ath9k_htc_suspend(hif_dev->htc_handle); wait_for_completion(&hif_dev->fw_done); if (hif_dev->flags & HIF_USB_READY) ath9k_hif_usb_dealloc_urbs(hif_dev); return 0; } static int ath9k_hif_usb_resume(struct usb_interface *interface) { struct hif_device_usb *hif_dev = usb_get_intfdata(interface); struct htc_target *htc_handle = hif_dev->htc_handle; const struct firmware *fw; int ret; ret = ath9k_hif_usb_alloc_urbs(hif_dev); if (ret) return ret; if (!(hif_dev->flags & HIF_USB_READY)) { ret = -EIO; goto fail_resume; } /* request cached firmware during suspend/resume cycle */ ret = request_firmware(&fw, hif_dev->fw_name, &hif_dev->udev->dev); if (ret) goto fail_resume; hif_dev->fw_data = fw->data; hif_dev->fw_size = fw->size; ret = ath9k_hif_usb_download_fw(hif_dev); release_firmware(fw); if (ret) goto fail_resume; mdelay(100); ret = ath9k_htc_resume(htc_handle); if (ret) goto fail_resume; return 0; fail_resume: ath9k_hif_usb_dealloc_urbs(hif_dev); return ret; } #endif static struct usb_driver ath9k_hif_usb_driver = { .name = KBUILD_MODNAME, .probe = ath9k_hif_usb_probe, .disconnect = ath9k_hif_usb_disconnect, #ifdef CONFIG_PM .suspend = ath9k_hif_usb_suspend, .resume = ath9k_hif_usb_resume, .reset_resume = ath9k_hif_usb_resume, #endif .id_table = ath9k_hif_usb_ids, .soft_unbind = 1, .disable_hub_initiated_lpm = 1, }; int ath9k_hif_usb_init(void) { return usb_register(&ath9k_hif_usb_driver); } void ath9k_hif_usb_exit(void) { usb_deregister(&ath9k_hif_usb_driver); }
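/*
 * Editor's note (not part of the driver): ath9k_hif_usb_init() and
 * ath9k_hif_usb_exit() are exported so that the ath9k_htc core module can
 * register the USB transport from its own module_init().  A driver whose
 * module consists of exactly one usb_driver would normally use the
 * equivalent shorthand, which expands to the same
 * usb_register()/usb_deregister() pair:
 *
 *	module_usb_driver(ath9k_hif_usb_driver);
 */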
// SPDX-License-Identifier: GPL-2.0-or-later /* * Mirics MSi2500 driver * Mirics MSi3101 SDR Dongle driver * * Copyright (C) 2013 Antti Palosaari <crope@iki.fi> * * This driver is loosely based on the pwc driver: * (C) 1999-2004 Nemosoft Unv. * (C) 2004-2006 Luc Saillard (luc@saillard.org) * (C) 2011 Hans de Goede <hdegoede@redhat.com> */ #include <linux/module.h> #include <linux/slab.h> #include <asm/div64.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <linux/usb.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-vmalloc.h> #include <linux/spi/spi.h> static bool msi2500_emulated_fmt; module_param_named(emulated_formats, msi2500_emulated_fmt, bool, 0644); MODULE_PARM_DESC(emulated_formats, "enable emulated formats (disappears in future)"); /* * iConfiguration 0 * bInterfaceNumber 0 * bAlternateSetting 1 * bNumEndpoints 1 * bEndpointAddress 0x81 EP 1 IN * bmAttributes 1 * Transfer Type Isochronous * wMaxPacketSize 0x1400 3x 1024 bytes * bInterval 1 */ #define MAX_ISO_BUFS (8) #define ISO_FRAMES_PER_DESC (8) #define ISO_MAX_FRAME_SIZE (3 * 1024) #define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE) #define MAX_ISOC_ERRORS 20 /* * TODO: These formats should be moved to V4L2 API. Formats are currently * disabled from formats[] table, not visible to userspace.
 */

/* signed 12-bit */
#define MSI2500_PIX_FMT_SDR_S12 v4l2_fourcc('D', 'S', '1', '2')

/* Mirics MSi2500 format 384 */
#define MSI2500_PIX_FMT_SDR_MSI2500_384 v4l2_fourcc('M', '3', '8', '4')

static const struct v4l2_frequency_band bands[] = {
	{
		.tuner = 0,
		.type = V4L2_TUNER_ADC,
		.index = 0,
		.capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
		.rangelow  =  1200000,
		.rangehigh = 15000000,
	},
};

/* stream formats */
struct msi2500_format {
	u32	pixelformat;
	u32	buffersize;
};

/* format descriptions for capture and preview */
static struct msi2500_format formats[] = {
	{
		.pixelformat	= V4L2_SDR_FMT_CS8,
		.buffersize	= 3 * 1008,
#if 0
	}, {
		.pixelformat	= MSI2500_PIX_FMT_SDR_MSI2500_384,
	}, {
		.pixelformat	= MSI2500_PIX_FMT_SDR_S12,
#endif
	}, {
		.pixelformat	= V4L2_SDR_FMT_CS14LE,
		.buffersize	= 3 * 1008,
	}, {
		.pixelformat	= V4L2_SDR_FMT_CU8,
		.buffersize	= 3 * 1008,
	}, {
		.pixelformat	= V4L2_SDR_FMT_CU16LE,
		.buffersize	= 3 * 1008,
	},
};

static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);

/* intermediate buffers with raw data from the USB device */
struct msi2500_frame_buf {
	/* common v4l buffer stuff -- must be first */
	struct vb2_v4l2_buffer vb;
	struct list_head list;
};

struct msi2500_dev {
	struct device *dev;
	struct video_device vdev;
	struct v4l2_device v4l2_dev;
	struct v4l2_subdev *v4l2_subdev;
	struct spi_controller *ctlr;

	/* videobuf2 queue and queued buffers list */
	struct vb2_queue vb_queue;
	struct list_head queued_bufs;
	spinlock_t queued_bufs_lock; /* Protects queued_bufs */

	/* Note if taking both locks v4l2_lock must always be locked first! */
	struct mutex v4l2_lock;      /* Protects everything else */
	struct mutex vb_queue_lock;  /* Protects vb_queue and capt_file */

	/* Pointer to our usb_device, will be NULL after unplug */
	struct usb_device *udev; /* Both mutexes must be held when setting! */
	unsigned int f_adc;
	u32 pixelformat;
	u32 buffersize;
	unsigned int num_formats;

	unsigned int isoc_errors; /* number of contiguous ISOC errors */
	unsigned int vb_full;     /* vb is full and packets dropped */

	struct urb *urbs[MAX_ISO_BUFS];

	/* Controls */
	struct v4l2_ctrl_handler hdl;

	u32 next_sample; /* for tracking lost packets */
	u32 sample;      /* for sample rate calc */
	unsigned long jiffies_next;
};

/* Private functions */
static struct msi2500_frame_buf *msi2500_get_next_fill_buf(
						struct msi2500_dev *dev)
{
	unsigned long flags;
	struct msi2500_frame_buf *buf = NULL;

	spin_lock_irqsave(&dev->queued_bufs_lock, flags);
	if (list_empty(&dev->queued_bufs))
		goto leave;

	buf = list_entry(dev->queued_bufs.next, struct msi2500_frame_buf,
			 list);
	list_del(&buf->list);
leave:
	spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
	return buf;
}

/*
 * +===========================================================================
 * |   00-1023 | USB packet type '504'
 * +===========================================================================
 * |   00-  03 | sequence number of first sample in that USB packet
 * +---------------------------------------------------------------------------
 * |   04-  15 | garbage
 * +---------------------------------------------------------------------------
 * |   16-1023 | samples
 * +---------------------------------------------------------------------------
 * signed 8-bit sample
 * 504 * 2 = 1008 samples
 *
 *
 * +===========================================================================
 * |   00-1023 | USB packet type '384'
 * +===========================================================================
 * |   00-  03 | sequence number of first sample in that USB packet
 * +---------------------------------------------------------------------------
 * |   04-  15 | garbage
 * +---------------------------------------------------------------------------
 * |   16- 175 | samples
 * +---------------------------------------------------------------------------
 * |  176- 179 | control bits for previous samples
 * +---------------------------------------------------------------------------
 * |  180- 339 | samples
 * +---------------------------------------------------------------------------
 * |  340- 343 | control bits for previous samples
 * +---------------------------------------------------------------------------
 * |  344- 503 | samples
 * +---------------------------------------------------------------------------
 * |  504- 507 | control bits for previous samples
 * +---------------------------------------------------------------------------
 * |  508- 667 | samples
 * +---------------------------------------------------------------------------
 * |  668- 671 | control bits for previous samples
 * +---------------------------------------------------------------------------
 * |  672- 831 | samples
 * +---------------------------------------------------------------------------
 * |  832- 835 | control bits for previous samples
 * +---------------------------------------------------------------------------
 * |  836- 995 | samples
 * +---------------------------------------------------------------------------
 * |  996- 999 | control bits for previous samples
 * +---------------------------------------------------------------------------
 * | 1000-1023 | garbage
 * +---------------------------------------------------------------------------
 *
 * Bytes 4 - 7 could have some meaning?
 *
 * Control bits for previous samples is a 32-bit field, containing 16 x 2-bit
 * numbers. This results in one 2-bit number per 8 samples. It is likely used
 * for bit-shifting each sample by the given number of bits, increasing the
 * actual sampling resolution. The number 2 (0b10) was never seen.
 *
 * 6 * 16 * 2 * 4 = 768 samples. 768 * 4 = 3072 bytes
 *
 *
 * +===========================================================================
 * |   00-1023 | USB packet type '336'
 * +===========================================================================
 * |   00-  03 | sequence number of first sample in that USB packet
 * +---------------------------------------------------------------------------
 * |   04-  15 | garbage
 * +---------------------------------------------------------------------------
 * |   16-1023 | samples
 * +---------------------------------------------------------------------------
 * signed 12-bit sample
 *
 *
 * +===========================================================================
 * |   00-1023 | USB packet type '252'
 * +===========================================================================
 * |   00-  03 | sequence number of first sample in that USB packet
 * +---------------------------------------------------------------------------
 * |   04-  15 | garbage
 * +---------------------------------------------------------------------------
 * |   16-1023 | samples
 * +---------------------------------------------------------------------------
 * signed 14-bit sample
 */
static int msi2500_convert_stream(struct msi2500_dev *dev, u8 *dst, u8 *src,
				  unsigned int src_len)
{
	unsigned int i, j, transactions, dst_len = 0;
	u32 sample[3];

	/* There could be 1-3 1024 byte transactions per packet */
	transactions = src_len / 1024;

	for (i = 0; i < transactions; i++) {
		sample[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 |
			    src[0] << 0;
		if (i == 0 && dev->next_sample != sample[0]) {
			dev_dbg_ratelimited(dev->dev,
					    "%d samples lost, %d %08x:%08x\n",
					    sample[0] - dev->next_sample,
					    src_len, dev->next_sample,
					    sample[0]);
		}

		/*
		 * Dump all unknown 'garbage' data - maybe we will discover
		 * someday if there is something rational...
		 */
		dev_dbg_ratelimited(dev->dev, "%*ph\n", 12, &src[4]);

		src += 16; /* skip header */

		switch (dev->pixelformat) {
		case V4L2_SDR_FMT_CU8: /* 504 x IQ samples */
		{
			s8 *s8src = (s8 *)src;
			u8 *u8dst = (u8 *)dst;

			for (j = 0; j < 1008; j++)
				*u8dst++ = *s8src++ + 128;

			src += 1008;
			dst += 1008;
			dst_len += 1008;
			dev->next_sample = sample[i] + 504;
			break;
		}
		case V4L2_SDR_FMT_CU16LE: /* 252 x IQ samples */
		{
			s16 *s16src = (s16 *)src;
			u16 *u16dst = (u16 *)dst;
			struct {signed int x:14; } se; /* sign extension */
			unsigned int utmp;

			for (j = 0; j < 1008; j += 2) {
				/* sign extension from 14-bit to signed int */
				se.x = *s16src++;
				/* from signed int to unsigned int */
				utmp = se.x + 8192;
				/* from 14-bit to 16-bit */
				*u16dst++ = utmp << 2 | utmp >> 12;
			}

			src += 1008;
			dst += 1008;
			dst_len += 1008;
			dev->next_sample = sample[i] + 252;
			break;
		}
		case MSI2500_PIX_FMT_SDR_MSI2500_384: /* 384 x IQ samples */
			/* Dump unknown 'garbage' data */
			dev_dbg_ratelimited(dev->dev, "%*ph\n",
					    24, &src[1000]);
			memcpy(dst, src, 984);
			src += 984 + 24;
			dst += 984;
			dst_len += 984;
			dev->next_sample = sample[i] + 384;
			break;
		case V4L2_SDR_FMT_CS8: /* 504 x IQ samples */
			memcpy(dst, src, 1008);
			src += 1008;
			dst += 1008;
			dst_len += 1008;
			dev->next_sample = sample[i] + 504;
			break;
		case MSI2500_PIX_FMT_SDR_S12: /* 336 x IQ samples */
			memcpy(dst, src, 1008);
			src += 1008;
			dst += 1008;
			dst_len += 1008;
			dev->next_sample = sample[i] + 336;
			break;
		case V4L2_SDR_FMT_CS14LE: /* 252 x IQ samples */
			memcpy(dst, src, 1008);
			src += 1008;
			dst += 1008;
			dst_len += 1008;
			dev->next_sample = sample[i] + 252;
			break;
		default:
			break;
		}
	}

	/* calculate sample rate and output it in 10 seconds intervals */
	if (unlikely(time_is_before_jiffies(dev->jiffies_next))) {
#define MSECS 10000UL
		unsigned int msecs = jiffies_to_msecs(jiffies -
				dev->jiffies_next + msecs_to_jiffies(MSECS));
		unsigned int samples = dev->next_sample - dev->sample;

		dev->jiffies_next = jiffies + msecs_to_jiffies(MSECS);
		dev->sample = dev->next_sample;
		dev_dbg(dev->dev, "size=%u samples=%u msecs=%u sample rate=%lu\n",
			src_len, samples, msecs,
			samples * 1000UL / msecs);
	}

	return dst_len;
}

/*
 * This gets called for the Isochronous pipe (stream). This is done in interrupt
 * time, so it has to be fast, not crash, and not stall. Neat.
 */
static void msi2500_isoc_handler(struct urb *urb)
{
	struct msi2500_dev *dev = (struct msi2500_dev *)urb->context;
	int i, flen, fstatus;
	unsigned char *iso_buf = NULL;
	struct msi2500_frame_buf *fbuf;

	if (unlikely(urb->status == -ENOENT ||
		     urb->status == -ECONNRESET ||
		     urb->status == -ESHUTDOWN)) {
		dev_dbg(dev->dev, "URB (%p) unlinked %ssynchronously\n",
			urb, urb->status == -ENOENT ? "" : "a");
		return;
	}

	if (unlikely(urb->status != 0)) {
		dev_dbg(dev->dev, "called with status %d\n", urb->status);
		/* Give up after a number of contiguous errors */
		if (++dev->isoc_errors > MAX_ISOC_ERRORS)
			dev_dbg(dev->dev, "Too many ISOC errors, bailing out\n");
		goto handler_end;
	} else {
		/* Reset ISOC error counter. We did get here, after all.
		 */
		dev->isoc_errors = 0;
	}

	/* Compact data */
	for (i = 0; i < urb->number_of_packets; i++) {
		void *ptr;

		/* Check frame error */
		fstatus = urb->iso_frame_desc[i].status;
		if (unlikely(fstatus)) {
			dev_dbg_ratelimited(dev->dev,
					    "frame=%d/%d has error %d skipping\n",
					    i, urb->number_of_packets, fstatus);
			continue;
		}

		/* Check if that frame contains data */
		flen = urb->iso_frame_desc[i].actual_length;
		if (unlikely(flen == 0))
			continue;

		iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset;

		/* Get free framebuffer */
		fbuf = msi2500_get_next_fill_buf(dev);
		if (unlikely(fbuf == NULL)) {
			dev->vb_full++;
			dev_dbg_ratelimited(dev->dev,
					    "video buffer is full, %d packets dropped\n",
					    dev->vb_full);
			continue;
		}

		/* fill framebuffer */
		ptr = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
		flen = msi2500_convert_stream(dev, ptr, iso_buf, flen);
		vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, flen);
		vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
	}

handler_end:
	i = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(i != 0))
		dev_dbg(dev->dev, "Error (%d) re-submitting urb\n", i);
}

static void msi2500_iso_stop(struct msi2500_dev *dev)
{
	int i;

	dev_dbg(dev->dev, "\n");

	/* Unlinking ISOC buffers one by one */
	for (i = 0; i < MAX_ISO_BUFS; i++) {
		if (dev->urbs[i]) {
			dev_dbg(dev->dev, "Unlinking URB %p\n", dev->urbs[i]);
			usb_kill_urb(dev->urbs[i]);
		}
	}
}

static void msi2500_iso_free(struct msi2500_dev *dev)
{
	int i;

	dev_dbg(dev->dev, "\n");

	/* Freeing ISOC buffers one by one */
	for (i = 0; i < MAX_ISO_BUFS; i++) {
		if (dev->urbs[i]) {
			dev_dbg(dev->dev, "Freeing URB\n");
			if (dev->urbs[i]->transfer_buffer) {
				usb_free_coherent(dev->udev,
						  dev->urbs[i]->transfer_buffer_length,
						  dev->urbs[i]->transfer_buffer,
						  dev->urbs[i]->transfer_dma);
			}
			usb_free_urb(dev->urbs[i]);
			dev->urbs[i] = NULL;
		}
	}
}

/* Both v4l2_lock and vb_queue_lock should be locked when calling this */
static void msi2500_isoc_cleanup(struct msi2500_dev *dev)
{
	dev_dbg(dev->dev, "\n");

	msi2500_iso_stop(dev);
	msi2500_iso_free(dev);
}

/* Both v4l2_lock and vb_queue_lock should be locked when calling this */
static int msi2500_isoc_init(struct msi2500_dev *dev)
{
	struct urb *urb;
	int i, j, ret;

	dev_dbg(dev->dev, "\n");

	dev->isoc_errors = 0;

	ret = usb_set_interface(dev->udev, 0, 1);
	if (ret)
		return ret;

	/* Allocate and init Isochronous urbs */
	for (i = 0; i < MAX_ISO_BUFS; i++) {
		urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
		if (urb == NULL) {
			msi2500_isoc_cleanup(dev);
			return -ENOMEM;
		}
		dev->urbs[i] = urb;
		dev_dbg(dev->dev, "Allocated URB at 0x%p\n", urb);

		urb->interval = 1;
		urb->dev = dev->udev;
		urb->pipe = usb_rcvisocpipe(dev->udev, 0x81);
		urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
		urb->transfer_buffer = usb_alloc_coherent(dev->udev,
							  ISO_BUFFER_SIZE,
							  GFP_KERNEL,
							  &urb->transfer_dma);
		if (urb->transfer_buffer == NULL) {
			dev_err(dev->dev,
				"Failed to allocate urb buffer %d\n", i);
			msi2500_isoc_cleanup(dev);
			return -ENOMEM;
		}
		urb->transfer_buffer_length = ISO_BUFFER_SIZE;
		urb->complete = msi2500_isoc_handler;
		urb->context = dev;
		urb->start_frame = 0;
		urb->number_of_packets = ISO_FRAMES_PER_DESC;
		for (j = 0; j < ISO_FRAMES_PER_DESC; j++) {
			urb->iso_frame_desc[j].offset = j * ISO_MAX_FRAME_SIZE;
			urb->iso_frame_desc[j].length = ISO_MAX_FRAME_SIZE;
		}
	}

	/* link */
	for (i = 0; i < MAX_ISO_BUFS; i++) {
		ret = usb_submit_urb(dev->urbs[i], GFP_KERNEL);
		if (ret) {
			dev_err(dev->dev,
				"usb_submit_urb %d failed with error %d\n",
				i, ret);
			msi2500_isoc_cleanup(dev);
			return ret;
		}
		dev_dbg(dev->dev, "URB 0x%p submitted.\n", dev->urbs[i]);
	}

	/* All is done...
	 */
	return 0;
}

/* Must be called with vb_queue_lock held */
static void msi2500_cleanup_queued_bufs(struct msi2500_dev *dev)
{
	unsigned long flags;

	dev_dbg(dev->dev, "\n");

	spin_lock_irqsave(&dev->queued_bufs_lock, flags);
	while (!list_empty(&dev->queued_bufs)) {
		struct msi2500_frame_buf *buf;

		buf = list_entry(dev->queued_bufs.next,
				 struct msi2500_frame_buf, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
}

/* The user yanked out the cable... */
static void msi2500_disconnect(struct usb_interface *intf)
{
	struct v4l2_device *v = usb_get_intfdata(intf);
	struct msi2500_dev *dev =
			container_of(v, struct msi2500_dev, v4l2_dev);

	dev_dbg(dev->dev, "\n");

	mutex_lock(&dev->vb_queue_lock);
	mutex_lock(&dev->v4l2_lock);
	/* No need to keep the urbs around after disconnection */
	dev->udev = NULL;
	v4l2_device_disconnect(&dev->v4l2_dev);
	video_unregister_device(&dev->vdev);
	spi_unregister_controller(dev->ctlr);
	mutex_unlock(&dev->v4l2_lock);
	mutex_unlock(&dev->vb_queue_lock);

	v4l2_device_put(&dev->v4l2_dev);
}

static int msi2500_querycap(struct file *file, void *fh,
			    struct v4l2_capability *cap)
{
	struct msi2500_dev *dev = video_drvdata(file);

	dev_dbg(dev->dev, "\n");

	strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
	strscpy(cap->card, dev->vdev.name, sizeof(cap->card));
	usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
	return 0;
}

/* Videobuf2 operations */
static int msi2500_queue_setup(struct vb2_queue *vq,
			       unsigned int *nbuffers,
			       unsigned int *nplanes, unsigned int sizes[],
			       struct device *alloc_devs[])
{
	struct msi2500_dev *dev = vb2_get_drv_priv(vq);

	dev_dbg(dev->dev, "nbuffers=%d\n", *nbuffers);

	/* Absolute min and max number of buffers available for mmap() */
	*nbuffers = clamp_t(unsigned int, *nbuffers, 8, 32);
	*nplanes = 1;
	sizes[0] = PAGE_ALIGN(dev->buffersize);
	dev_dbg(dev->dev, "nbuffers=%d sizes[0]=%d\n", *nbuffers, sizes[0]);
	return 0;
}

static void msi2500_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct msi2500_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct msi2500_frame_buf *buf =
			container_of(vbuf, struct msi2500_frame_buf, vb);
	unsigned long flags;

	/* Check the device has not disconnected between prep and queuing */
	if (unlikely(!dev->udev)) {
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		return;
	}

	spin_lock_irqsave(&dev->queued_bufs_lock, flags);
	list_add_tail(&buf->list, &dev->queued_bufs);
	spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
}

#define CMD_WREG		0x41
#define CMD_START_STREAMING	0x43
#define CMD_STOP_STREAMING	0x45
#define CMD_READ_UNKNOWN	0x48

#define msi2500_dbg_usb_control_msg(_dev, _r, _t, _v, _i, _b, _l) {	\
	char *_direction;						\
	if (_t & USB_DIR_IN)						\
		_direction = "<<<";					\
	else								\
		_direction = ">>>";					\
	dev_dbg(_dev, "%02x %02x %02x %02x %02x %02x %02x %02x %s %*ph\n", \
		_t, _r, _v & 0xff, _v >> 8, _i & 0xff, _i >> 8,		\
		_l & 0xff, _l >> 8, _direction, _l, _b);		\
}

static int msi2500_ctrl_msg(struct msi2500_dev *dev, u8 cmd, u32 data)
{
	int ret;
	u8 request = cmd;
	u8 requesttype = USB_DIR_OUT | USB_TYPE_VENDOR;
	u16 value = (data >> 0) & 0xffff;
	u16 index = (data >> 16) & 0xffff;

	msi2500_dbg_usb_control_msg(dev->dev, request, requesttype,
				    value, index, NULL, 0);
	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      request, requesttype, value, index, NULL, 0,
			      2000);
	if (ret)
		dev_err(dev->dev, "failed %d, cmd %02x, data %04x\n",
			ret, cmd, data);

	return ret;
}

static int
msi2500_set_usb_adc(struct msi2500_dev *dev)
{
	int ret;
	unsigned int f_vco, f_sr, div_n, k, k_cw, div_out;
	u32 reg3, reg4, reg7;
	struct v4l2_ctrl *bandwidth_auto;
	struct v4l2_ctrl *bandwidth;

	f_sr = dev->f_adc;

	/* set tuner, subdev, filters according to sampling rate */
	bandwidth_auto = v4l2_ctrl_find(&dev->hdl,
					V4L2_CID_RF_TUNER_BANDWIDTH_AUTO);
	if (v4l2_ctrl_g_ctrl(bandwidth_auto)) {
		bandwidth = v4l2_ctrl_find(&dev->hdl,
					   V4L2_CID_RF_TUNER_BANDWIDTH);
		v4l2_ctrl_s_ctrl(bandwidth, dev->f_adc);
	}

	/* select stream format */
	switch (dev->pixelformat) {
	case V4L2_SDR_FMT_CU8:
		reg7 = 0x000c9407; /* 504 */
		break;
	case V4L2_SDR_FMT_CU16LE:
		reg7 = 0x00009407; /* 252 */
		break;
	case V4L2_SDR_FMT_CS8:
		reg7 = 0x000c9407; /* 504 */
		break;
	case MSI2500_PIX_FMT_SDR_MSI2500_384:
		reg7 = 0x0000a507; /* 384 */
		break;
	case MSI2500_PIX_FMT_SDR_S12:
		reg7 = 0x00008507; /* 336 */
		break;
	case V4L2_SDR_FMT_CS14LE:
		reg7 = 0x00009407; /* 252 */
		break;
	default:
		reg7 = 0x000c9407; /* 504 */
		break;
	}

	/*
	 * Fractional-N synthesizer
	 *
	 *          +----------------------------------------+
	 *          v                                        |
	 *  Fref  +----+     +-----+     +-----+     +------+     +---+
	 * ------>| PD |---->| VCO |---->| /2  |---->| /N.F |<----| K |
	 *        +----+     +-----+     +-----+     +------+     +---+
	 *                      |
	 *                      |
	 *                      v
	 *                  +-------+     +-----+  Fout
	 *                  | /Rout |---->| /12 |------>
	 *                  +-------+     +-----+
	 */

	/*
	 * Synthesizer config is just an educated guess...
	 *
	 * [7:0]   0x03, register address
	 * [8]     1, power control
	 * [9]     ?, power control
	 * [12:10] output divider
	 * [13]    0 ?
	 * [14]    0 ?
	 * [15]    fractional MSB, bit 20
	 * [16:19] N
	 * [23:20] ?
	 * [24:31] 0x01
	 *
	 * output divider
	 * val   div
	 *  0     - (invalid)
	 *  1     4
	 *  2     6
	 *  3     8
	 *  4    10
	 *  5    12
	 *  6    14
	 *  7    16
	 *
	 * VCO 202000000 - 720000000++
	 */

#define F_REF 24000000
#define DIV_PRE_N 2
#define DIV_LO_OUT 12
	reg3 = 0x01000303;
	reg4 = 0x00000004;

	/* XXX: Filters? AGC? VCO band? */
	if (f_sr < 6000000)
		reg3 |= 0x1 << 20;
	else if (f_sr < 7000000)
		reg3 |= 0x5 << 20;
	else if (f_sr < 8500000)
		reg3 |= 0x9 << 20;
	else
		reg3 |= 0xd << 20;

	for (div_out = 4; div_out < 16; div_out += 2) {
		f_vco = f_sr * div_out * DIV_LO_OUT;
		dev_dbg(dev->dev, "div_out=%u f_vco=%u\n", div_out, f_vco);
		if (f_vco >= 202000000)
			break;
	}

	/* Calculate PLL integer and fractional control word.
	 */
	div_n = div_u64_rem(f_vco, DIV_PRE_N * F_REF, &k);
	k_cw = div_u64((u64)k * 0x200000, DIV_PRE_N * F_REF);

	reg3 |= div_n << 16;
	reg3 |= (div_out / 2 - 1) << 10;
	reg3 |= ((k_cw >> 20) & 0x000001) << 15; /* [20] */
	reg4 |= ((k_cw >>  0) & 0x0fffff) <<  8; /* [19:0] */

	dev_dbg(dev->dev,
		"f_sr=%u f_vco=%u div_n=%u k=%u div_out=%u reg3=%08x reg4=%08x\n",
		f_sr, f_vco, div_n, k, div_out, reg3, reg4);

	ret = msi2500_ctrl_msg(dev, CMD_WREG, 0x00608008);
	if (ret)
		goto err;

	ret = msi2500_ctrl_msg(dev, CMD_WREG, 0x00000c05);
	if (ret)
		goto err;

	ret = msi2500_ctrl_msg(dev, CMD_WREG, 0x00020000);
	if (ret)
		goto err;

	ret = msi2500_ctrl_msg(dev, CMD_WREG, 0x00480102);
	if (ret)
		goto err;

	ret = msi2500_ctrl_msg(dev, CMD_WREG, 0x00f38008);
	if (ret)
		goto err;

	ret = msi2500_ctrl_msg(dev, CMD_WREG, reg7);
	if (ret)
		goto err;

	ret = msi2500_ctrl_msg(dev, CMD_WREG, reg4);
	if (ret)
		goto err;

	ret = msi2500_ctrl_msg(dev, CMD_WREG, reg3);
err:
	return ret;
}

static int msi2500_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct msi2500_dev *dev = vb2_get_drv_priv(vq);
	int ret;

	dev_dbg(dev->dev, "\n");

	if (!dev->udev)
		return -ENODEV;

	if (mutex_lock_interruptible(&dev->v4l2_lock))
		return -ERESTARTSYS;

	/* wake-up tuner */
	v4l2_subdev_call(dev->v4l2_subdev, core, s_power, 1);

	ret = msi2500_set_usb_adc(dev);

	ret = msi2500_isoc_init(dev);
	if (ret)
		msi2500_cleanup_queued_bufs(dev);

	ret = msi2500_ctrl_msg(dev, CMD_START_STREAMING, 0);

	mutex_unlock(&dev->v4l2_lock);

	return ret;
}

static void msi2500_stop_streaming(struct vb2_queue *vq)
{
	struct msi2500_dev *dev = vb2_get_drv_priv(vq);

	dev_dbg(dev->dev, "\n");

	mutex_lock(&dev->v4l2_lock);

	if (dev->udev)
		msi2500_isoc_cleanup(dev);

	msi2500_cleanup_queued_bufs(dev);

	/* according to tests, at least 700us delay is required */
	msleep(20);
	if (dev->udev && !msi2500_ctrl_msg(dev, CMD_STOP_STREAMING, 0)) {
		/* sleep USB IF / ADC */
		msi2500_ctrl_msg(dev, CMD_WREG, 0x01000003);
	}

	/* sleep tuner */
	v4l2_subdev_call(dev->v4l2_subdev, core, s_power, 0);

	mutex_unlock(&dev->v4l2_lock);
}

static const struct vb2_ops msi2500_vb2_ops = {
	.queue_setup            = msi2500_queue_setup,
	.buf_queue              = msi2500_buf_queue,
	.start_streaming        = msi2500_start_streaming,
	.stop_streaming         = msi2500_stop_streaming,
};

static int msi2500_enum_fmt_sdr_cap(struct file *file, void *priv,
				    struct v4l2_fmtdesc *f)
{
	struct msi2500_dev *dev = video_drvdata(file);

	dev_dbg(dev->dev, "index=%d\n", f->index);

	if (f->index >= dev->num_formats)
		return -EINVAL;

	f->pixelformat = formats[f->index].pixelformat;

	return 0;
}

static int msi2500_g_fmt_sdr_cap(struct file *file, void *priv,
				 struct v4l2_format *f)
{
	struct msi2500_dev *dev = video_drvdata(file);

	dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n",
		(char *)&dev->pixelformat);

	f->fmt.sdr.pixelformat = dev->pixelformat;
	f->fmt.sdr.buffersize = dev->buffersize;

	return 0;
}

static int msi2500_s_fmt_sdr_cap(struct file *file, void *priv,
				 struct v4l2_format *f)
{
	struct msi2500_dev *dev = video_drvdata(file);
	struct vb2_queue *q = &dev->vb_queue;
	int i;

	dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n",
		(char *)&f->fmt.sdr.pixelformat);

	if (vb2_is_busy(q))
		return -EBUSY;

	for (i = 0; i < dev->num_formats; i++) {
		if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
			dev->pixelformat = formats[i].pixelformat;
			dev->buffersize = formats[i].buffersize;
			f->fmt.sdr.buffersize = formats[i].buffersize;
			return 0;
		}
	}

	dev->pixelformat = formats[0].pixelformat;
	dev->buffersize = formats[0].buffersize;
	f->fmt.sdr.pixelformat = formats[0].pixelformat;
	f->fmt.sdr.buffersize = formats[0].buffersize;
	return 0;
}

static int msi2500_try_fmt_sdr_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct msi2500_dev *dev = video_drvdata(file);
	int i;

	dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n",
		(char *)&f->fmt.sdr.pixelformat);

	for (i = 0; i < dev->num_formats; i++) {
		if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
			f->fmt.sdr.buffersize = formats[i].buffersize;
			return 0;
		}
	}

	f->fmt.sdr.pixelformat = formats[0].pixelformat;
	f->fmt.sdr.buffersize = formats[0].buffersize;

	return 0;
}

static int msi2500_s_tuner(struct file *file, void *priv,
			   const struct v4l2_tuner *v)
{
	struct msi2500_dev *dev = video_drvdata(file);
	int ret;

	dev_dbg(dev->dev, "index=%d\n", v->index);

	if (v->index == 0)
		ret = 0;
	else if (v->index == 1)
		ret = v4l2_subdev_call(dev->v4l2_subdev, tuner, s_tuner, v);
	else
		ret = -EINVAL;

	return ret;
}

static int msi2500_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v)
{
	struct msi2500_dev *dev = video_drvdata(file);
	int ret;

	dev_dbg(dev->dev, "index=%d\n", v->index);

	if (v->index == 0) {
		strscpy(v->name, "Mirics MSi2500", sizeof(v->name));
		v->type = V4L2_TUNER_ADC;
		v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
		v->rangelow  =  1200000;
		v->rangehigh = 15000000;
		ret = 0;
	} else if (v->index == 1) {
		ret = v4l2_subdev_call(dev->v4l2_subdev, tuner, g_tuner, v);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int msi2500_g_frequency(struct file *file, void *priv,
			       struct v4l2_frequency *f)
{
	struct msi2500_dev *dev = video_drvdata(file);
	int ret = 0;

	dev_dbg(dev->dev, "tuner=%d type=%d\n", f->tuner, f->type);

	if (f->tuner == 0) {
		f->frequency = dev->f_adc;
		ret = 0;
	} else if (f->tuner == 1) {
		f->type = V4L2_TUNER_RF;
		ret = v4l2_subdev_call(dev->v4l2_subdev, tuner, g_frequency, f);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int msi2500_s_frequency(struct file *file, void *priv,
			       const struct v4l2_frequency *f)
{
	struct msi2500_dev *dev = video_drvdata(file);
	int ret;

	dev_dbg(dev->dev, "tuner=%d type=%d frequency=%u\n",
		f->tuner, f->type, f->frequency);

	if (f->tuner == 0) {
		dev->f_adc = clamp_t(unsigned int, f->frequency,
				     bands[0].rangelow, bands[0].rangehigh);
		dev_dbg(dev->dev, "ADC frequency=%u Hz\n", dev->f_adc);
		ret = msi2500_set_usb_adc(dev);
	} else if (f->tuner == 1) {
		ret = v4l2_subdev_call(dev->v4l2_subdev, tuner, s_frequency, f);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int msi2500_enum_freq_bands(struct file *file, void *priv,
				   struct v4l2_frequency_band *band)
{
	struct msi2500_dev *dev = video_drvdata(file);
	int ret;

	dev_dbg(dev->dev, "tuner=%d type=%d index=%d\n",
		band->tuner, band->type, band->index);

	if (band->tuner == 0) {
		if (band->index >= ARRAY_SIZE(bands)) {
			ret = -EINVAL;
		} else {
			*band = bands[band->index];
			ret = 0;
		}
	} else if (band->tuner == 1) {
		ret = v4l2_subdev_call(dev->v4l2_subdev, tuner,
				       enum_freq_bands, band);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static const struct v4l2_ioctl_ops msi2500_ioctl_ops = {
	.vidioc_querycap          = msi2500_querycap,

	.vidioc_enum_fmt_sdr_cap  = msi2500_enum_fmt_sdr_cap,
	.vidioc_g_fmt_sdr_cap     = msi2500_g_fmt_sdr_cap,
	.vidioc_s_fmt_sdr_cap     = msi2500_s_fmt_sdr_cap,
	.vidioc_try_fmt_sdr_cap   = msi2500_try_fmt_sdr_cap,

	.vidioc_reqbufs           = vb2_ioctl_reqbufs,
	.vidioc_create_bufs       = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf       = vb2_ioctl_prepare_buf,
	.vidioc_querybuf          = vb2_ioctl_querybuf,
	.vidioc_qbuf              = vb2_ioctl_qbuf,
	.vidioc_dqbuf             = vb2_ioctl_dqbuf,

	.vidioc_streamon          = vb2_ioctl_streamon,
	.vidioc_streamoff         = vb2_ioctl_streamoff,

	.vidioc_g_tuner           = msi2500_g_tuner,
	.vidioc_s_tuner           = msi2500_s_tuner,
	.vidioc_g_frequency       = msi2500_g_frequency,
	.vidioc_s_frequency       = msi2500_s_frequency,
	.vidioc_enum_freq_bands   = msi2500_enum_freq_bands,

	.vidioc_subscribe_event   = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
	.vidioc_log_status        = v4l2_ctrl_log_status,
};

static const struct v4l2_file_operations msi2500_fops = {
	.owner                    = THIS_MODULE,
	.open                     = v4l2_fh_open,
	.release                  = vb2_fop_release,
	.read                     = vb2_fop_read,
	.poll                     = vb2_fop_poll,
	.mmap                     = vb2_fop_mmap,
	.unlocked_ioctl           = video_ioctl2,
};

static const struct video_device msi2500_template = {
	.name                     = "Mirics MSi3101 SDR Dongle",
	.release                  = video_device_release_empty,
	.fops                     = &msi2500_fops,
	.ioctl_ops                = &msi2500_ioctl_ops,
};

static void msi2500_video_release(struct v4l2_device *v)
{
	struct msi2500_dev *dev = container_of(v, struct msi2500_dev,
					       v4l2_dev);

	v4l2_ctrl_handler_free(&dev->hdl);
	v4l2_device_unregister(&dev->v4l2_dev);
	kfree(dev);
}

static int msi2500_transfer_one_message(struct spi_controller *ctlr,
					struct spi_message *m)
{
	struct msi2500_dev *dev = spi_controller_get_devdata(ctlr);
	struct spi_transfer *t;
	int ret = 0;
	u32 data;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		dev_dbg(dev->dev, "msg=%*ph\n", t->len, t->tx_buf);
		data = 0x09; /* reg 9 is SPI adapter */
		data |= ((u8 *)t->tx_buf)[0] << 8;
		data |= ((u8 *)t->tx_buf)[1] << 16;
		data |= ((u8 *)t->tx_buf)[2] << 24;
		ret = msi2500_ctrl_msg(dev, CMD_WREG, data);
	}

	m->status = ret;
	spi_finalize_current_message(ctlr);
	return ret;
}

static int msi2500_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct msi2500_dev *dev;
	struct v4l2_subdev *sd;
	struct spi_controller *ctlr;
	int ret;
	static struct spi_board_info board_info = {
		.modalias               = "msi001",
		.bus_num                = 0,
		.chip_select            = 0,
		.max_speed_hz           = 12000000,
	};

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto err;
	}

	mutex_init(&dev->v4l2_lock);
	mutex_init(&dev->vb_queue_lock);
	spin_lock_init(&dev->queued_bufs_lock);
	INIT_LIST_HEAD(&dev->queued_bufs);
	dev->dev = &intf->dev;
	dev->udev = interface_to_usbdev(intf);
	dev->f_adc = bands[0].rangelow;
	dev->pixelformat = formats[0].pixelformat;
	dev->buffersize = formats[0].buffersize;
	dev->num_formats = NUM_FORMATS;
	if (!msi2500_emulated_fmt)
		dev->num_formats -= 2;

	/* Init videobuf2 queue structure */
	dev->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
	dev->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	dev->vb_queue.drv_priv = dev;
	dev->vb_queue.buf_struct_size = sizeof(struct msi2500_frame_buf);
	dev->vb_queue.ops = &msi2500_vb2_ops;
	dev->vb_queue.mem_ops = &vb2_vmalloc_memops;
	dev->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	dev->vb_queue.lock = &dev->vb_queue_lock;
	ret = vb2_queue_init(&dev->vb_queue);
	if (ret) {
		dev_err(dev->dev, "Could not initialize vb2 queue\n");
		goto err_free_mem;
	}

	/* Init video_device structure */
	dev->vdev = msi2500_template;
	dev->vdev.queue = &dev->vb_queue;
	video_set_drvdata(&dev->vdev, dev);

	/* Register the v4l2_device structure */
	dev->v4l2_dev.release = msi2500_video_release;
	ret = v4l2_device_register(&intf->dev, &dev->v4l2_dev);
	if (ret) {
		dev_err(dev->dev, "Failed to register v4l2-device (%d)\n", ret);
		goto err_free_mem;
	}

	/* SPI host adapter */
	ctlr = spi_alloc_host(dev->dev, 0);
	if (ctlr == NULL) {
		ret = -ENOMEM;
		goto err_unregister_v4l2_dev;
	}

	dev->ctlr = ctlr;
	ctlr->bus_num = -1;
	ctlr->num_chipselect = 1;
	ctlr->transfer_one_message = msi2500_transfer_one_message;
	spi_controller_set_devdata(ctlr, dev);
	ret = spi_register_controller(ctlr);
	if (ret) {
		spi_controller_put(ctlr);
		goto err_unregister_v4l2_dev;
	}

	/* load v4l2 subdevice */
	sd = v4l2_spi_new_subdev(&dev->v4l2_dev, ctlr, &board_info);
	dev->v4l2_subdev = sd;
	if (sd == NULL) {
		dev_err(dev->dev, "cannot get v4l2 subdevice\n");
		ret = -ENODEV;
		goto err_unregister_controller;
	}

	/* Register controls */
	v4l2_ctrl_handler_init(&dev->hdl, 0);
	if (dev->hdl.error) {
		ret = dev->hdl.error;
		dev_err(dev->dev, "Could not initialize controls\n");
		goto err_free_controls;
	}

	/* currently all controls are from subdev */
	v4l2_ctrl_add_handler(&dev->hdl, sd->ctrl_handler, NULL, true);

	dev->v4l2_dev.ctrl_handler = &dev->hdl;
	dev->vdev.v4l2_dev = &dev->v4l2_dev;
	dev->vdev.lock = &dev->v4l2_lock;
	dev->vdev.device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
				V4L2_CAP_READWRITE | V4L2_CAP_TUNER;

	ret = video_register_device(&dev->vdev, VFL_TYPE_SDR, -1);
	if (ret) {
		dev_err(dev->dev,
			"Failed to register as video device (%d)\n", ret);
		goto err_unregister_v4l2_dev;
	}
	dev_info(dev->dev, "Registered as %s\n",
		 video_device_node_name(&dev->vdev));
	dev_notice(dev->dev,
		   "SDR API is still slightly experimental and functionality changes may follow\n");
	return 0;

err_free_controls:
	v4l2_ctrl_handler_free(&dev->hdl);
err_unregister_controller:
	spi_unregister_controller(dev->ctlr);
err_unregister_v4l2_dev:
	v4l2_device_unregister(&dev->v4l2_dev);
err_free_mem:
	kfree(dev);
err:
	return ret;
}

/* USB device ID list */
static const struct usb_device_id msi2500_id_table[] = {
	{ USB_DEVICE(0x1df7, 0x2500) }, /* Mirics MSi3101 SDR Dongle */
	{ USB_DEVICE(0x2040, 0xd300) }, /* Hauppauge WinTV 133559 LF */
	{ }
};
MODULE_DEVICE_TABLE(usb, msi2500_id_table);

/* USB subsystem interface */
static struct usb_driver msi2500_driver = {
	.name                     = KBUILD_MODNAME,
	.probe                    = msi2500_probe,
	.disconnect               = msi2500_disconnect,
	.id_table                 = msi2500_id_table,
};

module_usb_driver(msi2500_driver);

MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Mirics MSi3101 SDR Dongle");
MODULE_LICENSE("GPL");
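The SDR node that msi2500_probe() registers is driven through the standard V4L2 SDR ioctls. Below is a minimal userspace sketch, not part of the driver source: the device path /dev/swradio0, the CU8 format choice, and the 8 MHz sampling rate are illustrative assumptions. It selects a stream format on the SDR buffer type, programs the ADC sampling rate via tuner 0 (the driver exposes the ADC as tuner 0 and the RF tuner as tuner 1), and captures converted samples with plain read(), which the driver supports through vb2_fop_read and V4L2_CAP_READWRITE.

/* Hypothetical userspace capture sketch for the msi2500 SDR node. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	struct v4l2_frequency freq;
	unsigned char buf[3 * 1008];	/* one converted URB packet worth */
	ssize_t n;
	int fd = open("/dev/swradio0", O_RDONLY);	/* assumed node name */

	if (fd < 0)
		return 1;

	/* select the unsigned 8-bit I/Q stream format */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_SDR_CAPTURE;
	fmt.fmt.sdr.pixelformat = V4L2_SDR_FMT_CU8;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
		return 1;

	/* tuner 0 is the ADC; capability is 1 Hz units, range 1.2-15 MHz */
	memset(&freq, 0, sizeof(freq));
	freq.tuner = 0;
	freq.type = V4L2_TUNER_ADC;
	freq.frequency = 8000000;	/* 8 Msps, within bands[0] */
	if (ioctl(fd, VIDIOC_S_FREQUENCY, &freq) < 0)
		return 1;

	n = read(fd, buf, sizeof(buf));
	printf("read %zd bytes of CU8 I/Q samples\n", n);
	close(fd);
	return 0;
}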
// SPDX-License-Identifier: GPL-2.0-or-later
/* Task credentials management - see Documentation/security/credentials.rst
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) "CRED: " fmt

#include <linux/export.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/init_task.h>
#include <linux/security.h>
#include <linux/binfmts.h>
#include <linux/cn_proc.h>
#include <linux/uidgid.h>

#if 0
#define kdebug(FMT, ...)						\
	printk("[%-5.5s%5u] " FMT "\n",					\
	       current->comm, current->pid, ##__VA_ARGS__)
#else
#define kdebug(FMT, ...)						\
	do {								\
		if (0)							\
			no_printk("[%-5.5s%5u] " FMT "\n",		\
				  current->comm, current->pid,		\
				  ##__VA_ARGS__);			\
	} while (0)
#endif

static struct kmem_cache *cred_jar;

/* init to 2 - one for init_task, one to ensure it is never freed */
static struct group_info init_groups = { .usage = REFCOUNT_INIT(2) };

/*
 * The initial credentials for the initial task
 */
struct cred init_cred = {
	.usage			= ATOMIC_LONG_INIT(4),
	.uid			= GLOBAL_ROOT_UID,
	.gid			= GLOBAL_ROOT_GID,
	.suid			= GLOBAL_ROOT_UID,
	.sgid			= GLOBAL_ROOT_GID,
	.euid			= GLOBAL_ROOT_UID,
	.egid			= GLOBAL_ROOT_GID,
	.fsuid			= GLOBAL_ROOT_UID,
	.fsgid			= GLOBAL_ROOT_GID,
	.securebits		= SECUREBITS_DEFAULT,
	.cap_inheritable	= CAP_EMPTY_SET,
	.cap_permitted		= CAP_FULL_SET,
	.cap_effective		= CAP_FULL_SET,
	.cap_bset		= CAP_FULL_SET,
	.user			= INIT_USER,
	.user_ns		= &init_user_ns,
	.group_info		= &init_groups,
	.ucounts		= &init_ucounts,
};

/*
 * The RCU callback to actually dispose of a set of credentials
 */
static void put_cred_rcu(struct rcu_head *rcu)
{
	struct cred *cred = container_of(rcu, struct cred, rcu);

	kdebug("put_cred_rcu(%p)", cred);

	if (atomic_long_read(&cred->usage) != 0)
		panic("CRED: put_cred_rcu() sees %p with usage %ld\n",
		      cred, atomic_long_read(&cred->usage));

	security_cred_free(cred);
	key_put(cred->session_keyring);
	key_put(cred->process_keyring);
	key_put(cred->thread_keyring);
	key_put(cred->request_key_auth);
	if (cred->group_info)
		put_group_info(cred->group_info);
	free_uid(cred->user);
	if (cred->ucounts)
		put_ucounts(cred->ucounts);
	put_user_ns(cred->user_ns);
	kmem_cache_free(cred_jar, cred);
}

/**
 * __put_cred - Destroy a set of credentials
 * @cred: The record to release
 *
 * Destroy a set of credentials on which no references remain.
 */
void __put_cred(struct cred *cred)
{
	kdebug("__put_cred(%p{%ld})", cred,
	       atomic_long_read(&cred->usage));

	BUG_ON(atomic_long_read(&cred->usage) != 0);
	BUG_ON(cred == current->cred);
	BUG_ON(cred == current->real_cred);

	if (cred->non_rcu)
		put_cred_rcu(&cred->rcu);
	else
		call_rcu(&cred->rcu, put_cred_rcu);
}
EXPORT_SYMBOL(__put_cred);

/*
 * Clean up a task's credentials when it exits
 */
void exit_creds(struct task_struct *tsk)
{
	struct cred *real_cred, *cred;

	kdebug("exit_creds(%u,%p,%p,{%ld})", tsk->pid, tsk->real_cred,
	       tsk->cred, atomic_long_read(&tsk->cred->usage));

	real_cred = (struct cred *) tsk->real_cred;
	tsk->real_cred = NULL;

	cred = (struct cred *) tsk->cred;
	tsk->cred = NULL;

	if (real_cred == cred) {
		put_cred_many(cred, 2);
	} else {
		put_cred(real_cred);
		put_cred(cred);
	}

#ifdef CONFIG_KEYS_REQUEST_CACHE
	key_put(tsk->cached_requested_key);
	tsk->cached_requested_key = NULL;
#endif
}

/**
 * get_task_cred - Get another task's objective credentials
 * @task: The task to query
 *
 * Get the objective credentials of a task, pinning them so that they can't go
 * away. Accessing a task's credentials directly is not permitted.
 *
 * The caller must also make sure task doesn't get deleted, either by holding a
 * ref on task or by holding tasklist_lock to prevent it from being unlinked.
 */
const struct cred *get_task_cred(struct task_struct *task)
{
	const struct cred *cred;

	rcu_read_lock();

	do {
		cred = __task_cred((task));
		BUG_ON(!cred);
	} while (!get_cred_rcu(cred));

	rcu_read_unlock();
	return cred;
}
EXPORT_SYMBOL(get_task_cred);

/*
 * Allocate blank credentials, such that the credentials can be filled in at a
 * later date without risk of ENOMEM.
 */
struct cred *cred_alloc_blank(void)
{
	struct cred *new;

	new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
	if (!new)
		return NULL;

	atomic_long_set(&new->usage, 1);
	if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
		goto error;

	return new;

error:
	abort_creds(new);
	return NULL;
}

/**
 * prepare_creds - Prepare a new set of credentials for modification
 *
 * Prepare a new set of task credentials for modification. A task's creds
 * shouldn't generally be modified directly, therefore this function is used to
 * prepare a new copy, which the caller then modifies and then commits by
 * calling commit_creds().
 *
 * Preparation involves making a copy of the objective creds for modification.
 *
 * Returns a pointer to the new creds-to-be if successful, NULL otherwise.
 *
 * Call commit_creds() or abort_creds() to clean up.
 */
struct cred *prepare_creds(void)
{
	struct task_struct *task = current;
	const struct cred *old;
	struct cred *new;

	new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
	if (!new)
		return NULL;

	kdebug("prepare_creds() alloc %p", new);

	old = task->cred;
	memcpy(new, old, sizeof(struct cred));

	new->non_rcu = 0;
	atomic_long_set(&new->usage, 1);
	get_group_info(new->group_info);
	get_uid(new->user);
	get_user_ns(new->user_ns);

#ifdef CONFIG_KEYS
	key_get(new->session_keyring);
	key_get(new->process_keyring);
	key_get(new->thread_keyring);
	key_get(new->request_key_auth);
#endif

#ifdef CONFIG_SECURITY
	new->security = NULL;
#endif

	new->ucounts = get_ucounts(new->ucounts);
	if (!new->ucounts)
		goto error;

	if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
		goto error;

	return new;

error:
	abort_creds(new);
	return NULL;
}
EXPORT_SYMBOL(prepare_creds);

/*
 * Prepare credentials for current to perform an execve()
 * - The caller must hold ->cred_guard_mutex
 */
struct cred *prepare_exec_creds(void)
{
	struct cred *new;

	new = prepare_creds();
	if (!new)
		return new;

#ifdef CONFIG_KEYS
	/* newly exec'd tasks don't get a thread keyring */
	key_put(new->thread_keyring);
	new->thread_keyring = NULL;

	/* inherit the session keyring; new process keyring */
	key_put(new->process_keyring);
	new->process_keyring = NULL;
#endif

	new->suid = new->fsuid = new->euid;
	new->sgid = new->fsgid = new->egid;

	return new;
}

/*
 * Copy credentials for the new process created by fork()
 *
 * We share if we can, but under some circumstances we have to generate a new
 * set.
 *
 * The new process gets the current process's subjective credentials as its
 * objective and subjective credentials
 */
int copy_creds(struct task_struct *p, unsigned long clone_flags)
{
	struct cred *new;
	int ret;

#ifdef CONFIG_KEYS_REQUEST_CACHE
	p->cached_requested_key = NULL;
#endif

	if (
#ifdef CONFIG_KEYS
		!p->cred->thread_keyring &&
#endif
		clone_flags & CLONE_THREAD
	    ) {
		p->real_cred = get_cred_many(p->cred, 2);
		kdebug("share_creds(%p{%ld})",
		       p->cred, atomic_long_read(&p->cred->usage));
		inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
		return 0;
	}

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	if (clone_flags & CLONE_NEWUSER) {
		ret = create_user_ns(new);
		if (ret < 0)
			goto error_put;
		ret = set_cred_ucounts(new);
		if (ret < 0)
			goto error_put;
	}

#ifdef CONFIG_KEYS
	/* new threads get their own thread keyrings if their parent already
	 * had one */
	if (new->thread_keyring) {
		key_put(new->thread_keyring);
		new->thread_keyring = NULL;
		if (clone_flags & CLONE_THREAD)
			install_thread_keyring_to_cred(new);
	}

	/* The process keyring is only shared between the threads in a process;
	 * anything outside of those threads doesn't inherit.
	 */
	if (!(clone_flags & CLONE_THREAD)) {
		key_put(new->process_keyring);
		new->process_keyring = NULL;
	}
#endif

	p->cred = p->real_cred = get_cred(new);
	inc_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
	return 0;

error_put:
	put_cred(new);
	return ret;
}

static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
{
	const struct user_namespace *set_ns = set->user_ns;
	const struct user_namespace *subset_ns = subset->user_ns;

	/* If the two credentials are in the same user namespace see if
	 * the capabilities of subset are a subset of set.
	 */
	if (set_ns == subset_ns)
		return cap_issubset(subset->cap_permitted, set->cap_permitted);

	/* The credentials are in different user namespaces,
	 * therefore one is a subset of the other only if a set is an
	 * ancestor of subset and set->euid is owner of subset or one
	 * of subset's ancestors.
	 */
	for (; subset_ns != &init_user_ns; subset_ns = subset_ns->parent) {
		if ((set_ns == subset_ns->parent) &&
		    uid_eq(subset_ns->owner, set->euid))
			return true;
	}

	return false;
}

/**
 * commit_creds - Install new credentials upon the current task
 * @new: The credentials to be assigned
 *
 * Install a new set of credentials to the current task, using RCU to replace
 * the old set. Both the objective and the subjective credentials pointers are
 * updated. This function may not be called if the subjective credentials are
 * in an overridden state.
 *
 * This function eats the caller's reference to the new credentials.
 *
 * Always returns 0 thus allowing this function to be tail-called at the end
 * of, say, sys_setgid().
 */
int commit_creds(struct cred *new)
{
	struct task_struct *task = current;
	const struct cred *old = task->real_cred;

	kdebug("commit_creds(%p{%ld})", new,
	       atomic_long_read(&new->usage));

	BUG_ON(task->cred != old);
	BUG_ON(atomic_long_read(&new->usage) < 1);

	get_cred(new); /* we will require a ref for the subj creds too */

	/* dumpability changes */
	if (!uid_eq(old->euid, new->euid) ||
	    !gid_eq(old->egid, new->egid) ||
	    !uid_eq(old->fsuid, new->fsuid) ||
	    !gid_eq(old->fsgid, new->fsgid) ||
	    !cred_cap_issubset(old, new)) {
		if (task->mm)
			set_dumpable(task->mm, suid_dumpable);
		task->pdeath_signal = 0;
		/*
		 * If a task drops privileges and becomes nondumpable,
		 * the dumpability change must become visible before
		 * the credential change; otherwise, a __ptrace_may_access()
		 * racing with this change may be able to attach to a task it
		 * shouldn't be able to attach to (as if the task had dropped
		 * privileges without becoming nondumpable).
		 * Pairs with a read barrier in __ptrace_may_access().
		 */
		smp_wmb();
	}

	/* alter the thread keyring */
	if (!uid_eq(new->fsuid, old->fsuid))
		key_fsuid_changed(new);
	if (!gid_eq(new->fsgid, old->fsgid))
		key_fsgid_changed(new);

	/* do it
	 * RLIMIT_NPROC limits on user->processes have already been checked
	 * in set_user().
	 */
	if (new->user != old->user || new->user_ns != old->user_ns)
		inc_rlimit_ucounts(new->ucounts, UCOUNT_RLIMIT_NPROC, 1);
	rcu_assign_pointer(task->real_cred, new);
	rcu_assign_pointer(task->cred, new);
	if (new->user != old->user || new->user_ns != old->user_ns)
		dec_rlimit_ucounts(old->ucounts, UCOUNT_RLIMIT_NPROC, 1);

	/* send notifications */
	if (!uid_eq(new->uid,   old->uid)  ||
	    !uid_eq(new->euid,  old->euid) ||
	    !uid_eq(new->suid,  old->suid) ||
	    !uid_eq(new->fsuid, old->fsuid))
		proc_id_connector(task, PROC_EVENT_UID);

	if (!gid_eq(new->gid,   old->gid)  ||
	    !gid_eq(new->egid,  old->egid) ||
	    !gid_eq(new->sgid,  old->sgid) ||
	    !gid_eq(new->fsgid, old->fsgid))
		proc_id_connector(task, PROC_EVENT_GID);

	/* release the old obj and subj refs both */
	put_cred_many(old, 2);
	return 0;
}
EXPORT_SYMBOL(commit_creds);

/**
 * abort_creds - Discard a set of credentials and unlock the current task
 * @new: The credentials that were going to be applied
 *
 * Discard a set of credentials that were under construction and unlock the
 * current task.
 */
void abort_creds(struct cred *new)
{
	kdebug("abort_creds(%p{%ld})", new,
	       atomic_long_read(&new->usage));

	BUG_ON(atomic_long_read(&new->usage) < 1);
	put_cred(new);
}
EXPORT_SYMBOL(abort_creds);

/**
 * cred_fscmp - Compare two credentials with respect to filesystem access.
 * @a: The first credential
 * @b: The second credential
 *
 * cred_fscmp() will return zero if both credentials have the same
 * fsuid, fsgid, and supplementary groups. That is, if they will both
 * provide the same access to files based on mode/uid/gid.
 * If the credentials are different, then either -1 or 1 will
 * be returned depending on whether @a comes before or after @b
 * respectively in an arbitrary, but stable, ordering of credentials.
 *
 * Return: -1, 0, or 1 depending on comparison
 */
int cred_fscmp(const struct cred *a, const struct cred *b)
{
	struct group_info *ga, *gb;
	int g;

	if (a == b)
		return 0;
	if (uid_lt(a->fsuid, b->fsuid))
		return -1;
	if (uid_gt(a->fsuid, b->fsuid))
		return 1;

	if (gid_lt(a->fsgid, b->fsgid))
		return -1;
	if (gid_gt(a->fsgid, b->fsgid))
		return 1;

	ga = a->group_info;
	gb = b->group_info;
	if (ga == gb)
		return 0;
	if (ga == NULL)
		return -1;
	if (gb == NULL)
		return 1;
	if (ga->ngroups < gb->ngroups)
		return -1;
	if (ga->ngroups > gb->ngroups)
		return 1;

	for (g = 0; g < ga->ngroups; g++) {
		if (gid_lt(ga->gid[g], gb->gid[g]))
			return -1;
		if (gid_gt(ga->gid[g], gb->gid[g]))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(cred_fscmp);

int set_cred_ucounts(struct cred *new)
{
	struct ucounts *new_ucounts, *old_ucounts = new->ucounts;

	/*
	 * This optimization is needed because alloc_ucounts() uses locks
	 * for table lookups.
	 */
	if (old_ucounts->ns == new->user_ns &&
	    uid_eq(old_ucounts->uid, new->uid))
		return 0;

	if (!(new_ucounts = alloc_ucounts(new->user_ns, new->uid)))
		return -EAGAIN;

	new->ucounts = new_ucounts;
	put_ucounts(old_ucounts);

	return 0;
}

/*
 * initialise the credentials stuff
 */
void __init cred_init(void)
{
	/* allocate a slab in which we can store credentials */
	cred_jar = KMEM_CACHE(cred,
			      SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}

/**
 * prepare_kernel_cred - Prepare a set of credentials for a kernel service
 * @daemon: A userspace daemon to be used as a reference
 *
 * Prepare a set of credentials for a kernel service. This can then be used to
 * override a task's own credentials so that work can be done on behalf of that
 * task that requires a different subjective context.
 *
 * @daemon is used to provide a base cred, with the security data derived from
 * that; if this is "&init_task", they'll be set to 0, no groups, full
 * capabilities, and no keys.
 *
 * The caller may change these controls afterwards if desired.
 *
 * Returns the new credentials or NULL if out of memory.
 */
struct cred *prepare_kernel_cred(struct task_struct *daemon)
{
	const struct cred *old;
	struct cred *new;

	if (WARN_ON_ONCE(!daemon))
		return NULL;

	new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
	if (!new)
		return NULL;

	kdebug("prepare_kernel_cred() alloc %p", new);

	old = get_task_cred(daemon);

	*new = *old;
	new->non_rcu = 0;
	atomic_long_set(&new->usage, 1);
	get_uid(new->user);
	get_user_ns(new->user_ns);
	get_group_info(new->group_info);

#ifdef CONFIG_KEYS
	new->session_keyring = NULL;
	new->process_keyring = NULL;
	new->thread_keyring = NULL;
	new->request_key_auth = NULL;
	new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
#endif

#ifdef CONFIG_SECURITY
	new->security = NULL;
#endif
	new->ucounts = get_ucounts(new->ucounts);
	if (!new->ucounts)
		goto error;

	if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
		goto error;

	put_cred(old);
	return new;

error:
	put_cred(new);
	put_cred(old);
	return NULL;
}
EXPORT_SYMBOL(prepare_kernel_cred);

/**
 * set_security_override - Set the security ID in a set of credentials
 * @new: The credentials to alter
 * @secid: The LSM security ID to set
 *
 * Set the LSM security ID in a set of credentials so that the subjective
 * security is overridden when an alternative set of credentials is used.
 */
int set_security_override(struct cred *new, u32 secid)
{
	return security_kernel_act_as(new, secid);
}
EXPORT_SYMBOL(set_security_override);

/**
 * set_security_override_from_ctx - Set the security ID in a set of credentials
 * @new: The credentials to alter
 * @secctx: The LSM security context to generate the security ID from.
 *
 * Set the LSM security ID in a set of credentials so that the subjective
 * security is overridden when an alternative set of credentials is used. The
 * security ID is specified in string form as a security context to be
 * interpreted by the LSM.
 */
int set_security_override_from_ctx(struct cred *new, const char *secctx)
{
	u32 secid;
	int ret;

	ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
	if (ret < 0)
		return ret;

	return set_security_override(new, secid);
}
EXPORT_SYMBOL(set_security_override_from_ctx);

/**
 * set_create_files_as - Set the LSM file create context in a set of credentials
 * @new: The credentials to alter
 * @inode: The inode to take the context from
 *
 * Change the LSM file creation context in a set of credentials to be the same
 * as the object context of the specified inode, so that the new inodes have
 * the same MAC context as that inode.
 */
int set_create_files_as(struct cred *new, struct inode *inode)
{
	if (!uid_valid(inode->i_uid) || !gid_valid(inode->i_gid))
		return -EINVAL;
	new->fsuid = inode->i_uid;
	new->fsgid = inode->i_gid;
	return security_kernel_create_files_as(new, inode);
}
EXPORT_SYMBOL(set_create_files_as);
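The kernel-doc comments above describe the intended life cycle: copy the current credentials with prepare_creds(), modify the copy, then either install it with commit_creds() (which consumes the reference) or back out with abort_creds(). A minimal sketch of that pattern follows; it is not upstream code, and example_set_fsuid() with its CAP_SETUID policy check is an illustrative assumption. It relies only on helpers already declared via the includes in this file.

/* Hypothetical sketch of the prepare/commit credential pattern. */
static int example_set_fsuid(kuid_t new_fsuid)
{
	struct cred *new;

	if (!capable(CAP_SETUID))	/* stand-in for the caller's policy */
		return -EPERM;

	new = prepare_creds();		/* private, modifiable copy */
	if (!new)
		return -ENOMEM;

	if (!uid_valid(new_fsuid)) {
		abort_creds(new);	/* discard the unused copy */
		return -EINVAL;
	}

	new->fsuid = new_fsuid;
	return commit_creds(new);	/* installs 'new' and eats the ref */
}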
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * V4L2 controls support header.
controls support header. * * Copyright (C) 2010 Hans Verkuil <hverkuil@xs4all.nl> */ #ifndef _V4L2_CTRLS_H #define _V4L2_CTRLS_H #include <linux/list.h> #include <linux/mutex.h> #include <linux/videodev2.h> #include <media/media-request.h> /* forward references */ struct file; struct poll_table_struct; struct v4l2_ctrl; struct v4l2_ctrl_handler; struct v4l2_ctrl_helper; struct v4l2_fh; struct v4l2_fwnode_device_properties; struct v4l2_subdev; struct v4l2_subscribed_event; struct video_device; /** * union v4l2_ctrl_ptr - A pointer to a control value. * @p_s32: Pointer to a 32-bit signed value. * @p_s64: Pointer to a 64-bit signed value. * @p_u8: Pointer to an 8-bit unsigned value. * @p_u16: Pointer to a 16-bit unsigned value. * @p_u32: Pointer to a 32-bit unsigned value. * @p_char: Pointer to a string. * @p_mpeg2_sequence: Pointer to an MPEG2 sequence structure. * @p_mpeg2_picture: Pointer to an MPEG2 picture structure. * @p_mpeg2_quantisation: Pointer to an MPEG2 quantisation data structure. * @p_fwht_params: Pointer to an FWHT stateless parameters structure. * @p_h264_sps: Pointer to a struct v4l2_ctrl_h264_sps. * @p_h264_pps: Pointer to a struct v4l2_ctrl_h264_pps. * @p_h264_scaling_matrix: Pointer to a struct v4l2_ctrl_h264_scaling_matrix. * @p_h264_slice_params: Pointer to a struct v4l2_ctrl_h264_slice_params. * @p_h264_decode_params: Pointer to a struct v4l2_ctrl_h264_decode_params. * @p_h264_pred_weights: Pointer to a struct v4l2_ctrl_h264_pred_weights. * @p_vp8_frame: Pointer to a VP8 frame params structure. * @p_vp9_compressed_hdr_probs: Pointer to a VP9 frame compressed header probs structure. * @p_vp9_frame: Pointer to a VP9 frame params structure. * @p_hevc_sps: Pointer to an HEVC sequence parameter set structure. * @p_hevc_pps: Pointer to an HEVC picture parameter set structure. * @p_hevc_slice_params: Pointer to an HEVC slice parameters structure. * @p_hdr10_cll: Pointer to an HDR10 Content Light Level structure. * @p_hdr10_mastering: Pointer to an HDR10 Mastering Display structure. * @p_area: Pointer to an area. * @p_av1_sequence: Pointer to an AV1 sequence structure. * @p_av1_tile_group_entry: Pointer to an AV1 tile group entry structure. * @p_av1_frame: Pointer to an AV1 frame structure. * @p_av1_film_grain: Pointer to an AV1 film grain structure. * @p_rect: Pointer to a rectangle. * @p: Pointer to a compound value. * @p_const: Pointer to a constant compound value.
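 *
 * For illustration (a sketch; the default-value constant is hypothetical),
 * a driver can wrap a compound payload in this union with
 * v4l2_ctrl_ptr_create(), declared below:
 *
 *	static const struct v4l2_ctrl_hdr10_cll_info foo_cll_def = {
 *		.max_content_light_level = 1000,
 *		.max_pic_average_light_level = 400,
 *	};
 *	union v4l2_ctrl_ptr p_def =
 *		v4l2_ctrl_ptr_create((void *)&foo_cll_def);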
*/ union v4l2_ctrl_ptr { s32 *p_s32; s64 *p_s64; u8 *p_u8; u16 *p_u16; u32 *p_u32; char *p_char; struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation; struct v4l2_ctrl_fwht_params *p_fwht_params; struct v4l2_ctrl_h264_sps *p_h264_sps; struct v4l2_ctrl_h264_pps *p_h264_pps; struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix; struct v4l2_ctrl_h264_slice_params *p_h264_slice_params; struct v4l2_ctrl_h264_decode_params *p_h264_decode_params; struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights; struct v4l2_ctrl_vp8_frame *p_vp8_frame; struct v4l2_ctrl_hevc_sps *p_hevc_sps; struct v4l2_ctrl_hevc_pps *p_hevc_pps; struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params; struct v4l2_ctrl_vp9_compressed_hdr *p_vp9_compressed_hdr_probs; struct v4l2_ctrl_vp9_frame *p_vp9_frame; struct v4l2_ctrl_hdr10_cll_info *p_hdr10_cll; struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering; struct v4l2_area *p_area; struct v4l2_ctrl_av1_sequence *p_av1_sequence; struct v4l2_ctrl_av1_tile_group_entry *p_av1_tile_group_entry; struct v4l2_ctrl_av1_frame *p_av1_frame; struct v4l2_ctrl_av1_film_grain *p_av1_film_grain; struct v4l2_rect *p_rect; void *p; const void *p_const; }; /** * v4l2_ctrl_ptr_create() - Helper function to return a v4l2_ctrl_ptr from a * void pointer * @ptr: The void pointer */ static inline union v4l2_ctrl_ptr v4l2_ctrl_ptr_create(void *ptr) { union v4l2_ctrl_ptr p = { .p = ptr }; return p; } /** * struct v4l2_ctrl_ops - The control operations that the driver has to provide. * * @g_volatile_ctrl: Get a new value for this control. Generally only relevant * for volatile (and usually read-only) controls such as a control * that returns the current signal strength which changes * continuously. * If not set, then the currently cached value will be returned. * @try_ctrl: Test whether the control's value is valid. Only relevant when * the usual min/max/step checks are not sufficient. * @s_ctrl: Actually set the new control value. s_ctrl is compulsory. The * ctrl->handler->lock is held when these ops are called, so no * one else can access controls owned by that handler. */ struct v4l2_ctrl_ops { int (*g_volatile_ctrl)(struct v4l2_ctrl *ctrl); int (*try_ctrl)(struct v4l2_ctrl *ctrl); int (*s_ctrl)(struct v4l2_ctrl *ctrl); }; /** * struct v4l2_ctrl_type_ops - The control type operations that the driver * has to provide. * * @equal: return true if all ctrl->elems array elements are equal. * @init: initialize the value for array elements from from_idx to ctrl->elems. * @minimum: set the value to the minimum value of the control. * @maximum: set the value to the maximum value of the control. * @log: log the value. * @validate: validate the value for ctrl->new_elems array elements. * Return 0 on success and a negative value otherwise. */ struct v4l2_ctrl_type_ops { bool (*equal)(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2); void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx, union v4l2_ctrl_ptr ptr); void (*minimum)(const struct v4l2_ctrl *ctrl, u32 idx, union v4l2_ctrl_ptr ptr); void (*maximum)(const struct v4l2_ctrl *ctrl, u32 idx, union v4l2_ctrl_ptr ptr); void (*log)(const struct v4l2_ctrl *ctrl); int (*validate)(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr); }; /** * typedef v4l2_ctrl_notify_fnc - typedef for a notify argument with a function * that should be called when a control value has changed. 
* * @ctrl: pointer to struct &v4l2_ctrl * @priv: control private data * * This typedef definition is used as an argument to v4l2_ctrl_notify() * and as an argument at struct &v4l2_ctrl_handler. */ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv); /** * struct v4l2_ctrl - The control structure. * * @node: The list node. * @ev_subs: The list of control event subscriptions. * @handler: The handler that owns the control. * @cluster: Points to the start of the cluster array. * @ncontrols: Number of controls in cluster array. * @done: Internal flag: set for each processed control. * @is_new: Set when the user specified a new value for this control. It * is also set when called from v4l2_ctrl_handler_setup(). Drivers * should never set this flag. * @has_changed: Set when the current value differs from the new value. Drivers * should never use this flag. * @is_private: If set, then this control is private to its handler and it * will not be added to any other handlers. Drivers can set * this flag. * @is_auto: If set, then this control selects whether the other cluster * members are in 'automatic' mode or 'manual' mode. This is * used for autogain/gain type clusters. Drivers should never * set this flag directly. * @is_int: If set, then this control has a simple integer value (i.e. it * uses ctrl->val). * @is_string: If set, then this control has type %V4L2_CTRL_TYPE_STRING. * @is_ptr: If set, then this control is an array and/or has type >= * %V4L2_CTRL_COMPOUND_TYPES * and/or has type %V4L2_CTRL_TYPE_STRING. In other words, &struct * v4l2_ext_control uses field p to point to the data. * @is_array: If set, then this control contains an N-dimensional array. * @is_dyn_array: If set, then this control contains a dynamically sized 1-dimensional array. * If this is set, then @is_array is also set. * @has_volatiles: If set, then one or more members of the cluster are volatile. * Drivers should never touch this flag. * @call_notify: If set, then call the handler's notify function whenever the * control's value changes. * @manual_mode_value: If the is_auto flag is set, then this is the value * of the auto control that determines if that control is in * manual mode. So if the value of the auto control equals this * value, then the whole cluster is in manual mode. Drivers should * never set this flag directly. * @ops: The control ops. * @type_ops: The control type ops. * @id: The control ID. * @name: The control name. * @type: The control type. * @minimum: The control's minimum value. * @maximum: The control's maximum value. * @default_value: The control's default value. * @step: The control's step value for non-menu controls. * @elems: The number of elements in the N-dimensional array. * @elem_size: The size in bytes of the control. * @new_elems: The number of elements in p_new. This is the same as @elems, * except for dynamic arrays. In that case it is in the range of * 1 to @p_array_alloc_elems. * @dims: The size of each dimension. * @nr_of_dims: The number of dimensions in @dims. * @menu_skip_mask: The control's skip mask for menu controls. This makes it * easy to skip menu items that are not valid. If bit X is set, * then menu item X is skipped. Of course, this only works for * menus with <= 64 menu items. There are no menus that come * close to that number, so this is OK. Should we ever need more, * then this will have to be extended to a bit array. * @qmenu: A const char * array for all menu items.
Array entries that are * empty strings ("") correspond to non-existing menu items (this * is in addition to the menu_skip_mask above). The last entry * must be NULL. * Used only if the @type is %V4L2_CTRL_TYPE_MENU. * @qmenu_int: A 64-bit integer array with the integer menu items. * The size of the array must be equal to the menu size, e.g.: * :math:`ceil(\frac{maximum - minimum}{step}) + 1`. * Used only if the @type is %V4L2_CTRL_TYPE_INTEGER_MENU. * @flags: The control's flags. * @priv: The control's private pointer. For use by the driver. It is * untouched by the control framework. Note that this pointer is * not freed when the control is deleted. Should this be needed * then a new internal bitfield can be added to tell the framework * to free this pointer. * @p_array: Pointer to the allocated array. Only valid if @is_array is true. * @p_array_alloc_elems: The number of elements in the allocated * array for both the cur and new values. So @p_array is actually * sized for 2 * @p_array_alloc_elems * @elem_size. Only valid if * @is_array is true. * @cur: Structure to store the current value. * @cur.val: The control's current value, if the @type is represented via * a u32 integer (see &enum v4l2_ctrl_type). * @val: The control's new s32 value. * @p_def: The control's default value represented via a union which * provides a standard way of accessing control types * through a pointer (for compound controls only). * @p_min: The control's minimum value represented via a union which * provides a standard way of accessing control types * through a pointer (for compound controls only). * @p_max: The control's maximum value represented via a union which * provides a standard way of accessing control types * through a pointer (for compound controls only). * @p_cur: The control's current value represented via a union which * provides a standard way of accessing control types * through a pointer. * @p_new: The control's new value represented via a union which provides * a standard way of accessing control types * through a pointer. */ struct v4l2_ctrl { /* Administrative fields */ struct list_head node; struct list_head ev_subs; struct v4l2_ctrl_handler *handler; struct v4l2_ctrl **cluster; unsigned int ncontrols; unsigned int done:1; unsigned int is_new:1; unsigned int has_changed:1; unsigned int is_private:1; unsigned int is_auto:1; unsigned int is_int:1; unsigned int is_string:1; unsigned int is_ptr:1; unsigned int is_array:1; unsigned int is_dyn_array:1; unsigned int has_volatiles:1; unsigned int call_notify:1; unsigned int manual_mode_value:8; const struct v4l2_ctrl_ops *ops; const struct v4l2_ctrl_type_ops *type_ops; u32 id; const char *name; enum v4l2_ctrl_type type; s64 minimum, maximum, default_value; u32 elems; u32 elem_size; u32 new_elems; u32 dims[V4L2_CTRL_MAX_DIMS]; u32 nr_of_dims; union { u64 step; u64 menu_skip_mask; }; union { const char * const *qmenu; const s64 *qmenu_int; }; unsigned long flags; void *priv; void *p_array; u32 p_array_alloc_elems; s32 val; struct { s32 val; } cur; union v4l2_ctrl_ptr p_def; union v4l2_ctrl_ptr p_min; union v4l2_ctrl_ptr p_max; union v4l2_ctrl_ptr p_new; union v4l2_ctrl_ptr p_cur; }; /** * struct v4l2_ctrl_ref - The control reference. * * @node: List node for the sorted list. * @next: Single-link list node for the hash. * @ctrl: The actual control information. * @helper: Pointer to helper struct. Used internally in * ``prepare_ext_ctrls`` function at ``v4l2-ctrl.c``.
* @from_other_dev: If true, then @ctrl was defined in another * device than the &struct v4l2_ctrl_handler. * @req_done: Internal flag: if the control handler containing this control * reference is bound to a media request, then this is set when * the control has been applied. This prevents applying controls * from a cluster with multiple controls twice (when the first * control of a cluster is applied, they all are). * @p_req_valid: If set, then p_req contains the control value for the request. * @p_req_array_enomem: If set, then p_req is invalid since allocating space for * an array failed. Attempting to read this value shall * result in ENOMEM. Only valid if ctrl->is_array is true. * @p_req_array_alloc_elems: The number of elements allocated for the * array. Only valid if @p_req_valid and ctrl->is_array are * true. * @p_req_elems: The number of elements in @p_req. This is the same as * ctrl->elems, except for dynamic arrays. In that case it is in * the range of 1 to @p_req_array_alloc_elems. Only valid if * @p_req_valid is true. * @p_req: If the control handler containing this control reference * is bound to a media request, then this points to the * value of the control that must be applied when the request * is executed, or to the value of the control at the time * that the request was completed. If @p_req_valid is false, * then this control was never set for this request and the * control will not be updated when this request is applied. * * Each control handler has a list of these refs. The list_head is used to * keep a sorted-by-control-ID list of all controls, while the next pointer * is used to link the control in the hash's bucket. */ struct v4l2_ctrl_ref { struct list_head node; struct v4l2_ctrl_ref *next; struct v4l2_ctrl *ctrl; struct v4l2_ctrl_helper *helper; bool from_other_dev; bool req_done; bool p_req_valid; bool p_req_array_enomem; u32 p_req_array_alloc_elems; u32 p_req_elems; union v4l2_ctrl_ptr p_req; }; /** * struct v4l2_ctrl_handler - The control handler keeps track of all the * controls: both the controls owned by the handler and those inherited * from other handlers. * * @_lock: Default for "lock". * @lock: Lock to control access to this handler and its controls. * May be replaced by the user right after init. * @ctrls: The list of controls owned by this handler. * @ctrl_refs: The list of control references. * @cached: The last found control reference. It is common that the same * control is needed multiple times, so this is a simple * optimization. * @buckets: Buckets for the hashing. Allows for quick control lookup. * @notify: A notify callback that is called whenever the control changes * value. * Note that the handler's lock is held when the notify function * is called! * @notify_priv: Passed as argument to the v4l2_ctrl notify callback. * @nr_of_buckets: Total number of buckets in the array. * @error: The error code of the first failed control addition. * @request_is_queued: True if the request was queued. * @requests: List to keep track of open control handler request objects. * For the parent control handler (@req_obj.ops == NULL) this * is the list header. When the parent control handler is * removed, it has to unbind and put all these requests since * they refer to the parent. * @requests_queued: List of the queued requests. This determines the order * in which these controls are applied. Once the request is * completed it is removed from this list. * @req_obj: The &struct media_request_object, used to link into a * &struct media_request. 
This request object has a refcount. */ struct v4l2_ctrl_handler { struct mutex _lock; struct mutex *lock; struct list_head ctrls; struct list_head ctrl_refs; struct v4l2_ctrl_ref *cached; struct v4l2_ctrl_ref **buckets; v4l2_ctrl_notify_fnc notify; void *notify_priv; u16 nr_of_buckets; int error; bool request_is_queued; struct list_head requests; struct list_head requests_queued; struct media_request_object req_obj; }; /** * struct v4l2_ctrl_config - Control configuration structure. * * @ops: The control ops. * @type_ops: The control type ops. Only needed for compound controls. * @id: The control ID. * @name: The control name. * @type: The control type. * @min: The control's minimum value. * @max: The control's maximum value. * @step: The control's step value for non-menu controls. * @def: The control's default value. * @p_def: The control's default value for compound controls. * @p_min: The control's minimum value for compound controls. * @p_max: The control's maximum value for compound controls. * @dims: The size of each dimension. * @elem_size: The size in bytes of the control. * @flags: The control's flags. * @menu_skip_mask: The control's skip mask for menu controls. This makes it * easy to skip menu items that are not valid. If bit X is set, * then menu item X is skipped. Of course, this only works for * menus with <= 64 menu items. There are no menus that come * close to that number, so this is OK. Should we ever need more, * then this will have to be extended to a bit array. * @qmenu: A const char * array for all menu items. Array entries that are * empty strings ("") correspond to non-existing menu items (this * is in addition to the menu_skip_mask above). The last entry * must be NULL. * @qmenu_int: A const s64 integer array for all menu items of the type * V4L2_CTRL_TYPE_INTEGER_MENU. * @is_private: If set, then this control is private to its handler and it * will not be added to any other handlers. */ struct v4l2_ctrl_config { const struct v4l2_ctrl_ops *ops; const struct v4l2_ctrl_type_ops *type_ops; u32 id; const char *name; enum v4l2_ctrl_type type; s64 min; s64 max; u64 step; s64 def; union v4l2_ctrl_ptr p_def; union v4l2_ctrl_ptr p_min; union v4l2_ctrl_ptr p_max; u32 dims[V4L2_CTRL_MAX_DIMS]; u32 elem_size; u32 flags; u64 menu_skip_mask; const char * const *qmenu; const s64 *qmenu_int; unsigned int is_private:1; }; /** * v4l2_ctrl_fill - Fill in the control fields based on the control ID. * * @id: ID of the control * @name: pointer to be filled with a string with the name of the control * @type: pointer for storing the type of the control * @min: pointer for storing the minimum value for the control * @max: pointer for storing the maximum value for the control * @step: pointer for storing the control step * @def: pointer for storing the default value for the control * @flags: pointer for storing the flags to be used on the control * * This works for all standard V4L2 controls. * For non-standard controls it will only fill in the given arguments * and @name content will be set to %NULL. * * This function will overwrite the contents of @name, @type and @flags. * The contents of @min, @max, @step and @def may be modified depending on * the type. * * .. note:: * * Do not use in drivers! It is used internally for backwards compatibility * control handling only. Once all drivers are converted to use the new * control framework this function will no longer be exported. 
*/ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags); /** * v4l2_ctrl_handler_init_class() - Initialize the control handler. * @hdl: The control handler. * @nr_of_controls_hint: A hint of how many controls this handler is * expected to refer to. This is the total number, so including * any inherited controls. It doesn't have to be precise, but if * it is way off, then you either waste memory (too many buckets * are allocated) or the control lookup becomes slower (not enough * buckets are allocated, so there are more slow list lookups). * It will always work, though. * @key: Used by the lock validator if CONFIG_LOCKDEP is set. * @name: Used by the lock validator if CONFIG_LOCKDEP is set. * * .. attention:: * * Never use this call directly, always use the v4l2_ctrl_handler_init() * macro that hides the @key and @name arguments. * * Return: returns an error if the buckets could not be allocated. This * error will also be stored in @hdl->error. */ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl, unsigned int nr_of_controls_hint, struct lock_class_key *key, const char *name); #ifdef CONFIG_LOCKDEP /** * v4l2_ctrl_handler_init - helper function to create a static struct * &lock_class_key and call v4l2_ctrl_handler_init_class() * * @hdl: The control handler. * @nr_of_controls_hint: A hint of how many controls this handler is * expected to refer to. This is the total number, so including * any inherited controls. It doesn't have to be precise, but if * it is way off, then you either waste memory (too many buckets * are allocated) or the control lookup becomes slower (not enough * buckets are allocated, so there are more slow list lookups). * It will always work, though. * * This helper function creates a static struct &lock_class_key and * calls v4l2_ctrl_handler_init_class(), providing a proper name for the lock * validator. * * Use this helper function to initialize a control handler. */ #define v4l2_ctrl_handler_init(hdl, nr_of_controls_hint) \ ( \ ({ \ static struct lock_class_key _key; \ v4l2_ctrl_handler_init_class(hdl, nr_of_controls_hint, \ &_key, \ KBUILD_BASENAME ":" \ __stringify(__LINE__) ":" \ "(" #hdl ")->_lock"); \ }) \ ) #else #define v4l2_ctrl_handler_init(hdl, nr_of_controls_hint) \ v4l2_ctrl_handler_init_class(hdl, nr_of_controls_hint, NULL, NULL) #endif /** * v4l2_ctrl_handler_free() - Free all controls owned by the handler and free * the control list. * @hdl: The control handler. * * Does nothing if @hdl == NULL. */ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl); /** * v4l2_ctrl_lock() - Helper function to lock the handler * associated with the control. * @ctrl: The control to lock. */ static inline void v4l2_ctrl_lock(struct v4l2_ctrl *ctrl) { mutex_lock(ctrl->handler->lock); } /** * v4l2_ctrl_unlock() - Helper function to unlock the handler * associated with the control. * @ctrl: The control to unlock. */ static inline void v4l2_ctrl_unlock(struct v4l2_ctrl *ctrl) { mutex_unlock(ctrl->handler->lock); } /** * __v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging * to the handler to initialize the hardware to the current control values. The * caller is responsible for acquiring the control handler mutex on behalf of * __v4l2_ctrl_handler_setup(). * @hdl: The control handler. * * Button controls will be skipped, as are read-only controls. * * If @hdl == NULL, then this just returns 0.
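 *
 * A minimal initialization sketch tying these helpers together
 * (illustrative only; &foo_ctrl_ops and the foo driver state are
 * assumed):
 *
 *	v4l2_ctrl_handler_init(&foo->hdl, 2);
 *	v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops, V4L2_CID_BRIGHTNESS,
 *			  0, 255, 1, 128);
 *	v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops, V4L2_CID_CONTRAST,
 *			  0, 255, 1, 16);
 *	if (foo->hdl.error) {
 *		int err = foo->hdl.error;
 *
 *		v4l2_ctrl_handler_free(&foo->hdl);
 *		return err;
 *	}
 *	return v4l2_ctrl_handler_setup(&foo->hdl);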
*/ int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl); /** * v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging * to the handler to initialize the hardware to the current control values. * @hdl: The control handler. * * Button controls will be skipped, as are read-only controls. * * If @hdl == NULL, then this just returns 0. */ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl); /** * v4l2_ctrl_handler_log_status() - Log all controls owned by the handler. * @hdl: The control handler. * @prefix: The prefix to use when logging the control values. If the * prefix does not end with a space, then ": " will be added * after the prefix. If @prefix == NULL, then no prefix will be * used. * * For use with VIDIOC_LOG_STATUS. * * Does nothing if @hdl == NULL. */ void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl, const char *prefix); /** * v4l2_ctrl_new_custom() - Allocate and initialize a new custom V4L2 * control. * * @hdl: The control handler. * @cfg: The control's configuration data. * @priv: The control's driver-specific private data. * * If the &v4l2_ctrl struct could not be allocated then NULL is returned * and @hdl->error is set to the error code (if it wasn't set already). */ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_config *cfg, void *priv); /** * v4l2_ctrl_new_std() - Allocate and initialize a new standard V4L2 non-menu * control. * * @hdl: The control handler. * @ops: The control ops. * @id: The control ID. * @min: The control's minimum value. * @max: The control's maximum value. * @step: The control's step value * @def: The control's default value. * * If the &v4l2_ctrl struct could not be allocated, or the control * ID is not known, then NULL is returned and @hdl->error is set to the * appropriate error code (if it wasn't set already). * * If @id refers to a menu control, then this function will return NULL. * * Use v4l2_ctrl_new_std_menu() when adding menu controls. */ struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_ops *ops, u32 id, s64 min, s64 max, u64 step, s64 def); /** * v4l2_ctrl_new_std_menu() - Allocate and initialize a new standard V4L2 * menu control. * * @hdl: The control handler. * @ops: The control ops. * @id: The control ID. * @max: The control's maximum value. * @mask: The control's skip mask for menu controls. This makes it * easy to skip menu items that are not valid. If bit X is set, * then menu item X is skipped. Of course, this only works for * menus with <= 64 menu items. There are no menus that come * close to that number, so this is OK. Should we ever need more, * then this will have to be extended to a bit array. * @def: The control's default value. * * Same as v4l2_ctrl_new_std(), but @min is set to 0 and the @mask value * determines which menu items are to be skipped. * * If @id refers to a non-menu control, then this function will return NULL. */ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_ops *ops, u32 id, u8 max, u64 mask, u8 def); /** * v4l2_ctrl_new_std_menu_items() - Create a new standard V4L2 menu control * with driver specific menu. * * @hdl: The control handler. * @ops: The control ops. * @id: The control ID. * @max: The control's maximum value. * @mask: The control's skip mask for menu controls. This makes it * easy to skip menu items that are not valid. If bit X is set, * then menu item X is skipped. 
Of course, this only works for * menus with <= 64 menu items. There are no menus that come * close to that number, so this is OK. Should we ever need more, * then this will have to be extended to a bit array. * @def: The control's default value. * @qmenu: The new menu. * * Same as v4l2_ctrl_new_std_menu(), but @qmenu will be the driver-specific * menu of this control. * */ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_ops *ops, u32 id, u8 max, u64 mask, u8 def, const char * const *qmenu); /** * v4l2_ctrl_new_std_compound() - Allocate and initialize a new standard V4L2 * compound control. * * @hdl: The control handler. * @ops: The control ops. * @id: The control ID. * @p_def: The control's default value. * @p_min: The control's minimum value. * @p_max: The control's maximum value. * * Same as v4l2_ctrl_new_std(), but with support for compound controls. * To fill in the @p_def, @p_min and @p_max fields, use v4l2_ctrl_ptr_create() * to convert a pointer to a const union v4l2_ctrl_ptr. * Use v4l2_ctrl_ptr_create(NULL) if you want the default, minimum or maximum * value of the compound control to be all zeroes. * If the compound control does not set the ``V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX`` * flag, then it does not have minimum and maximum values. In that case just use * v4l2_ctrl_ptr_create(NULL) for the @p_min and @p_max arguments. * */ struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_ops *ops, u32 id, const union v4l2_ctrl_ptr p_def, const union v4l2_ctrl_ptr p_min, const union v4l2_ctrl_ptr p_max); /** * v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control. * * @hdl: The control handler. * @ops: The control ops. * @id: The control ID. * @max: The control's maximum value. * @def: The control's default value. * @qmenu_int: The control's menu entries. * * Same as v4l2_ctrl_new_std_menu(), but @mask is set to 0 and it additionally * takes as an argument an array of integers determining the menu items. * * If @id refers to a non-integer-menu control, then this function will * return %NULL. */ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_ops *ops, u32 id, u8 max, u8 def, const s64 *qmenu_int); /** * typedef v4l2_ctrl_filter - Typedef to define the filter function to be * used when adding a control handler. * * @ctrl: pointer to struct &v4l2_ctrl. */ typedef bool (*v4l2_ctrl_filter)(const struct v4l2_ctrl *ctrl); /** * v4l2_ctrl_add_handler() - Add all controls from handler @add to * handler @hdl. * * @hdl: The control handler. * @add: The control handler whose controls you want to add to * the @hdl control handler. * @filter: This function will filter which controls should be added. * @from_other_dev: If true, then the controls in @add were defined in another * device than @hdl. * * Does nothing if either of the two handlers is a NULL pointer. * If @filter is NULL, then all controls are added. Otherwise only those * controls for which @filter returns true will be added. * In case of an error @hdl->error will be set to the error code (if it * wasn't set already). */ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, struct v4l2_ctrl_handler *add, v4l2_ctrl_filter filter, bool from_other_dev); /** * v4l2_ctrl_radio_filter() - Standard filter for radio controls. * * @ctrl: The control that is filtered. * * This will return true for any controls that are valid for radio device * nodes.
Those are all of the V4L2_CID_AUDIO_* user controls and all FM * transmitter class controls. * * This function is to be used with v4l2_ctrl_add_handler(). */ bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl); /** * v4l2_ctrl_cluster() - Mark all controls in the cluster as belonging * to that cluster. * * @ncontrols: The number of controls in this cluster. * @controls: The cluster control array of size @ncontrols. */ void v4l2_ctrl_cluster(unsigned int ncontrols, struct v4l2_ctrl **controls); /** * v4l2_ctrl_auto_cluster() - Mark all controls in the cluster as belonging * to that cluster and set it up for autofoo/foo-type handling. * * @ncontrols: The number of controls in this cluster. * @controls: The cluster control array of size @ncontrols. The first control * must be the 'auto' control (e.g. autogain, autoexposure, etc.) * @manual_val: The value for the first control in the cluster that equals the * manual setting. * @set_volatile: If true, then all controls except the first auto control will * be volatile. * * Use for control groups where one control selects some automatic feature and * the other controls are only active whenever the automatic feature is turned * off (manual mode). Typical examples: autogain vs gain, auto-whitebalance vs * red and blue balance, etc. * * The behavior of such controls is as follows: * * When the autofoo control is set to automatic, then any manual controls * are set to inactive and any reads will call g_volatile_ctrl (if the control * was marked volatile). * * When the autofoo control is set to manual, then any manual controls will * be marked active, and any reads will just return the current value without * going through g_volatile_ctrl. * * In addition, this function will set the %V4L2_CTRL_FLAG_UPDATE flag * on the autofoo control and %V4L2_CTRL_FLAG_INACTIVE on the foo control(s) * if autofoo is in auto mode. */ void v4l2_ctrl_auto_cluster(unsigned int ncontrols, struct v4l2_ctrl **controls, u8 manual_val, bool set_volatile); /** * v4l2_ctrl_find() - Find a control with the given ID. * * @hdl: The control handler. * @id: The control ID to find. * * If @hdl == NULL this will return NULL as well. Will lock the handler so * do not use from inside &v4l2_ctrl_ops. */ struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id); /** * v4l2_ctrl_activate() - Make the control active or inactive. * @ctrl: The control to (de)activate. * @active: True if the control should become active. * * This sets or clears the V4L2_CTRL_FLAG_INACTIVE flag atomically. * Does nothing if @ctrl == NULL. * This will usually be called from within the s_ctrl op. * The V4L2_EVENT_CTRL event will be generated afterwards. * * This function assumes that the control handler is locked. */ void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active); /** * __v4l2_ctrl_grab() - Unlocked variant of v4l2_ctrl_grab. * * @ctrl: The control to (de)activate. * @grabbed: True if the control should become grabbed. * * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically. * Does nothing if @ctrl == NULL. * The V4L2_EVENT_CTRL event will be generated afterwards. * This will usually be called when starting or stopping streaming in the * driver. * * This function assumes that the control handler is locked by the caller. */ void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed); /** * v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed. * * @ctrl: The control to (de)activate. * @grabbed: True if the control should become grabbed. 
* * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically. * Does nothing if @ctrl == NULL. * The V4L2_EVENT_CTRL event will be generated afterwards. * This will usually be called when starting or stopping streaming in the * driver. * * This function assumes that the control handler is not locked and will * take the lock itself. */ static inline void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed) { if (!ctrl) return; v4l2_ctrl_lock(ctrl); __v4l2_ctrl_grab(ctrl, grabbed); v4l2_ctrl_unlock(ctrl); } /** *__v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range() * * @ctrl: The control to update. * @min: The control's minimum value. * @max: The control's maximum value. * @step: The control's step value * @def: The control's default value. * * Update the range of a control on the fly. This works for control types * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the * @step value is interpreted as a menu_skip_mask. * * An error is returned if one of the range arguments is invalid for this * control type. * * The caller is responsible for acquiring the control handler mutex on behalf * of __v4l2_ctrl_modify_range(). */ int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, s64 min, s64 max, u64 step, s64 def); /** * v4l2_ctrl_modify_range() - Update the range of a control. * * @ctrl: The control to update. * @min: The control's minimum value. * @max: The control's maximum value. * @step: The control's step value * @def: The control's default value. * * Update the range of a control on the fly. This works for control types * INTEGER, BOOLEAN, MENU, INTEGER MENU and BITMASK. For menu controls the * @step value is interpreted as a menu_skip_mask. * * An error is returned if one of the range arguments is invalid for this * control type. * * This function assumes that the control handler is not locked and will * take the lock itself. */ static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, s64 min, s64 max, u64 step, s64 def) { int rval; v4l2_ctrl_lock(ctrl); rval = __v4l2_ctrl_modify_range(ctrl, min, max, step, def); v4l2_ctrl_unlock(ctrl); return rval; } /** *__v4l2_ctrl_modify_dimensions() - Unlocked variant of v4l2_ctrl_modify_dimensions() * * @ctrl: The control to update. * @dims: The control's new dimensions. * * Update the dimensions of an array control on the fly. The elements of the * array are reset to their default value, even if the dimensions are * unchanged. * * An error is returned if @dims is invalid for this control. * * The caller is responsible for acquiring the control handler mutex on behalf * of __v4l2_ctrl_modify_dimensions(). * * Note: calling this function when the same control is used in pending requests * is untested. It should work (a request with the wrong size of the control * will drop that control silently), but it will be very confusing. */ int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, u32 dims[V4L2_CTRL_MAX_DIMS]); /** * v4l2_ctrl_modify_dimensions() - Update the dimensions of an array control. * * @ctrl: The control to update. * @dims: The control's new dimensions. * * Update the dimensions of an array control on the fly. The elements of the * array are reset to their default value, even if the dimensions are * unchanged. * * An error is returned if @dims is invalid for this control type. * * This function assumes that the control handler is not locked and will * take the lock itself. * * Note: calling this function when the same control is used in pending requests * is untested. 
It should work (a request with the wrong size of the control * will drop that control silently), but it will be very confusing. */ static inline int v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, u32 dims[V4L2_CTRL_MAX_DIMS]) { int rval; v4l2_ctrl_lock(ctrl); rval = __v4l2_ctrl_modify_dimensions(ctrl, dims); v4l2_ctrl_unlock(ctrl); return rval; } /** * v4l2_ctrl_notify() - Function to set a notify callback for a control. * * @ctrl: The control. * @notify: The callback function. * @priv: The callback private handle, passed as argument to the callback. * * This function sets a callback function for the control. If @ctrl is NULL, * then it will do nothing. If @notify is NULL, then the notify callback will * be removed. * * There can be only one notify. If another already exists, then a WARN_ON * will be issued and the function will do nothing. */ void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv); /** * v4l2_ctrl_get_name() - Get the name of the control * * @id: The control ID. * * This function returns the name of the given control ID or NULL if it isn't * a known control. */ const char *v4l2_ctrl_get_name(u32 id); /** * v4l2_ctrl_get_menu() - Get the menu string array of the control * * @id: The control ID. * * This function returns the NULL-terminated menu string array of the * given control ID or NULL if it isn't a known menu control. */ const char * const *v4l2_ctrl_get_menu(u32 id); /** * v4l2_ctrl_get_int_menu() - Get the integer menu array of the control * * @id: The control ID. * @len: The size of the integer array. * * This function returns the integer array of the given control ID or NULL * if it isn't a known integer menu control. */ const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len); /** * v4l2_ctrl_g_ctrl() - Helper function to get the control's value from * within a driver. * * @ctrl: The control. * * This returns the control's value safely by going through the control * framework. This function will lock the control's handler, so it cannot be * used from within the &v4l2_ctrl_ops functions. * * This function is for integer type controls only. */ s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl); /** * __v4l2_ctrl_s_ctrl() - Unlocked variant of v4l2_ctrl_s_ctrl(). * * @ctrl: The control. * @val: The new value. * * This sets the control's new value safely by going through the control * framework. This function assumes the control's handler is already locked, * allowing it to be used from within the &v4l2_ctrl_ops functions. * * This function is for integer type controls only. */ int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val); /** * v4l2_ctrl_s_ctrl() - Helper function to set the control's value from * within a driver. * @ctrl: The control. * @val: The new value. * * This sets the control's new value safely by going through the control * framework. This function will lock the control's handler, so it cannot be * used from within the &v4l2_ctrl_ops functions. * * This function is for integer type controls only. */ static inline int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val) { int rval; v4l2_ctrl_lock(ctrl); rval = __v4l2_ctrl_s_ctrl(ctrl, val); v4l2_ctrl_unlock(ctrl); return rval; } /** * v4l2_ctrl_g_ctrl_int64() - Helper function to get a 64-bit control's value * from within a driver. * * @ctrl: The control. * * This returns the control's value safely by going through the control * framework. This function will lock the control's handler, so it cannot be * used from within the &v4l2_ctrl_ops functions.
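 *
 * As an illustration (a sketch; the sensor handle and the error handling
 * are assumed), a bridge driver often reads a subdevice's 64-bit pixel
 * rate control this way:
 *
 *	struct v4l2_ctrl *ctrl;
 *	s64 pixel_rate = 0;
 *
 *	ctrl = v4l2_ctrl_find(sensor->ctrl_handler, V4L2_CID_PIXEL_RATE);
 *	if (ctrl)
 *		pixel_rate = v4l2_ctrl_g_ctrl_int64(ctrl);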
* * This function is for 64-bit integer type controls only. */ s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl); /** * __v4l2_ctrl_s_ctrl_int64() - Unlocked variant of v4l2_ctrl_s_ctrl_int64(). * * @ctrl: The control. * @val: The new value. * * This sets the control's new value safely by going through the control * framework. This function assumes the control's handler is already locked, * allowing it to be used from within the &v4l2_ctrl_ops functions. * * This function is for 64-bit integer type controls only. */ int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val); /** * v4l2_ctrl_s_ctrl_int64() - Helper function to set a 64-bit control's value * from within a driver. * * @ctrl: The control. * @val: The new value. * * This sets the control's new value safely by going through the control * framework. This function will lock the control's handler, so it cannot be * used from within the &v4l2_ctrl_ops functions. * * This function is for 64-bit integer type controls only. */ static inline int v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val) { int rval; v4l2_ctrl_lock(ctrl); rval = __v4l2_ctrl_s_ctrl_int64(ctrl, val); v4l2_ctrl_unlock(ctrl); return rval; } /** * __v4l2_ctrl_s_ctrl_string() - Unlocked variant of v4l2_ctrl_s_ctrl_string(). * * @ctrl: The control. * @s: The new string. * * This sets the control's new string safely by going through the control * framework. This function assumes the control's handler is already locked, * allowing it to be used from within the &v4l2_ctrl_ops functions. * * This function is for string type controls only. */ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s); /** * v4l2_ctrl_s_ctrl_string() - Helper function to set a control's string value * from within a driver. * * @ctrl: The control. * @s: The new string. * * This sets the control's new string safely by going through the control * framework. This function will lock the control's handler, so it cannot be * used from within the &v4l2_ctrl_ops functions. * * This function is for string type controls only. */ static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) { int rval; v4l2_ctrl_lock(ctrl); rval = __v4l2_ctrl_s_ctrl_string(ctrl, s); v4l2_ctrl_unlock(ctrl); return rval; } /** * __v4l2_ctrl_s_ctrl_compound() - Unlocked variant to set a compound control * * @ctrl: The control. * @type: The type of the data. * @p: The new compound payload. * * This sets the control's new compound payload safely by going through the * control framework. This function assumes the control's handler is already * locked, allowing it to be used from within the &v4l2_ctrl_ops functions. * * This function is for compound type controls only. */ int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, enum v4l2_ctrl_type type, const void *p); /** * v4l2_ctrl_s_ctrl_compound() - Helper function to set a compound control * from within a driver. * * @ctrl: The control. * @type: The type of the data. * @p: The new compound payload. * * This sets the control's new compound payload safely by going through the * control framework. This function will lock the control's handler, so it * cannot be used from within the &v4l2_ctrl_ops functions. * * This function is for compound type controls only. 
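 *
 * A short sketch (the control pointer is an assumed driver field; the
 * v4l2_ctrl_s_ctrl_area() helper defined below wraps exactly this call
 * for %V4L2_CTRL_TYPE_AREA payloads):
 *
 *	struct v4l2_area unit_cell = { .width = 1400, .height = 1400 };
 *
 *	v4l2_ctrl_s_ctrl_compound(foo->unit_cell_ctrl, V4L2_CTRL_TYPE_AREA,
 *				  &unit_cell);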
*/ static inline int v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, enum v4l2_ctrl_type type, const void *p) { int rval; v4l2_ctrl_lock(ctrl); rval = __v4l2_ctrl_s_ctrl_compound(ctrl, type, p); v4l2_ctrl_unlock(ctrl); return rval; } /* Helper defines for area type controls */ #define __v4l2_ctrl_s_ctrl_area(ctrl, area) \ __v4l2_ctrl_s_ctrl_compound((ctrl), V4L2_CTRL_TYPE_AREA, (area)) #define v4l2_ctrl_s_ctrl_area(ctrl, area) \ v4l2_ctrl_s_ctrl_compound((ctrl), V4L2_CTRL_TYPE_AREA, (area)) /* Internal helper functions that deal with control events. */ extern const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops; /** * v4l2_ctrl_replace - Function to be used as a callback to * &struct v4l2_subscribed_event_ops replace\(\) * * @old: pointer to struct &v4l2_event with the reported * event; * @new: pointer to struct &v4l2_event with the modified * event; */ void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new); /** * v4l2_ctrl_merge - Function to be used as a callback to * &struct v4l2_subscribed_event_ops merge\(\) * * @old: pointer to struct &v4l2_event with the reported * event; * @new: pointer to struct &v4l2_event with the merged * event; */ void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new); /** * v4l2_ctrl_log_status - helper function to implement %VIDIOC_LOG_STATUS ioctl * * @file: pointer to struct file * @fh: unused. Kept just to be compatible with the arguments expected by * &struct v4l2_ioctl_ops.vidioc_log_status. * * Can be used as a vidioc_log_status function that just dumps all controls * associated with the filehandle. */ int v4l2_ctrl_log_status(struct file *file, void *fh); /** * v4l2_ctrl_subscribe_event - Subscribes to an event * * @fh: pointer to struct v4l2_fh * @sub: pointer to &struct v4l2_event_subscription * * Can be used as a vidioc_subscribe_event function that just subscribes * control events. */ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub); /** * v4l2_ctrl_poll - function to be used as a callback to poll() * that just polls for control events. * * @file: pointer to struct file * @wait: pointer to struct poll_table_struct */ __poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait); /** * v4l2_ctrl_request_setup - helper function to apply control values in a request * * @req: The request * @parent: The parent control handler ('priv' in media_request_object_find()) * * This is a helper function to call the control handler's s_ctrl callback with * the control values contained in the request. Do note that this approach of * applying control values in a request is only applicable to memory-to-memory * devices. */ int v4l2_ctrl_request_setup(struct media_request *req, struct v4l2_ctrl_handler *parent); /** * v4l2_ctrl_request_complete - Complete a control handler request object * * @req: The request * @parent: The parent control handler ('priv' in media_request_object_find()) * * This function is to be called on each control handler that may have had a * request object associated with it, i.e. control handlers of a driver that * supports requests. * * The function first obtains the values of any volatile controls in the control * handler and attaches them to the request. Then, the function completes the * request object.
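 *
 * A sketch of the order in which a mem2mem driver typically calls the two
 * request helpers (names assumed; error handling trimmed):
 *
 *	src = v4l2_m2m_next_src_buf(foo->fh.m2m_ctx);
 *	v4l2_ctrl_request_setup(src->vb2_buf.req_obj.req, &foo->hdl);
 *	... program the hardware and run the job ...
 *	v4l2_ctrl_request_complete(src->vb2_buf.req_obj.req, &foo->hdl);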
*/ void v4l2_ctrl_request_complete(struct media_request *req, struct v4l2_ctrl_handler *parent); /** * v4l2_ctrl_request_hdl_find - Find the control handler in the request * * @req: The request * @parent: The parent control handler ('priv' in media_request_object_find()) * * This function finds the control handler in the request. It may return * NULL if not found. When done, you must call v4l2_ctrl_request_hdl_put() * with the returned handler pointer. * * If the request is not in state VALIDATING or QUEUED, then this function * will always return NULL. * * Note that in state VALIDATING the req_queue_mutex is held, so * no objects can be added or deleted from the request. * * In state QUEUED it is the driver that will have to ensure this. */ struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req, struct v4l2_ctrl_handler *parent); /** * v4l2_ctrl_request_hdl_put - Put the control handler * * @hdl: Put this control handler * * This function releases the control handler previously obtained from * v4l2_ctrl_request_hdl_find(). */ static inline void v4l2_ctrl_request_hdl_put(struct v4l2_ctrl_handler *hdl) { if (hdl) media_request_object_put(&hdl->req_obj); } /** * v4l2_ctrl_request_hdl_ctrl_find() - Find a control with the given ID. * * @hdl: The control handler from the request. * @id: The ID of the control to find. * * This function returns a pointer to the control if this control is * part of the request or NULL otherwise. */ struct v4l2_ctrl * v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id); /* Helpers for ioctl_ops */ /** * v4l2_queryctrl - Helper function to implement * :ref:`VIDIOC_QUERYCTRL <vidioc_queryctrl>` ioctl * * @hdl: pointer to &struct v4l2_ctrl_handler * @qc: pointer to &struct v4l2_queryctrl * * If hdl == NULL then they will all return -EINVAL. */ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc); /** * v4l2_query_ext_ctrl_to_v4l2_queryctrl - Convert a v4l2_query_ext_ctrl to a v4l2_queryctrl. * * @to: The v4l2_queryctrl to write to. * @from: The v4l2_query_ext_ctrl to read from. * * This function is a helper to convert a v4l2_query_ext_ctrl into a * v4l2_queryctrl. */ void v4l2_query_ext_ctrl_to_v4l2_queryctrl(struct v4l2_queryctrl *to, const struct v4l2_query_ext_ctrl *from); /** * v4l2_query_ext_ctrl - Helper function to implement * :ref:`VIDIOC_QUERY_EXT_CTRL <vidioc_queryctrl>` ioctl * * @hdl: pointer to &struct v4l2_ctrl_handler * @qc: pointer to &struct v4l2_query_ext_ctrl * * If hdl == NULL then they will all return -EINVAL. */ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc); /** * v4l2_querymenu - Helper function to implement * :ref:`VIDIOC_QUERYMENU <vidioc_queryctrl>` ioctl * * @hdl: pointer to &struct v4l2_ctrl_handler * @qm: pointer to &struct v4l2_querymenu * * If hdl == NULL then they will all return -EINVAL. */ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm); /** * v4l2_g_ctrl - Helper function to implement * :ref:`VIDIOC_G_CTRL <vidioc_g_ctrl>` ioctl * * @hdl: pointer to &struct v4l2_ctrl_handler * @ctrl: pointer to &struct v4l2_control * * If hdl == NULL then they will all return -EINVAL. */ int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *ctrl); /** * v4l2_s_ctrl - Helper function to implement * :ref:`VIDIOC_S_CTRL <vidioc_g_ctrl>` ioctl * * @fh: pointer to &struct v4l2_fh * @hdl: pointer to &struct v4l2_ctrl_handler * @ctrl: pointer to &struct v4l2_control * * If hdl == NULL then they will all return -EINVAL.
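 *
 * Note that drivers rarely call these ioctl helpers directly: assigning
 * the handler to the ctrl_handler field of &struct video_device (or of
 * &struct v4l2_device) lets the V4L2 core route all control ioctls
 * through it automatically, e.g. (sketch):
 *
 *	foo->vdev.ctrl_handler = &foo->hdl;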
*/ int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, struct v4l2_control *ctrl); /** * v4l2_g_ext_ctrls - Helper function to implement * :ref:`VIDIOC_G_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl * * @hdl: pointer to &struct v4l2_ctrl_handler * @vdev: pointer to &struct video_device * @mdev: pointer to &struct media_device * @c: pointer to &struct v4l2_ext_controls * * If hdl == NULL then they will all return -EINVAL. */ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *c); /** * v4l2_try_ext_ctrls - Helper function to implement * :ref:`VIDIOC_TRY_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl * * @hdl: pointer to &struct v4l2_ctrl_handler * @vdev: pointer to &struct video_device * @mdev: pointer to &struct media_device * @c: pointer to &struct v4l2_ext_controls * * If hdl == NULL then they will all return -EINVAL. */ int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *c); /** * v4l2_s_ext_ctrls - Helper function to implement * :ref:`VIDIOC_S_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl * * @fh: pointer to &struct v4l2_fh * @hdl: pointer to &struct v4l2_ctrl_handler * @vdev: pointer to &struct video_device * @mdev: pointer to &struct media_device * @c: pointer to &struct v4l2_ext_controls * * If hdl == NULL then they will all return -EINVAL. */ int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *c); /** * v4l2_ctrl_subdev_subscribe_event - Helper function to implement * as a &struct v4l2_subdev_core_ops subscribe_event function * that just subscribes control events. * * @sd: pointer to &struct v4l2_subdev * @fh: pointer to &struct v4l2_fh * @sub: pointer to &struct v4l2_event_subscription */ int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, struct v4l2_event_subscription *sub); /** * v4l2_ctrl_subdev_log_status - Log all controls owned by subdev's control * handler. * * @sd: pointer to &struct v4l2_subdev */ int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd); /** * v4l2_ctrl_new_fwnode_properties() - Register controls for the device * properties * * @hdl: pointer to &struct v4l2_ctrl_handler to register controls on * @ctrl_ops: pointer to &struct v4l2_ctrl_ops to register controls with * @p: pointer to &struct v4l2_fwnode_device_properties * * This function registers controls associated to device properties, using the * property values contained in @p parameter, if the property has been set to * a value. * * Currently the following v4l2 controls are parsed and registered: * - V4L2_CID_CAMERA_ORIENTATION * - V4L2_CID_CAMERA_SENSOR_ROTATION; * * Controls already registered by the caller with the @hdl control handler are * not overwritten. Callers should register the controls they want to handle * themselves before calling this function. * * Return: 0 on success, a negative error code on failure. */ int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_ops *ctrl_ops, const struct v4l2_fwnode_device_properties *p); /** * v4l2_ctrl_type_op_equal - Default v4l2_ctrl_type_ops equal callback. * * @ctrl: The v4l2_ctrl pointer. * @ptr1: A v4l2 control value. * @ptr2: A v4l2 control value. * * Return: true if values are equal, otherwise false. 
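 *
 * A driver-specific &struct v4l2_ctrl_type_ops can fall back on these
 * default callbacks for anything it does not need to override
 * (illustrative sketch; foo_type_log is an assumed driver function):
 *
 *	static const struct v4l2_ctrl_type_ops foo_type_ops = {
 *		.equal = v4l2_ctrl_type_op_equal,
 *		.init = v4l2_ctrl_type_op_init,
 *		.log = foo_type_log,
 *		.validate = v4l2_ctrl_type_op_validate,
 *	};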
*/ bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2); /** * v4l2_ctrl_type_op_init - Default v4l2_ctrl_type_ops init callback. * * @ctrl: The v4l2_ctrl pointer. * @from_idx: Starting element index. * @ptr: The v4l2 control value. * * Return: void */ void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx, union v4l2_ctrl_ptr ptr); /** * v4l2_ctrl_type_op_log - Default v4l2_ctrl_type_ops log callback. * * @ctrl: The v4l2_ctrl pointer. * * Return: void */ void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl); /** * v4l2_ctrl_type_op_validate - Default v4l2_ctrl_type_ops validate callback. * * @ctrl: The v4l2_ctrl pointer. * @ptr: The v4l2 control value. * * Return: 0 on success, a negative error code on failure. */ int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr); #endif
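/*
 * Illustrative sketch (not part of the header above): registering a
 * driver-private control via struct v4l2_ctrl_config and
 * v4l2_ctrl_new_custom(). All "foo" names and the control ID are
 * hypothetical.
 *
 *	static const struct v4l2_ctrl_config foo_test_pattern = {
 *		.ops	= &foo_ctrl_ops,
 *		.id	= V4L2_CID_USER_BASE | 0x1001,
 *		.name	= "Foo Test Pattern",
 *		.type	= V4L2_CTRL_TYPE_BOOLEAN,
 *		.min	= 0,
 *		.max	= 1,
 *		.step	= 1,
 *		.def	= 0,
 *	};
 *
 *	ctrl = v4l2_ctrl_new_custom(&foo->hdl, &foo_test_pattern, NULL);
 */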
// SPDX-License-Identifier: GPL-2.0-only /*************************************************************************** * Copyright (C) 2010-2012 by Bruno Prémont <bonbons@linux-vserver.org> * * * * Based on Logitech G13 driver (v0.4) * * Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu> * * * ***************************************************************************/ #include <linux/hid.h> #include <linux/vmalloc.h> #include <linux/fb.h> #include <linux/module.h> #include "hid-picolcd.h" /* Framebuffer * * The PicoLCD uses a Topway LCD module of 256x64 pixels. * This display area is tiled over 4 controllers with 8 tiles * each. Each tile has 8x64 pixels, each data byte representing * a 1-bit wide vertical line of the tile. * * The display can be updated at a tile granularity. * * Chip 1 Chip 2 Chip 3 Chip 4 * +----------------+----------------+----------------+----------------+ * | Tile 1 | Tile 1 | Tile 1 | Tile 1 | * +----------------+----------------+----------------+----------------+ * | Tile 2 | Tile 2 | Tile 2 | Tile 2 | * +----------------+----------------+----------------+----------------+ * ...
/* Framebuffer visual structures */
static const struct fb_fix_screeninfo picolcdfb_fix = {
	.id          = PICOLCDFB_NAME,
	.type        = FB_TYPE_PACKED_PIXELS,
	.visual      = FB_VISUAL_MONO01,
	.xpanstep    = 0,
	.ypanstep    = 0,
	.ywrapstep   = 0,
	.line_length = PICOLCDFB_WIDTH / 8,
	.accel       = FB_ACCEL_NONE,
};

static const struct fb_var_screeninfo picolcdfb_var = {
	.xres           = PICOLCDFB_WIDTH,
	.yres           = PICOLCDFB_HEIGHT,
	.xres_virtual   = PICOLCDFB_WIDTH,
	.yres_virtual   = PICOLCDFB_HEIGHT,
	.width          = 103,
	.height         = 26,
	.bits_per_pixel = 1,
	.grayscale      = 1,
	.red            = { .offset = 0, .length = 1, .msb_right = 0, },
	.green          = { .offset = 0, .length = 1, .msb_right = 0, },
	.blue           = { .offset = 0, .length = 1, .msb_right = 0, },
	.transp         = { .offset = 0, .length = 0, .msb_right = 0, },
};

/* Send a given tile to PicoLCD */
static int picolcd_fb_send_tile(struct picolcd_data *data, u8 *vbitmap,
		int chip, int tile)
{
	struct hid_report *report1, *report2;
	unsigned long flags;
	u8 *tdata;
	int i;

	report1 = picolcd_out_report(REPORT_LCD_CMD_DATA, data->hdev);
	if (!report1 || report1->maxfield != 1)
		return -ENODEV;
	report2 = picolcd_out_report(REPORT_LCD_DATA, data->hdev);
	if (!report2 || report2->maxfield != 1)
		return -ENODEV;

	spin_lock_irqsave(&data->lock, flags);
	if ((data->status & PICOLCD_FAILED)) {
		spin_unlock_irqrestore(&data->lock, flags);
		return -ENODEV;
	}
	hid_set_field(report1->field[0],  0, chip << 2);
	hid_set_field(report1->field[0],  1, 0x02);
	hid_set_field(report1->field[0],  2, 0x00);
	hid_set_field(report1->field[0],  3, 0x00);
	hid_set_field(report1->field[0],  4, 0xb8 | tile);
	hid_set_field(report1->field[0],  5, 0x00);
	hid_set_field(report1->field[0],  6, 0x00);
	hid_set_field(report1->field[0],  7, 0x40);
	hid_set_field(report1->field[0],  8, 0x00);
	hid_set_field(report1->field[0],  9, 0x00);
	hid_set_field(report1->field[0], 10,   32);

	hid_set_field(report2->field[0],  0, (chip << 2) | 0x01);
	hid_set_field(report2->field[0],  1, 0x00);
	hid_set_field(report2->field[0],  2, 0x00);
	hid_set_field(report2->field[0],  3,   32);

	tdata = vbitmap + (tile * 4 + chip) * 64;
	for (i = 0; i < 64; i++)
		if (i < 32)
			hid_set_field(report1->field[0], 11 + i, tdata[i]);
		else
			hid_set_field(report2->field[0], 4 + i - 32, tdata[i]);

	hid_hw_request(data->hdev, report1, HID_REQ_SET_REPORT);
	hid_hw_request(data->hdev, report2, HID_REQ_SET_REPORT);
	spin_unlock_irqrestore(&data->lock, flags);
	return 0;
}

/* Translate a single tile */
static int picolcd_fb_update_tile(u8 *vbitmap, const u8 *bitmap, int bpp,
		int chip, int tile)
{
	int i, b, changed = 0;
	u8 tdata[64];
	u8 *vdata = vbitmap + (tile * 4 + chip) * 64;

	if (bpp == 1) {
		for (b = 7; b >= 0; b--) {
			const u8 *bdata = bitmap + tile * 256 + chip * 8 + b * 32;
			for (i = 0; i < 64; i++) {
				tdata[i] <<= 1;
				tdata[i] |= (bdata[i/8] >> (i % 8)) & 0x01;
			}
		}
	} else if (bpp == 8) {
		for (b = 7; b >= 0; b--) {
			const u8 *bdata = bitmap + (tile * 256 + chip * 8 + b * 32) * 8;
			for (i = 0; i < 64; i++) {
				tdata[i] <<= 1;
				tdata[i] |= (bdata[i] & 0x80) ? 0x01 : 0x00;
			}
		}
	} else {
		/* Oops, we should never get here! */
		WARN_ON(1);
		return 0;
	}

	for (i = 0; i < 64; i++)
		if (tdata[i] != vdata[i]) {
			changed = 1;
			vdata[i] = tdata[i];
		}
	return changed;
}
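/*
 * Standalone demonstration (not part of the original file) of the vertical
 * byte packing that picolcd_fb_update_tile() performs for the 1bpp case:
 * eight 1bpp scanline fragments are transposed into per-column bytes.
 * Plain user-space C, buildable with any compiler; the input pattern is
 * arbitrary test data.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t rows[8] = { 0xff, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0xff };
	uint8_t cols[8] = { 0 };
	int b, i;

	for (b = 7; b >= 0; b--)		/* row within the tile */
		for (i = 0; i < 8; i++) {	/* column within the fragment */
			cols[i] <<= 1;
			cols[i] |= (rows[b] >> i) & 0x01;
		}

	for (i = 0; i < 8; i++)
		printf("column %d: 0x%02x\n", i, cols[i]);
	return 0;
}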
void picolcd_fb_refresh(struct picolcd_data *data)
{
	if (data->fb_info)
		schedule_delayed_work(&data->fb_info->deferred_work, 0);
}

/* Reconfigure LCD display */
int picolcd_fb_reset(struct picolcd_data *data, int clear)
{
	struct hid_report *report = picolcd_out_report(REPORT_LCD_CMD, data->hdev);
	struct picolcd_fb_data *fbdata = data->fb_info->par;
	int i, j;
	unsigned long flags;
	static const u8 mapcmd[8] = { 0x00, 0x02, 0x00, 0x64, 0x3f, 0x00, 0x64, 0xc0 };

	if (!report || report->maxfield != 1)
		return -ENODEV;

	spin_lock_irqsave(&data->lock, flags);
	for (i = 0; i < 4; i++) {
		for (j = 0; j < report->field[0]->maxusage; j++)
			if (j == 0)
				hid_set_field(report->field[0], j, i << 2);
			else if (j < sizeof(mapcmd))
				hid_set_field(report->field[0], j, mapcmd[j]);
			else
				hid_set_field(report->field[0], j, 0);
		hid_hw_request(data->hdev, report, HID_REQ_SET_REPORT);
	}
	spin_unlock_irqrestore(&data->lock, flags);

	if (clear) {
		memset(fbdata->vbitmap, 0, PICOLCDFB_SIZE);
		memset(fbdata->bitmap, 0, PICOLCDFB_SIZE*fbdata->bpp);
	}
	fbdata->force = 1;

	/* schedule first output of framebuffer */
	if (fbdata->ready)
		schedule_delayed_work(&data->fb_info->deferred_work, 0);
	else
		fbdata->ready = 1;

	return 0;
}

/* Update fb_vbitmap from the screen_buffer and send changed tiles to device */
static void picolcd_fb_update(struct fb_info *info)
{
	int chip, tile, n;
	unsigned long flags;
	struct picolcd_fb_data *fbdata = info->par;
	struct picolcd_data *data;

	mutex_lock(&info->lock);

	spin_lock_irqsave(&fbdata->lock, flags);
	if (!fbdata->ready && fbdata->picolcd)
		picolcd_fb_reset(fbdata->picolcd, 0);
	spin_unlock_irqrestore(&fbdata->lock, flags);

	/*
	 * Translate the framebuffer into the format needed by the PicoLCD.
	 * See display layout above.
	 * Do this one tile after the other and push those tiles that changed.
	 *
	 * Wait for our IO to complete as otherwise we might flood the queue!
	 */
	n = 0;
	for (chip = 0; chip < 4; chip++)
		for (tile = 0; tile < 8; tile++) {
			if (!fbdata->force && !picolcd_fb_update_tile(
					fbdata->vbitmap, fbdata->bitmap,
					fbdata->bpp, chip, tile))
				continue;
			n += 2;
			if (n >= HID_OUTPUT_FIFO_SIZE / 2) {
				spin_lock_irqsave(&fbdata->lock, flags);
				data = fbdata->picolcd;
				spin_unlock_irqrestore(&fbdata->lock, flags);
				mutex_unlock(&info->lock);
				if (!data)
					return;
				hid_hw_wait(data->hdev);
				mutex_lock(&info->lock);
				n = 0;
			}
			spin_lock_irqsave(&fbdata->lock, flags);
			data = fbdata->picolcd;
			spin_unlock_irqrestore(&fbdata->lock, flags);
			if (!data || picolcd_fb_send_tile(data,
					fbdata->vbitmap, chip, tile))
				goto out;
		}
	fbdata->force = false;
	if (n) {
		spin_lock_irqsave(&fbdata->lock, flags);
		data = fbdata->picolcd;
		spin_unlock_irqrestore(&fbdata->lock, flags);
		mutex_unlock(&info->lock);
		if (data)
			hid_hw_wait(data->hdev);
		return;
	}
out:
	mutex_unlock(&info->lock);
}

static int picolcd_fb_blank(int blank, struct fb_info *info)
{
	/* We let fb notification do this for us via lcd/backlight device */
	return 0;
}

static void picolcd_fb_destroy(struct fb_info *info)
{
	struct picolcd_fb_data *fbdata = info->par;

	/* make sure no work is deferred */
	fb_deferred_io_cleanup(info);

	/* No third party should ever unregister our framebuffer! */
	WARN_ON(fbdata->picolcd != NULL);

	vfree((u8 *)info->fix.smem_start);
	framebuffer_release(info);
}

static int picolcd_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	__u32 bpp      = var->bits_per_pixel;
	__u32 activate = var->activate;

	/* only allow 1/8 bit depth (8-bit is grayscale) */
	*var = picolcdfb_var;
	var->activate = activate;
	if (bpp >= 8) {
		var->bits_per_pixel = 8;
		var->red.length     = 8;
		var->green.length   = 8;
		var->blue.length    = 8;
	} else {
		var->bits_per_pixel = 1;
		var->red.length     = 1;
		var->green.length   = 1;
		var->blue.length    = 1;
	}
	return 0;
}

static int picolcd_set_par(struct fb_info *info)
{
	struct picolcd_fb_data *fbdata = info->par;
	u8 *tmp_fb, *o_fb;

	if (info->var.bits_per_pixel == fbdata->bpp)
		return 0;
	/* switch between 1/8 bit depths */
	if (info->var.bits_per_pixel != 1 && info->var.bits_per_pixel != 8)
		return -EINVAL;

	o_fb = fbdata->bitmap;
	tmp_fb = kmalloc_array(PICOLCDFB_SIZE, info->var.bits_per_pixel, GFP_KERNEL);
	if (!tmp_fb)
		return -ENOMEM;

	/* translate FB content to new bits-per-pixel */
	if (info->var.bits_per_pixel == 1) {
		int i, b;
		for (i = 0; i < PICOLCDFB_SIZE; i++) {
			u8 p = 0;
			for (b = 0; b < 8; b++) {
				p <<= 1;
				p |= o_fb[i*8+b] ? 0x01 : 0x00;
			}
			tmp_fb[i] = p;
		}
		memcpy(o_fb, tmp_fb, PICOLCDFB_SIZE);
		info->fix.visual = FB_VISUAL_MONO01;
		info->fix.line_length = PICOLCDFB_WIDTH / 8;
	} else {
		int i;
		memcpy(tmp_fb, o_fb, PICOLCDFB_SIZE);
		for (i = 0; i < PICOLCDFB_SIZE * 8; i++)
			o_fb[i] = tmp_fb[i/8] & (0x01 << (7 - i % 8)) ? 0xff : 0x00;
		info->fix.visual = FB_VISUAL_DIRECTCOLOR;
		info->fix.line_length = PICOLCDFB_WIDTH;
	}

	kfree(tmp_fb);
	fbdata->bpp = info->var.bits_per_pixel;
	return 0;
}
static void picolcdfb_ops_damage_range(struct fb_info *info, off_t off, size_t len)
{
	if (!info->par)
		return;
	schedule_delayed_work(&info->deferred_work, 0);
}

static void picolcdfb_ops_damage_area(struct fb_info *info, u32 x, u32 y,
				      u32 width, u32 height)
{
	if (!info->par)
		return;
	schedule_delayed_work(&info->deferred_work, 0);
}

FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(picolcdfb_ops,
				   picolcdfb_ops_damage_range,
				   picolcdfb_ops_damage_area)

static const struct fb_ops picolcdfb_ops = {
	.owner        = THIS_MODULE,
	FB_DEFAULT_DEFERRED_OPS(picolcdfb_ops),
	.fb_destroy   = picolcd_fb_destroy,
	.fb_blank     = picolcd_fb_blank,
	.fb_check_var = picolcd_fb_check_var,
	.fb_set_par   = picolcd_set_par,
};

/* Callback from deferred IO workqueue */
static void picolcd_fb_deferred_io(struct fb_info *info, struct list_head *pagereflist)
{
	picolcd_fb_update(info);
}

static const struct fb_deferred_io picolcd_fb_defio = {
	.delay = HZ / PICOLCDFB_UPDATE_RATE_DEFAULT,
	.deferred_io = picolcd_fb_deferred_io,
};

/*
 * The "fb_update_rate" sysfs attribute
 */
static ssize_t picolcd_fb_update_rate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct picolcd_data *data = dev_get_drvdata(dev);
	struct picolcd_fb_data *fbdata = data->fb_info->par;
	unsigned i, fb_update_rate = fbdata->update_rate;
	size_t ret = 0;

	for (i = 1; i <= PICOLCDFB_UPDATE_RATE_LIMIT; i++)
		if (i == fb_update_rate)
			ret += sysfs_emit_at(buf, ret, "[%u] ", i);
		else
			ret += sysfs_emit_at(buf, ret, "%u ", i);
	if (ret > 0)
		buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n';
	return ret;
}

static ssize_t picolcd_fb_update_rate_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct picolcd_data *data = dev_get_drvdata(dev);
	struct picolcd_fb_data *fbdata = data->fb_info->par;
	int i;
	unsigned u;

	if (count < 1 || count > 10)
		return -EINVAL;

	i = sscanf(buf, "%u", &u);
	if (i != 1)
		return -EINVAL;

	if (u > PICOLCDFB_UPDATE_RATE_LIMIT)
		return -ERANGE;
	else if (u == 0)
		u = PICOLCDFB_UPDATE_RATE_DEFAULT;

	fbdata->update_rate = u;
	data->fb_info->fbdefio->delay = HZ / fbdata->update_rate;
	return count;
}
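/*
 * Note (editorial addition, not in the original file): all writes to the
 * framebuffer funnel through the deferred-I/O machinery above. mmap page
 * faults are scheduled by the fb core using fbdefio->delay (HZ divided by
 * the current update rate, 1..10 Hz here), while the explicit damage hooks
 * schedule the work immediately. Either way, the workqueue ends up in
 * picolcd_fb_deferred_io() -> picolcd_fb_update(), which diffs the shadow
 * buffer and pushes only the tiles that changed. Writing a larger value to
 * the "fb_update_rate" attribute therefore shortens the coalescing delay.
 */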
static DEVICE_ATTR(fb_update_rate, 0664, picolcd_fb_update_rate_show,
		picolcd_fb_update_rate_store);

/* initialize Framebuffer device */
int picolcd_init_framebuffer(struct picolcd_data *data)
{
	struct device *dev = &data->hdev->dev;
	struct fb_info *info = NULL;
	struct picolcd_fb_data *fbdata = NULL;
	int i, error = -ENOMEM;
	u32 *palette;

	/* The extra memory is:
	 * - 256*u32 for pseudo_palette
	 * - struct fb_deferred_io
	 */
	info = framebuffer_alloc(256 * sizeof(u32) +
			sizeof(struct fb_deferred_io) +
			sizeof(struct picolcd_fb_data) +
			PICOLCDFB_SIZE, dev);
	if (!info)
		goto err_nomem;

	info->fbdefio = info->par;
	*info->fbdefio = picolcd_fb_defio;
	info->par += sizeof(struct fb_deferred_io);
	palette = info->par;
	info->par += 256 * sizeof(u32);
	for (i = 0; i < 256; i++)
		palette[i] = i > 0 && i < 16 ? 0xff : 0;
	info->pseudo_palette = palette;
	info->fbops = &picolcdfb_ops;
	info->var = picolcdfb_var;
	info->fix = picolcdfb_fix;
	info->fix.smem_len = PICOLCDFB_SIZE*8;
#ifdef CONFIG_FB_BACKLIGHT
#ifdef CONFIG_HID_PICOLCD_BACKLIGHT
	info->bl_dev = data->backlight;
#endif
#endif
#ifdef CONFIG_HID_PICOLCD_LCD
	info->lcd_dev = data->lcd;
#endif

	fbdata = info->par;
	spin_lock_init(&fbdata->lock);
	fbdata->picolcd = data;
	fbdata->update_rate = PICOLCDFB_UPDATE_RATE_DEFAULT;
	fbdata->bpp = picolcdfb_var.bits_per_pixel;
	fbdata->force = 1;
	fbdata->vbitmap = info->par + sizeof(struct picolcd_fb_data);
	fbdata->bitmap = vmalloc(PICOLCDFB_SIZE*8);
	if (fbdata->bitmap == NULL) {
		dev_err(dev, "can't get a free page for framebuffer\n");
		goto err_nomem;
	}
	info->flags |= FBINFO_VIRTFB;
	info->screen_buffer = fbdata->bitmap;
	info->fix.smem_start = (unsigned long)fbdata->bitmap;
	memset(fbdata->vbitmap, 0xff, PICOLCDFB_SIZE);
	data->fb_info = info;

	error = picolcd_fb_reset(data, 1);
	if (error) {
		dev_err(dev, "failed to configure display\n");
		goto err_cleanup;
	}

	error = device_create_file(dev, &dev_attr_fb_update_rate);
	if (error) {
		dev_err(dev, "failed to create sysfs attributes\n");
		goto err_cleanup;
	}

	fb_deferred_io_init(info);
	error = register_framebuffer(info);
	if (error) {
		dev_err(dev, "failed to register framebuffer\n");
		goto err_sysfs;
	}
	return 0;

err_sysfs:
	device_remove_file(dev, &dev_attr_fb_update_rate);
	fb_deferred_io_cleanup(info);
err_cleanup:
	data->fb_info = NULL;

err_nomem:
	if (fbdata)
		vfree(fbdata->bitmap);
	framebuffer_release(info);
	return error;
}

void picolcd_exit_framebuffer(struct picolcd_data *data)
{
	struct fb_info *info = data->fb_info;
	struct picolcd_fb_data *fbdata;
	unsigned long flags;

	if (!info)
		return;

	device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
	fbdata = info->par;

	/* disconnect framebuffer from HID dev */
	spin_lock_irqsave(&fbdata->lock, flags);
	fbdata->picolcd = NULL;
	spin_unlock_irqrestore(&fbdata->lock, flags);

	/* make sure there is no running update - thus that fbdata->picolcd
	 * once obtained under lock is guaranteed not to get free() under
	 * the feet of the deferred work */
	flush_delayed_work(&info->deferred_work);

	data->fb_info = NULL;
	unregister_framebuffer(info);
}
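/*
 * Editorial sketch (not part of the original file): layout of the extra
 * memory that picolcd_init_framebuffer() appends to struct fb_info via
 * framebuffer_alloc(), in the order the info->par pointer is advanced:
 *
 *   info->par --> +-----------------------------+
 *                 | struct fb_deferred_io       |  info->fbdefio
 *                 +-----------------------------+
 *                 | u32 palette[256]            |  info->pseudo_palette
 *                 +-----------------------------+
 *                 | struct picolcd_fb_data      |  fbdata (info->par after
 *                 +-----------------------------+  the two += steps above)
 *                 | u8 vbitmap[PICOLCDFB_SIZE]  |  fbdata->vbitmap
 *                 +-----------------------------+
 *
 * The bpp-expanded shadow bitmap (PICOLCDFB_SIZE * 8 bytes) lives in a
 * separate vmalloc() area pointed to by fbdata->bitmap.
 */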
// SPDX-License-Identifier: GPL-2.0
/*
 * udc.c - Core UDC Framework
 *
 * Copyright (C) 2010 Texas Instruments
 * Author: Felipe Balbi <balbi@ti.com>
 */

#define pr_fmt(fmt)	"UDC core: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task_stack.h>
#include <linux/workqueue.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb.h>

#include "trace.h"

static DEFINE_IDA(gadget_id_numbers);

static const struct bus_type gadget_bus_type;

/**
 * struct usb_udc - describes one usb device controller
 * @driver: the gadget driver pointer. For use by the class code
 * @dev: the child device to the actual controller
 * @gadget: the gadget. For use by the class code
 * @list: for use by the udc class driver
 * @vbus: for udcs who care about vbus status, this value is real vbus status;
 * for udcs who do not care about vbus status, this value is always true
 * @started: the UDC's started state. True if the UDC has been started.
 * @allow_connect: Indicates whether UDC is allowed to be pulled up.
 * Set/cleared by gadget_(un)bind_driver() after gadget driver is bound or
 * unbound.
 * @vbus_work: work routine to handle VBUS status change notifications.
 * @connect_lock: protects udc->started, gadget->connect,
 * gadget->allow_connect and gadget->deactivate. The routines
 * usb_gadget_connect_locked(), usb_gadget_disconnect_locked(),
 * usb_udc_connect_control_locked(), usb_gadget_udc_start_locked() and
 * usb_gadget_udc_stop_locked() are called with this lock held.
 *
 * This represents the internal data structure which is used by the UDC-class
 * to hold information about udc driver and gadget together.
 */
struct usb_udc {
	struct usb_gadget_driver	*driver;
	struct usb_gadget		*gadget;
	struct device			dev;
	struct list_head		list;
	bool				vbus;
	bool				started;
	bool				allow_connect;
	struct work_struct		vbus_work;
	struct mutex			connect_lock;
};

static const struct class udc_class;
static LIST_HEAD(udc_list);

/* Protects udc_list, udc->driver, driver->is_bound, and related calls */
static DEFINE_MUTEX(udc_lock);

/* ------------------------------------------------------------------------- */

/**
 * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint
 * @ep:the endpoint being configured
 * @maxpacket_limit:value of maximum packet size limit
 *
 * This function should be used only in UDC drivers to initialize endpoint
 * (usually in probe function).
 */
void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned maxpacket_limit)
{
	ep->maxpacket_limit = maxpacket_limit;
	ep->maxpacket = maxpacket_limit;

	trace_usb_ep_set_maxpacket_limit(ep, 0);
}
EXPORT_SYMBOL_GPL(usb_ep_set_maxpacket_limit);
/**
 * usb_ep_enable - configure endpoint, making it usable
 * @ep:the endpoint being configured. may not be the endpoint named "ep0".
 *	drivers discover endpoints through the ep_list of a usb_gadget.
 *
 * When configurations are set, or when interface settings change, the driver
 * will enable or disable the relevant endpoints. while it is enabled, an
 * endpoint may be used for i/o until the driver receives a disconnect() from
 * the host or until the endpoint is disabled.
 *
 * the ep0 implementation (which calls this routine) must ensure that the
 * hardware capabilities of each endpoint match the descriptor provided
 * for it. for example, an endpoint named "ep2in-bulk" would be usable
 * for interrupt transfers as well as bulk, but it likely couldn't be used
 * for iso transfers or for endpoint 14. some endpoints are fully
 * configurable, with more generic names like "ep-a". (remember that for
 * USB, "in" means "towards the USB host".)
 *
 * This routine may be called in an atomic (interrupt) context.
 *
 * returns zero, or a negative error code.
 */
int usb_ep_enable(struct usb_ep *ep)
{
	int ret = 0;

	if (ep->enabled)
		goto out;

	/* UDC drivers can't handle endpoints with maxpacket size 0 */
	if (!ep->desc || usb_endpoint_maxp(ep->desc) == 0) {
		WARN_ONCE(1, "%s: ep%d (%s) has %s\n", __func__, ep->address, ep->name,
			  (!ep->desc) ? "NULL descriptor" : "maxpacket 0");

		ret = -EINVAL;
		goto out;
	}

	ret = ep->ops->enable(ep, ep->desc);
	if (ret)
		goto out;

	ep->enabled = true;

out:
	trace_usb_ep_enable(ep, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_ep_enable);

/**
 * usb_ep_disable - endpoint is no longer usable
 * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0".
 *
 * no other task may be using this endpoint when this is called.
 * any pending and uncompleted requests will complete with status
 * indicating disconnect (-ESHUTDOWN) before this call returns.
 * gadget drivers must call usb_ep_enable() again before queueing
 * requests to the endpoint.
 *
 * This routine may be called in an atomic (interrupt) context.
 *
 * returns zero, or a negative error code.
 */
int usb_ep_disable(struct usb_ep *ep)
{
	int ret = 0;

	if (!ep->enabled)
		goto out;

	ret = ep->ops->disable(ep);
	if (ret)
		goto out;

	ep->enabled = false;

out:
	trace_usb_ep_disable(ep, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_ep_disable);
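/*
 * Illustrative sketch (not part of the original file): how a gadget
 * function driver typically uses the two calls above when an interface
 * altsetting is (de)selected. "example_set_alt", "my_ep" and "my_ep_desc"
 * are hypothetical names; error handling is trimmed.
 */
static int example_set_alt(struct usb_ep *my_ep,
			   struct usb_endpoint_descriptor *my_ep_desc)
{
	int ret;

	usb_ep_disable(my_ep);		/* no-op if not currently enabled */

	my_ep->desc = my_ep_desc;	/* normally set via config_ep_by_speed() */
	ret = usb_ep_enable(my_ep);
	if (ret < 0)
		return ret;

	return 0;
}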
/**
 * usb_ep_alloc_request - allocate a request object to use with this endpoint
 * @ep:the endpoint to be used with the request
 * @gfp_flags:GFP_* flags to use
 *
 * Request objects must be allocated with this call, since they normally
 * need controller-specific setup and may even need endpoint-specific
 * resources such as allocation of DMA descriptors.
 * Requests may be submitted with usb_ep_queue(), and receive a single
 * completion callback. Free requests with usb_ep_free_request(), when
 * they are no longer needed.
 *
 * Returns the request, or null if one could not be allocated.
 */
struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct usb_request *req = NULL;

	req = ep->ops->alloc_request(ep, gfp_flags);

	trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM);

	return req;
}
EXPORT_SYMBOL_GPL(usb_ep_alloc_request);

/**
 * usb_ep_free_request - frees a request object
 * @ep:the endpoint associated with the request
 * @req:the request being freed
 *
 * Reverses the effect of usb_ep_alloc_request().
 * Caller guarantees the request is not queued, and that it will
 * no longer be requeued (or otherwise used).
 */
void usb_ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	trace_usb_ep_free_request(ep, req, 0);
	ep->ops->free_request(ep, req);
}
EXPORT_SYMBOL_GPL(usb_ep_free_request);

/**
 * usb_ep_queue - queues (submits) an I/O request to an endpoint.
 * @ep:the endpoint associated with the request
 * @req:the request being submitted
 * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't
 *	pre-allocate all necessary memory with the request.
 *
 * This tells the device controller to perform the specified request through
 * that endpoint (reading or writing a buffer). When the request completes,
 * including being canceled by usb_ep_dequeue(), the request's completion
 * routine is called to return the request to the driver. Any endpoint
 * (except control endpoints like ep0) may have more than one transfer
 * request queued; they complete in FIFO order. Once a gadget driver
 * submits a request, that request may not be examined or modified until it
 * is given back to that driver through the completion callback.
 *
 * Each request is turned into one or more packets. The controller driver
 * never merges adjacent requests into the same packet. OUT transfers
 * will sometimes use data that's already buffered in the hardware.
 * Drivers can rely on the fact that the first byte of the request's buffer
 * always corresponds to the first byte of some USB packet, for both
 * IN and OUT transfers.
 *
 * Bulk endpoints can queue any amount of data; the transfer is packetized
 * automatically. The last packet will be short if the request doesn't fill it
 * out completely. Zero length packets (ZLPs) should be avoided in portable
 * protocols since not all usb hardware can successfully handle zero length
 * packets. (ZLPs may be explicitly written, and may be implicitly written if
 * the request 'zero' flag is set.) Bulk endpoints may also be used
 * for interrupt transfers; but the reverse is not true, and some endpoints
 * won't support every interrupt transfer. (Such as 768 byte packets.)
 *
 * Interrupt-only endpoints are less functional than bulk endpoints, for
 * example by not supporting queueing or not handling buffers that are
 * larger than the endpoint's maxpacket size. They may also treat data
 * toggle differently.
 *
 * Control endpoints ... after getting a setup() callback, the driver queues
 * one response (even if it would be zero length). That enables the
 * status ack, after transferring data as specified in the response. Setup
 * functions may return negative error codes to generate protocol stalls.
 * (Note that some USB device controllers disallow protocol stall responses
 * in some cases.) When control responses are deferred (the response is
 * written after the setup callback returns), then usb_ep_set_halt() may be
 * used on ep0 to trigger protocol stalls. Depending on the controller,
 * it may not be possible to trigger a status-stage protocol stall when the
 * data stage is over, that is, from within the response's completion
 * routine.
 *
 * For periodic endpoints, like interrupt or isochronous ones, the usb host
 * arranges to poll once per interval, and the gadget driver usually will
 * have queued some data to transfer at that time.
 *
 * Note that @req's ->complete() callback must never be called from
 * within usb_ep_queue() as that can create deadlock situations.
 *
 * This routine may be called in interrupt context.
 *
 * Returns zero, or a negative error code. Endpoints that are not enabled
 * report errors; errors will also be
 * reported when the usb peripheral is disconnected.
 *
 * If and only if @req is successfully queued (the return value is zero),
 * @req->complete() will be called exactly once, when the Gadget core and
 * UDC are finished with the request. When the completion function is called,
 * control of the request is returned to the device driver which submitted it.
 * The completion handler may then immediately free or reuse @req.
 */
int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags)
{
	int ret = 0;

	if (!ep->enabled && ep->address) {
		pr_debug("USB gadget: queue request to disabled ep 0x%x (%s)\n",
			 ep->address, ep->name);
		ret = -ESHUTDOWN;
		goto out;
	}

	ret = ep->ops->queue(ep, req, gfp_flags);

out:
	trace_usb_ep_queue(ep, req, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_ep_queue);
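/*
 * Illustrative sketch (not part of the original file): the usual
 * allocate/fill/queue/complete cycle built from the calls above.
 * "example_*" names are hypothetical; buf must point at DMA-capable
 * memory (e.g. kmalloc'ed), never stack or vmalloc space.
 */
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (req->status)	/* e.g. -ECONNRESET or -ESHUTDOWN */
		pr_debug("transfer ended with %d\n", req->status);
	usb_ep_free_request(ep, req);
}

static int example_submit(struct usb_ep *ep, void *buf, unsigned int len)
{
	struct usb_request *req;
	int ret;

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;
	req->complete = example_complete;

	ret = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (ret)
		usb_ep_free_request(ep, req);
	return ret;
}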
/**
 * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint
 * @ep:the endpoint associated with the request
 * @req:the request being canceled
 *
 * If the request is still active on the endpoint, it is dequeued and
 * eventually its completion routine is called (with status -ECONNRESET);
 * else a negative error code is returned. This routine is asynchronous,
 * that is, it may return before the completion routine runs.
 *
 * Note that some hardware can't clear out write fifos (to unlink the request
 * at the head of the queue) except as part of disconnecting from usb. Such
 * restrictions prevent drivers from supporting configuration changes,
 * even to configuration zero (a "chapter 9" requirement).
 *
 * This routine may be called in interrupt context.
 */
int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	int ret;

	ret = ep->ops->dequeue(ep, req);
	trace_usb_ep_dequeue(ep, req, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_ep_dequeue);

/**
 * usb_ep_set_halt - sets the endpoint halt feature.
 * @ep: the non-isochronous endpoint being stalled
 *
 * Use this to stall an endpoint, perhaps as an error report.
 * Except for control endpoints,
 * the endpoint stays halted (will not stream any data) until the host
 * clears this feature; drivers may need to empty the endpoint's request
 * queue first, to make sure no inappropriate transfers happen.
 *
 * Note that while an endpoint CLEAR_FEATURE will be invisible to the
 * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the
 * current altsetting, see usb_ep_clear_halt(). When switching altsettings,
 * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints.
 *
 * This routine may be called in interrupt context.
 *
 * Returns zero, or a negative error code. On success, this call sets
 * underlying hardware state that blocks data transfers.
 * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any
 * transfer requests are still queued, or if the controller hardware
 * (usually a FIFO) still holds bytes that the host hasn't collected.
 */
int usb_ep_set_halt(struct usb_ep *ep)
{
	int ret;

	ret = ep->ops->set_halt(ep, 1);
	trace_usb_ep_set_halt(ep, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_ep_set_halt);
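/*
 * Illustrative sketch (not part of the original file): reporting a protocol
 * error to the host by stalling an endpoint, as described above. The host
 * is expected to clear the halt with CLEAR_FEATURE(ENDPOINT_HALT);
 * "example_report_error" is a hypothetical name.
 */
static void example_report_error(struct usb_ep *ep)
{
	int ret = usb_ep_set_halt(ep);

	if (ret == -EAGAIN)
		pr_debug("ep still has queued transfers, not halted\n");
}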
/**
 * usb_ep_clear_halt - clears endpoint halt, and resets toggle
 * @ep:the bulk or interrupt endpoint being reset
 *
 * Use this when responding to the standard usb "set interface" request,
 * for endpoints that aren't reconfigured, after clearing any other state
 * in the endpoint's i/o queue.
 *
 * This routine may be called in interrupt context.
 *
 * Returns zero, or a negative error code. On success, this call clears
 * the underlying hardware state reflecting endpoint halt and data toggle.
 * Note that some hardware can't support this request (like pxa2xx_udc),
 * and accordingly can't correctly implement interface altsettings.
 */
int usb_ep_clear_halt(struct usb_ep *ep)
{
	int ret;

	ret = ep->ops->set_halt(ep, 0);
	trace_usb_ep_clear_halt(ep, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_ep_clear_halt);

/**
 * usb_ep_set_wedge - sets the halt feature and ignores clear requests
 * @ep: the endpoint being wedged
 *
 * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT)
 * requests. If the gadget driver clears the halt status, it will
 * automatically unwedge the endpoint.
 *
 * This routine may be called in interrupt context.
 *
 * Returns zero on success, else negative errno.
 */
int usb_ep_set_wedge(struct usb_ep *ep)
{
	int ret;

	if (ep->ops->set_wedge)
		ret = ep->ops->set_wedge(ep);
	else
		ret = ep->ops->set_halt(ep, 1);

	trace_usb_ep_set_wedge(ep, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_ep_set_wedge);

/**
 * usb_ep_fifo_status - returns number of bytes in fifo, or error
 * @ep: the endpoint whose fifo status is being checked.
 *
 * FIFO endpoints may have "unclaimed data" in them in certain cases,
 * such as after aborted transfers. Hosts may not have collected all
 * the IN data written by the gadget driver (and reported by a request
 * completion). The gadget driver may not have collected all the data
 * written OUT to it by the host. Drivers that need precise handling for
 * fault reporting or recovery may need to use this call.
 *
 * This routine may be called in interrupt context.
 *
 * This returns the number of such bytes in the fifo, or a negative
 * errno if the endpoint doesn't use a FIFO or doesn't support such
 * precise handling.
 */
int usb_ep_fifo_status(struct usb_ep *ep)
{
	int ret;

	if (ep->ops->fifo_status)
		ret = ep->ops->fifo_status(ep);
	else
		ret = -EOPNOTSUPP;

	trace_usb_ep_fifo_status(ep, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_ep_fifo_status);

/**
 * usb_ep_fifo_flush - flushes contents of a fifo
 * @ep: the endpoint whose fifo is being flushed.
 *
 * This call may be used to flush the "unclaimed data" that may exist in
 * an endpoint fifo after abnormal transaction terminations. The call
 * must never be used except when endpoint is not being used for any
 * protocol translation.
 *
 * This routine may be called in interrupt context.
 */
void usb_ep_fifo_flush(struct usb_ep *ep)
{
	if (ep->ops->fifo_flush)
		ep->ops->fifo_flush(ep);

	trace_usb_ep_fifo_flush(ep, 0);
}
EXPORT_SYMBOL_GPL(usb_ep_fifo_flush);

/* ------------------------------------------------------------------------- */
/**
 * usb_gadget_frame_number - returns the current frame number
 * @gadget: controller that reports the frame number
 *
 * Returns the usb frame number, normally eleven bits from a SOF packet,
 * or negative errno if this device doesn't support this capability.
 */
int usb_gadget_frame_number(struct usb_gadget *gadget)
{
	int ret;

	ret = gadget->ops->get_frame(gadget);

	trace_usb_gadget_frame_number(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_frame_number);

/**
 * usb_gadget_wakeup - tries to wake up the host connected to this gadget
 * @gadget: controller used to wake up the host
 *
 * Returns zero on success, else negative error code if the hardware
 * doesn't support such attempts, or its support has not been enabled
 * by the usb host. Drivers must return device descriptors that report
 * their ability to support this, or hosts won't enable it.
 *
 * This may also try to use SRP to wake the host and start enumeration,
 * even if OTG isn't otherwise in use. OTG devices may also start
 * remote wakeup even when hosts don't explicitly enable it.
 */
int usb_gadget_wakeup(struct usb_gadget *gadget)
{
	int ret = 0;

	if (!gadget->ops->wakeup) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = gadget->ops->wakeup(gadget);

out:
	trace_usb_gadget_wakeup(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_wakeup);

/**
 * usb_gadget_set_remote_wakeup - configures the device remote wakeup feature.
 * @gadget:the device being configured for remote wakeup
 * @set:value to be configured.
 *
 * set to one to enable remote wakeup feature and zero to disable it.
 *
 * returns zero on success, else negative errno.
 */
int usb_gadget_set_remote_wakeup(struct usb_gadget *gadget, int set)
{
	int ret = 0;

	if (!gadget->ops->set_remote_wakeup) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = gadget->ops->set_remote_wakeup(gadget, set);

out:
	trace_usb_gadget_set_remote_wakeup(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_set_remote_wakeup);

/**
 * usb_gadget_set_selfpowered - sets the device selfpowered feature.
 * @gadget:the device being declared as self-powered
 *
 * this affects the device status reported by the hardware driver
 * to reflect that it now has a local power supply.
 *
 * returns zero on success, else negative errno.
 */
int usb_gadget_set_selfpowered(struct usb_gadget *gadget)
{
	int ret = 0;

	if (!gadget->ops->set_selfpowered) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = gadget->ops->set_selfpowered(gadget, 1);

out:
	trace_usb_gadget_set_selfpowered(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_set_selfpowered);

/**
 * usb_gadget_clear_selfpowered - clear the device selfpowered feature.
 * @gadget:the device being declared as bus-powered
 *
 * this affects the device status reported by the hardware driver.
 * some hardware may not support bus-powered operation, in which
 * case this feature's value can never change.
 *
 * returns zero on success, else negative errno.
 */
int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)
{
	int ret = 0;

	if (!gadget->ops->set_selfpowered) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = gadget->ops->set_selfpowered(gadget, 0);

out:
	trace_usb_gadget_clear_selfpowered(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_clear_selfpowered);
/**
 * usb_gadget_vbus_connect - Notify controller that VBUS is powered
 * @gadget:The device which now has VBUS power.
 * Context: can sleep
 *
 * This call is used by a driver for an external transceiver (or GPIO)
 * that detects a VBUS power session starting. Common responses include
 * resuming the controller, activating the D+ (or D-) pullup to let the
 * host detect that a USB device is attached, and starting to draw power
 * (8mA or possibly more, especially after SET_CONFIGURATION).
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_vbus_connect(struct usb_gadget *gadget)
{
	int ret = 0;

	if (!gadget->ops->vbus_session) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = gadget->ops->vbus_session(gadget, 1);

out:
	trace_usb_gadget_vbus_connect(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_vbus_connect);

/**
 * usb_gadget_vbus_draw - constrain controller's VBUS power usage
 * @gadget:The device whose VBUS usage is being described
 * @mA:How much current to draw, in milliAmperes. This should be twice
 *	the value listed in the configuration descriptor bMaxPower field.
 *
 * This call is used by gadget drivers during SET_CONFIGURATION calls,
 * reporting how much power the device may consume. For example, this
 * could affect how quickly batteries are recharged.
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	int ret = 0;

	if (!gadget->ops->vbus_draw) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = gadget->ops->vbus_draw(gadget, mA);
	if (!ret)
		gadget->mA = mA;

out:
	trace_usb_gadget_vbus_draw(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_vbus_draw);

/**
 * usb_gadget_vbus_disconnect - notify controller about VBUS session end
 * @gadget:the device whose VBUS supply is being described
 * Context: can sleep
 *
 * This call is used by a driver for an external transceiver (or GPIO)
 * that detects a VBUS power session ending. Common responses include
 * reversing everything done in usb_gadget_vbus_connect().
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
{
	int ret = 0;

	if (!gadget->ops->vbus_session) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = gadget->ops->vbus_session(gadget, 0);

out:
	trace_usb_gadget_vbus_disconnect(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
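/*
 * Illustrative sketch (not part of the original file): a transceiver or
 * extcon notifier feeding VBUS session state into the calls above.
 * "example_vbus_event" and its wiring are hypothetical; 100 mA is the
 * usual pre-configuration limit for USB 2.0 bus-powered devices.
 */
static void example_vbus_event(struct usb_gadget *gadget, bool session_on)
{
	if (session_on) {
		usb_gadget_vbus_connect(gadget);
		/* constrain current draw until SET_CONFIGURATION arrives */
		usb_gadget_vbus_draw(gadget, 100);
	} else {
		usb_gadget_vbus_disconnect(gadget);
	}
}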
static int usb_gadget_connect_locked(struct usb_gadget *gadget)
	__must_hold(&gadget->udc->connect_lock)
{
	int ret = 0;

	if (!gadget->ops->pullup) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (gadget->deactivated || !gadget->udc->allow_connect ||
	    !gadget->udc->started) {
		/*
		 * If the gadget isn't usable (because it is deactivated,
		 * unbound, or not yet started), we only save the new state.
		 * The gadget will be connected automatically when it is
		 * activated/bound/started.
		 */
		gadget->connected = true;
		goto out;
	}

	ret = gadget->ops->pullup(gadget, 1);
	if (!ret)
		gadget->connected = 1;

out:
	trace_usb_gadget_connect(gadget, ret);

	return ret;
}

/**
 * usb_gadget_connect - software-controlled connect to USB host
 * @gadget:the peripheral being connected
 *
 * Enables the D+ (or potentially D-) pullup. The host will start
 * enumerating this gadget when the pullup is active and a VBUS session
 * is active (the link is powered).
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_connect(struct usb_gadget *gadget)
{
	int ret;

	mutex_lock(&gadget->udc->connect_lock);
	ret = usb_gadget_connect_locked(gadget);
	mutex_unlock(&gadget->udc->connect_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_connect);

static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
	__must_hold(&gadget->udc->connect_lock)
{
	int ret = 0;

	if (!gadget->ops->pullup) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!gadget->connected)
		goto out;

	if (gadget->deactivated || !gadget->udc->started) {
		/*
		 * If gadget is deactivated we only save new state.
		 * Gadget will stay disconnected after activation.
		 */
		gadget->connected = false;
		goto out;
	}

	ret = gadget->ops->pullup(gadget, 0);
	if (!ret)
		gadget->connected = 0;

	mutex_lock(&udc_lock);
	if (gadget->udc->driver)
		gadget->udc->driver->disconnect(gadget);
	mutex_unlock(&udc_lock);

out:
	trace_usb_gadget_disconnect(gadget, ret);

	return ret;
}

/**
 * usb_gadget_disconnect - software-controlled disconnect from USB host
 * @gadget:the peripheral being disconnected
 *
 * Disables the D+ (or potentially D-) pullup, which the host may see
 * as a disconnect (when a VBUS session is active). Not all systems
 * support software pullup controls.
 *
 * Following a successful disconnect, invoke the ->disconnect() callback
 * for the current gadget driver so that UDC drivers don't need to.
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_disconnect(struct usb_gadget *gadget)
{
	int ret;

	mutex_lock(&gadget->udc->connect_lock);
	ret = usb_gadget_disconnect_locked(gadget);
	mutex_unlock(&gadget->udc->connect_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_disconnect);

/**
 * usb_gadget_deactivate - deactivate function which is not ready to work
 * @gadget: the peripheral being deactivated
 *
 * This routine may be used during the gadget driver bind() call to prevent
 * the peripheral from ever being visible to the USB host, unless later
 * usb_gadget_activate() is called. For example, user mode components may
 * need to be activated before the system can talk to hosts.
 *
 * This routine may sleep; it must not be called in interrupt context
 * (such as from within a gadget driver's disconnect() callback).
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_deactivate(struct usb_gadget *gadget)
{
	int ret = 0;

	mutex_lock(&gadget->udc->connect_lock);
	if (gadget->deactivated)
		goto unlock;

	if (gadget->connected) {
		ret = usb_gadget_disconnect_locked(gadget);
		if (ret)
			goto unlock;

		/*
		 * If gadget was being connected before deactivation, we want
		 * to reconnect it in usb_gadget_activate().
		 */
		gadget->connected = true;
	}
	gadget->deactivated = true;

unlock:
	mutex_unlock(&gadget->udc->connect_lock);
	trace_usb_gadget_deactivate(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_deactivate);

/**
 * usb_gadget_activate - activate function which is not ready to work
 * @gadget: the peripheral being activated
 *
 * This routine activates gadget which was previously deactivated with
 * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
 *
 * This routine may sleep; it must not be called in interrupt context.
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_activate(struct usb_gadget *gadget)
{
	int ret = 0;

	mutex_lock(&gadget->udc->connect_lock);
	if (!gadget->deactivated)
		goto unlock;

	gadget->deactivated = false;

	/*
	 * If gadget has been connected before deactivation, or became connected
	 * while it was being deactivated, we call usb_gadget_connect().
	 */
	if (gadget->connected)
		ret = usb_gadget_connect_locked(gadget);

unlock:
	mutex_unlock(&gadget->udc->connect_lock);
	trace_usb_gadget_activate(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_activate);
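/*
 * Illustrative sketch (not part of the original file): keeping the gadget
 * invisible to the host until a userspace component is ready, using the
 * deactivate/activate pair above. "example_*" names are hypothetical.
 */
static int example_bind(struct usb_gadget *gadget)
{
	/* called from the gadget driver's bind(): hide the device ... */
	return usb_gadget_deactivate(gadget);
}

static void example_userspace_ready(struct usb_gadget *gadget)
{
	/* ... and expose it (reconnecting if needed) once we are ready */
	usb_gadget_activate(gadget);
}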
/* ------------------------------------------------------------------------- */

#ifdef CONFIG_HAS_DMA

int usb_gadget_map_request_by_dev(struct device *dev,
		struct usb_request *req, int is_in)
{
	if (req->length == 0)
		return 0;

	if (req->sg_was_mapped) {
		req->num_mapped_sgs = req->num_sgs;
		return 0;
	}

	if (req->num_sgs) {
		int mapped;

		mapped = dma_map_sg(dev, req->sg, req->num_sgs,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (mapped == 0) {
			dev_err(dev, "failed to map SGs\n");
			return -EFAULT;
		}

		req->num_mapped_sgs = mapped;
	} else {
		if (is_vmalloc_addr(req->buf)) {
			dev_err(dev, "buffer is not dma capable\n");
			return -EFAULT;
		} else if (object_is_on_stack(req->buf)) {
			dev_err(dev, "buffer is on stack\n");
			return -EFAULT;
		}

		req->dma = dma_map_single(dev, req->buf, req->length,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, req->dma)) {
			dev_err(dev, "failed to map buffer\n");
			return -EFAULT;
		}

		req->dma_mapped = 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(usb_gadget_map_request_by_dev);

int usb_gadget_map_request(struct usb_gadget *gadget,
		struct usb_request *req, int is_in)
{
	return usb_gadget_map_request_by_dev(gadget->dev.parent, req, is_in);
}
EXPORT_SYMBOL_GPL(usb_gadget_map_request);

void usb_gadget_unmap_request_by_dev(struct device *dev,
		struct usb_request *req, int is_in)
{
	if (req->length == 0 || req->sg_was_mapped)
		return;

	if (req->num_mapped_sgs) {
		dma_unmap_sg(dev, req->sg, req->num_sgs,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		req->num_mapped_sgs = 0;
	} else if (req->dma_mapped) {
		dma_unmap_single(dev, req->dma, req->length,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->dma_mapped = 0;
	}
}
EXPORT_SYMBOL_GPL(usb_gadget_unmap_request_by_dev);

void usb_gadget_unmap_request(struct usb_gadget *gadget,
		struct usb_request *req, int is_in)
{
	usb_gadget_unmap_request_by_dev(gadget->dev.parent, req, is_in);
}
EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);

#endif	/* CONFIG_HAS_DMA */

/* ------------------------------------------------------------------------- */

/**
 * usb_gadget_giveback_request - give the request back to the gadget layer
 * @ep: the endpoint to be used with the request
 * @req: the request being given back
 *
 * This is called by device controller drivers in order to return the
 * completed request back to the gadget layer.
 */
void usb_gadget_giveback_request(struct usb_ep *ep, struct usb_request *req)
{
	if (likely(req->status == 0))
		usb_led_activity(USB_LED_EVENT_GADGET);

	trace_usb_gadget_giveback_request(ep, req, 0);

	req->complete(ep, req);
}
EXPORT_SYMBOL_GPL(usb_gadget_giveback_request);

/* ------------------------------------------------------------------------- */
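/*
 * Illustrative sketch (not part of the original file): the tail of a UDC
 * driver's completion path, combining the DMA unmap helpers with
 * usb_gadget_giveback_request() as documented above. "example_udc_complete"
 * is a hypothetical name; -EINPROGRESS marking is the usual convention for
 * an in-flight request.
 */
static void example_udc_complete(struct usb_gadget *gadget, struct usb_ep *ep,
				 struct usb_request *req, int status)
{
	if (req->status == -EINPROGRESS)
		req->status = status;

	usb_gadget_unmap_request(gadget, req, usb_endpoint_dir_in(ep->desc));

	/* hands the request back to the gadget driver's ->complete() */
	usb_gadget_giveback_request(ep, req);
}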
/**
 * gadget_find_ep_by_name - returns ep whose name is the same as string passed
 *	in second parameter or NULL if searched endpoint not found
 * @g: controller to check for quirk
 * @name: name of searched endpoint
 */
struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g, const char *name)
{
	struct usb_ep *ep;

	gadget_for_each_ep(ep, g) {
		if (!strcmp(ep->name, name))
			return ep;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(gadget_find_ep_by_name);

/* ------------------------------------------------------------------------- */

int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
		struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
		struct usb_ss_ep_comp_descriptor *ep_comp)
{
	u8 type;
	u16 max;
	int num_req_streams = 0;

	/* endpoint already claimed? */
	if (ep->claimed)
		return 0;

	type = usb_endpoint_type(desc);
	max = usb_endpoint_maxp(desc);

	if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
		return 0;
	if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out)
		return 0;

	if (max > ep->maxpacket_limit)
		return 0;

	/* "high bandwidth" works only at high speed */
	if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1)
		return 0;

	switch (type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* only support ep0 for portable CONTROL traffic */
		return 0;
	case USB_ENDPOINT_XFER_ISOC:
		if (!ep->caps.type_iso)
			return 0;
		/* ISO: limit 1023 bytes full speed, 1024 high/super speed */
		if (!gadget_is_dualspeed(gadget) && max > 1023)
			return 0;
		break;
	case USB_ENDPOINT_XFER_BULK:
		if (!ep->caps.type_bulk)
			return 0;
		if (ep_comp && gadget_is_superspeed(gadget)) {
			/* Get the number of required streams from the
			 * EP companion descriptor and see if the EP
			 * matches it
			 */
			num_req_streams = ep_comp->bmAttributes & 0x1f;
			if (num_req_streams > ep->max_streams)
				return 0;
		}
		break;
	case USB_ENDPOINT_XFER_INT:
		/* Bulk endpoints handle interrupt transfers,
		 * except the toggle-quirky iso-synch kind
		 */
		if (!ep->caps.type_int && !ep->caps.type_bulk)
			return 0;
		/* INT: limit 64 bytes full speed, 1024 high/super speed */
		if (!gadget_is_dualspeed(gadget) && max > 64)
			return 0;
		break;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc);

/**
 * usb_gadget_check_config - checks if the UDC can support the bound
 *	configuration
 * @gadget: controller to check the USB configuration
 *
 * Ensure that a UDC is able to support the requested resources by a
 * configuration, and that there are no resource limitations, such as
 * internal memory allocated to all requested endpoints.
 *
 * Returns zero on success, else a negative errno.
 */
int usb_gadget_check_config(struct usb_gadget *gadget)
{
	if (gadget->ops->check_config)
		return gadget->ops->check_config(gadget);
	return 0;
}
EXPORT_SYMBOL_GPL(usb_gadget_check_config);

/* ------------------------------------------------------------------------- */
static void usb_gadget_state_work(struct work_struct *work)
{
	struct usb_gadget *gadget = work_to_gadget(work);
	struct usb_udc *udc = gadget->udc;

	if (udc)
		sysfs_notify(&udc->dev.kobj, NULL, "state");
}

void usb_gadget_set_state(struct usb_gadget *gadget,
		enum usb_device_state state)
{
	gadget->state = state;
	schedule_work(&gadget->work);
}
EXPORT_SYMBOL_GPL(usb_gadget_set_state);

/* ------------------------------------------------------------------------- */

/* Acquire connect_lock before calling this function. */
static int usb_udc_connect_control_locked(struct usb_udc *udc)
	__must_hold(&udc->connect_lock)
{
	if (udc->vbus)
		return usb_gadget_connect_locked(udc->gadget);
	else
		return usb_gadget_disconnect_locked(udc->gadget);
}

static void vbus_event_work(struct work_struct *work)
{
	struct usb_udc *udc = container_of(work, struct usb_udc, vbus_work);

	mutex_lock(&udc->connect_lock);
	usb_udc_connect_control_locked(udc);
	mutex_unlock(&udc->connect_lock);
}

/**
 * usb_udc_vbus_handler - updates the udc core vbus status, and try to
 * connect or disconnect gadget
 * @gadget: The gadget on which the vbus change occurs
 * @status: The vbus status
 *
 * The udc driver calls it when it wants to connect or disconnect gadget
 * according to vbus status.
 *
 * This function can be invoked from interrupt context by irq handlers of
 * the gadget drivers, however, usb_udc_connect_control() has to run in
 * non-atomic context due to the following:
 * a. Some of the gadget driver implementations expect the ->pullup
 * callback to be invoked in non-atomic context.
 * b. usb_gadget_disconnect() acquires udc_lock which is a mutex.
 * Hence offload invocation of usb_udc_connect_control() to workqueue.
 */
void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
{
	struct usb_udc *udc = gadget->udc;

	if (udc) {
		udc->vbus = status;
		schedule_work(&udc->vbus_work);
	}
}
EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);

/**
 * usb_gadget_udc_reset - notifies the udc core that bus reset occurs
 * @gadget: The gadget which bus reset occurs
 * @driver: The gadget driver we want to notify
 *
 * If the udc driver has bus reset handler, it needs to call this when the bus
 * reset occurs, it notifies the gadget driver that the bus reset occurs as
 * well as updates gadget state.
 */
void usb_gadget_udc_reset(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	driver->reset(gadget);
	usb_gadget_set_state(gadget, USB_STATE_DEFAULT);
}
EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
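/*
 * Illustrative sketch (not part of the original file): a UDC interrupt
 * handler forwarding events through the two notifiers above. VBUS handling
 * may run in atomic context because usb_udc_vbus_handler() only schedules
 * vbus_work; reset handling calls into the gadget driver synchronously.
 * "struct example_udc", the status register helpers and the EXAMPLE_* flags
 * are all hypothetical.
 */
static irqreturn_t example_udc_irq(int irq, void *_udc)
{
	struct example_udc *udc = _udc;		/* hypothetical driver state */
	u32 status = example_udc_read_status(udc);	/* hypothetical read */

	if (status & EXAMPLE_VBUS_CHANGE)
		usb_udc_vbus_handler(&udc->gadget, example_vbus_present(udc));

	if (status & EXAMPLE_BUS_RESET)
		usb_gadget_udc_reset(&udc->gadget, udc->driver);

	return IRQ_HANDLED;
}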
/**
 * usb_gadget_udc_start_locked - tells usb device controller to start up
 * @udc: The UDC to be started
 *
 * This call is issued by the UDC Class driver when it's about
 * to register a gadget driver to the device controller, before
 * calling gadget driver's bind() method.
 *
 * It allows the controller to be powered off until strictly
 * necessary to have it powered on.
 *
 * Returns zero on success, else negative errno.
 *
 * Caller should acquire connect_lock before invoking this function.
 */
static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
	__must_hold(&udc->connect_lock)
{
	int ret;

	if (udc->started) {
		dev_err(&udc->dev, "UDC had already started\n");
		return -EBUSY;
	}

	ret = udc->gadget->ops->udc_start(udc->gadget, udc->driver);
	if (!ret)
		udc->started = true;

	return ret;
}

/**
 * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore
 * @udc: The UDC to be stopped
 *
 * This call is issued by the UDC Class driver after calling
 * gadget driver's unbind() method.
 *
 * The details are implementation specific, but it can go as
 * far as powering off UDC completely and disable its data
 * line pullups.
 *
 * Caller should acquire connect lock before invoking this function.
 */
static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc)
	__must_hold(&udc->connect_lock)
{
	if (!udc->started) {
		dev_err(&udc->dev, "UDC had already stopped\n");
		return;
	}

	udc->gadget->ops->udc_stop(udc->gadget);
	udc->started = false;
}

/**
 * usb_gadget_udc_set_speed - tells usb device controller speed supported by
 *    current driver
 * @udc: The device we want to set maximum speed
 * @speed: The maximum speed allowed to run
 *
 * This call is issued by the UDC Class driver before calling
 * usb_gadget_udc_start() in order to make sure that we don't try to
 * connect on speeds the gadget driver doesn't support.
 */
static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
					    enum usb_device_speed speed)
{
	struct usb_gadget *gadget = udc->gadget;
	enum usb_device_speed s;

	if (speed == USB_SPEED_UNKNOWN)
		s = gadget->max_speed;
	else
		s = min(speed, gadget->max_speed);

	if (s == USB_SPEED_SUPER_PLUS && gadget->ops->udc_set_ssp_rate)
		gadget->ops->udc_set_ssp_rate(gadget, gadget->max_ssp_rate);
	else if (gadget->ops->udc_set_speed)
		gadget->ops->udc_set_speed(gadget, s);
}

/**
 * usb_gadget_enable_async_callbacks - tell usb device controller to enable asynchronous callbacks
 * @udc: The UDC which should enable async callbacks
 *
 * This routine is used when binding gadget drivers. It undoes the effect
 * of usb_gadget_disable_async_callbacks(); the UDC driver should enable IRQs
 * (if necessary) and resume issuing callbacks.
 *
 * This routine will always be called in process context.
 */
static inline void usb_gadget_enable_async_callbacks(struct usb_udc *udc)
{
	struct usb_gadget *gadget = udc->gadget;

	if (gadget->ops->udc_async_callbacks)
		gadget->ops->udc_async_callbacks(gadget, true);
}

/**
 * usb_gadget_disable_async_callbacks - tell usb device controller to disable asynchronous callbacks
 * @udc: The UDC which should disable async callbacks
 *
 * This routine is used when unbinding gadget drivers. It prevents a race:
 * The UDC driver doesn't know when the gadget driver's ->unbind callback
 * runs, so unless it is told to disable asynchronous callbacks, it might
 * issue a callback (such as ->disconnect) after the unbind has completed.
 *
 * After this function runs, the UDC driver must suppress all ->suspend,
 * ->resume, ->disconnect, ->reset, and ->setup callbacks to the gadget driver
 * until async callbacks are again enabled. A simple-minded but effective
 * way to accomplish this is to tell the UDC hardware not to generate any
 * more IRQs.
 *
 * Request completion callbacks must still be issued. However, it's okay
 * to defer them until the request is cancelled, since the pull-up will be
 * turned off during the time period when async callbacks are disabled.
 *
 * This routine will always be called in process context.
 */
static inline void usb_gadget_disable_async_callbacks(struct usb_udc *udc)
{
	struct usb_gadget *gadget = udc->gadget;

	if (gadget->ops->udc_async_callbacks)
		gadget->ops->udc_async_callbacks(gadget, false);
}
*/ static inline void usb_gadget_disable_async_callbacks(struct usb_udc *udc) { struct usb_gadget *gadget = udc->gadget; if (gadget->ops->udc_async_callbacks) gadget->ops->udc_async_callbacks(gadget, false); } /** * usb_udc_release - release the usb_udc struct * @dev: the dev member within usb_udc * * This is called by driver's core in order to free memory once the last * reference is released. */ static void usb_udc_release(struct device *dev) { struct usb_udc *udc; udc = container_of(dev, struct usb_udc, dev); dev_dbg(dev, "releasing '%s'\n", dev_name(dev)); kfree(udc); } static const struct attribute_group *usb_udc_attr_groups[]; static void usb_udc_nop_release(struct device *dev) { dev_vdbg(dev, "%s\n", __func__); } /** * usb_initialize_gadget - initialize a gadget and its embedded struct device * @parent: the parent device to this udc. Usually the controller driver's * device. * @gadget: the gadget to be initialized. * @release: a gadget release function. */ void usb_initialize_gadget(struct device *parent, struct usb_gadget *gadget, void (*release)(struct device *dev)) { INIT_WORK(&gadget->work, usb_gadget_state_work); gadget->dev.parent = parent; if (release) gadget->dev.release = release; else gadget->dev.release = usb_udc_nop_release; device_initialize(&gadget->dev); gadget->dev.bus = &gadget_bus_type; } EXPORT_SYMBOL_GPL(usb_initialize_gadget); /** * usb_add_gadget - adds a new gadget to the udc class driver list * @gadget: the gadget to be added to the list. * * Returns zero on success, negative errno otherwise. * Does not do a final usb_put_gadget() if an error occurs. */ int usb_add_gadget(struct usb_gadget *gadget) { struct usb_udc *udc; int ret = -ENOMEM; udc = kzalloc(sizeof(*udc), GFP_KERNEL); if (!udc) goto error; device_initialize(&udc->dev); udc->dev.release = usb_udc_release; udc->dev.class = &udc_class; udc->dev.groups = usb_udc_attr_groups; udc->dev.parent = gadget->dev.parent; ret = dev_set_name(&udc->dev, "%s", kobject_name(&gadget->dev.parent->kobj)); if (ret) goto err_put_udc; udc->gadget = gadget; gadget->udc = udc; mutex_init(&udc->connect_lock); udc->started = false; mutex_lock(&udc_lock); list_add_tail(&udc->list, &udc_list); mutex_unlock(&udc_lock); INIT_WORK(&udc->vbus_work, vbus_event_work); ret = device_add(&udc->dev); if (ret) goto err_unlist_udc; usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED); udc->vbus = true; ret = ida_alloc(&gadget_id_numbers, GFP_KERNEL); if (ret < 0) goto err_del_udc; gadget->id_number = ret; dev_set_name(&gadget->dev, "gadget.%d", ret); ret = device_add(&gadget->dev); if (ret) goto err_free_id; ret = sysfs_create_link(&udc->dev.kobj, &gadget->dev.kobj, "gadget"); if (ret) goto err_del_gadget; return 0; err_del_gadget: device_del(&gadget->dev); err_free_id: ida_free(&gadget_id_numbers, gadget->id_number); err_del_udc: flush_work(&gadget->work); device_del(&udc->dev); err_unlist_udc: mutex_lock(&udc_lock); list_del(&udc->list); mutex_unlock(&udc_lock); err_put_udc: put_device(&udc->dev); error: return ret; } EXPORT_SYMBOL_GPL(usb_add_gadget); /** * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list * @parent: the parent device to this udc. Usually the controller driver's * device. * @gadget: the gadget to be added to the list. * @release: a gadget release function. * * Returns zero on success, negative errno otherwise. * Calls the gadget release function in the latter case. 
 */
int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
		void (*release)(struct device *dev))
{
	int ret;

	usb_initialize_gadget(parent, gadget, release);
	ret = usb_add_gadget(gadget);
	if (ret)
		usb_put_gadget(gadget);
	return ret;
}
EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release);

/**
 * usb_get_gadget_udc_name - get the name of the first UDC controller
 * This function returns the name of the first UDC controller in the system.
 * Please note that this interface is useful only for legacy drivers which
 * assume that there is only one UDC controller in the system and they need to
 * get its name before initialization. There is no guarantee that the UDC
 * of the returned name will still be available when the gadget driver
 * registers itself.
 *
 * Returns pointer to string with UDC controller name on success, NULL
 * otherwise. Caller should kfree() returned string.
 */
char *usb_get_gadget_udc_name(void)
{
	struct usb_udc *udc;
	char *name = NULL;

	/* For now we take the first available UDC */
	mutex_lock(&udc_lock);
	list_for_each_entry(udc, &udc_list, list) {
		if (!udc->driver) {
			name = kstrdup(udc->gadget->name, GFP_KERNEL);
			break;
		}
	}
	mutex_unlock(&udc_lock);
	return name;
}
EXPORT_SYMBOL_GPL(usb_get_gadget_udc_name);

/**
 * usb_add_gadget_udc - adds a new gadget to the udc class driver list
 * @parent: the parent device to this udc. Usually the controller
 * driver's device.
 * @gadget: the gadget to be added to the list
 *
 * Returns zero on success, negative errno otherwise.
 */
int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
{
	return usb_add_gadget_udc_release(parent, gadget, NULL);
}
EXPORT_SYMBOL_GPL(usb_add_gadget_udc);

/**
 * usb_del_gadget - deletes a gadget and unregisters its udc
 * @gadget: the gadget to be deleted.
 *
 * This will unbind @gadget, if it is bound.
 * It will not do a final usb_put_gadget().
 */
void usb_del_gadget(struct usb_gadget *gadget)
{
	struct usb_udc *udc = gadget->udc;

	if (!udc)
		return;

	dev_vdbg(gadget->dev.parent, "unregistering gadget\n");

	mutex_lock(&udc_lock);
	list_del(&udc->list);
	mutex_unlock(&udc_lock);

	kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
	sysfs_remove_link(&udc->dev.kobj, "gadget");
	device_del(&gadget->dev);
	flush_work(&gadget->work);
	ida_free(&gadget_id_numbers, gadget->id_number);
	cancel_work_sync(&udc->vbus_work);
	device_unregister(&udc->dev);
}
EXPORT_SYMBOL_GPL(usb_del_gadget);

/**
 * usb_del_gadget_udc - unregisters a gadget
 * @gadget: the gadget to be unregistered.
 *
 * Calls usb_del_gadget() and does a final usb_put_gadget().
*/ void usb_del_gadget_udc(struct usb_gadget *gadget) { usb_del_gadget(gadget); usb_put_gadget(gadget); } EXPORT_SYMBOL_GPL(usb_del_gadget_udc); /* ------------------------------------------------------------------------- */ static int gadget_match_driver(struct device *dev, const struct device_driver *drv) { struct usb_gadget *gadget = dev_to_usb_gadget(dev); struct usb_udc *udc = gadget->udc; struct usb_gadget_driver *driver = container_of(drv, struct usb_gadget_driver, driver); /* If the driver specifies a udc_name, it must match the UDC's name */ if (driver->udc_name && strcmp(driver->udc_name, dev_name(&udc->dev)) != 0) return 0; /* If the driver is already bound to a gadget, it doesn't match */ if (driver->is_bound) return 0; /* Otherwise any gadget driver matches any UDC */ return 1; } static int gadget_bind_driver(struct device *dev) { struct usb_gadget *gadget = dev_to_usb_gadget(dev); struct usb_udc *udc = gadget->udc; struct usb_gadget_driver *driver = container_of(dev->driver, struct usb_gadget_driver, driver); int ret = 0; mutex_lock(&udc_lock); if (driver->is_bound) { mutex_unlock(&udc_lock); return -ENXIO; /* Driver binds to only one gadget */ } driver->is_bound = true; udc->driver = driver; mutex_unlock(&udc_lock); dev_dbg(&udc->dev, "binding gadget driver [%s]\n", driver->function); usb_gadget_udc_set_speed(udc, driver->max_speed); ret = driver->bind(udc->gadget, driver); if (ret) goto err_bind; mutex_lock(&udc->connect_lock); ret = usb_gadget_udc_start_locked(udc); if (ret) { mutex_unlock(&udc->connect_lock); goto err_start; } usb_gadget_enable_async_callbacks(udc); udc->allow_connect = true; ret = usb_udc_connect_control_locked(udc); if (ret) goto err_connect_control; mutex_unlock(&udc->connect_lock); kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); return 0; err_connect_control: udc->allow_connect = false; usb_gadget_disable_async_callbacks(udc); if (gadget->irq) synchronize_irq(gadget->irq); usb_gadget_udc_stop_locked(udc); mutex_unlock(&udc->connect_lock); err_start: driver->unbind(udc->gadget); err_bind: if (ret != -EISNAM) dev_err(&udc->dev, "failed to start %s: %d\n", driver->function, ret); mutex_lock(&udc_lock); udc->driver = NULL; driver->is_bound = false; mutex_unlock(&udc_lock); return ret; } static void gadget_unbind_driver(struct device *dev) { struct usb_gadget *gadget = dev_to_usb_gadget(dev); struct usb_udc *udc = gadget->udc; struct usb_gadget_driver *driver = udc->driver; dev_dbg(&udc->dev, "unbinding gadget driver [%s]\n", driver->function); udc->allow_connect = false; cancel_work_sync(&udc->vbus_work); mutex_lock(&udc->connect_lock); usb_gadget_disconnect_locked(gadget); usb_gadget_disable_async_callbacks(udc); if (gadget->irq) synchronize_irq(gadget->irq); mutex_unlock(&udc->connect_lock); udc->driver->unbind(gadget); mutex_lock(&udc->connect_lock); usb_gadget_udc_stop_locked(udc); mutex_unlock(&udc->connect_lock); mutex_lock(&udc_lock); driver->is_bound = false; udc->driver = NULL; mutex_unlock(&udc_lock); kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); } /* ------------------------------------------------------------------------- */ int usb_gadget_register_driver_owner(struct usb_gadget_driver *driver, struct module *owner, const char *mod_name) { int ret; if (!driver || !driver->bind || !driver->setup) return -EINVAL; driver->driver.bus = &gadget_bus_type; driver->driver.owner = owner; driver->driver.mod_name = mod_name; driver->driver.probe_type = PROBE_FORCE_SYNCHRONOUS; ret = driver_register(&driver->driver); if (ret) { pr_warn("%s: driver 
registration failed: %d\n", driver->function, ret); return ret; } mutex_lock(&udc_lock); if (!driver->is_bound) { if (driver->match_existing_only) { pr_warn("%s: couldn't find an available UDC or it's busy\n", driver->function); ret = -EBUSY; } else { pr_info("%s: couldn't find an available UDC\n", driver->function); ret = 0; } } mutex_unlock(&udc_lock); if (ret) driver_unregister(&driver->driver); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_register_driver_owner); int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) { if (!driver || !driver->unbind) return -EINVAL; driver_unregister(&driver->driver); return 0; } EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver); /* ------------------------------------------------------------------------- */ static ssize_t srp_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); if (sysfs_streq(buf, "1")) usb_gadget_wakeup(udc->gadget); return n; } static DEVICE_ATTR_WO(srp); static ssize_t soft_connect_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); ssize_t ret; device_lock(&udc->gadget->dev); if (!udc->driver) { dev_err(dev, "soft-connect without a gadget driver\n"); ret = -EOPNOTSUPP; goto out; } if (sysfs_streq(buf, "connect")) { mutex_lock(&udc->connect_lock); usb_gadget_udc_start_locked(udc); usb_gadget_connect_locked(udc->gadget); mutex_unlock(&udc->connect_lock); } else if (sysfs_streq(buf, "disconnect")) { mutex_lock(&udc->connect_lock); usb_gadget_disconnect_locked(udc->gadget); usb_gadget_udc_stop_locked(udc); mutex_unlock(&udc->connect_lock); } else { dev_err(dev, "unsupported command '%s'\n", buf); ret = -EINVAL; goto out; } ret = n; out: device_unlock(&udc->gadget->dev); return ret; } static DEVICE_ATTR_WO(soft_connect); static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); struct usb_gadget *gadget = udc->gadget; return sprintf(buf, "%s\n", usb_state_string(gadget->state)); } static DEVICE_ATTR_RO(state); static ssize_t function_show(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); struct usb_gadget_driver *drv; int rc = 0; mutex_lock(&udc_lock); drv = udc->driver; if (drv && drv->function) rc = scnprintf(buf, PAGE_SIZE, "%s\n", drv->function); mutex_unlock(&udc_lock); return rc; } static DEVICE_ATTR_RO(function); #define USB_UDC_SPEED_ATTR(name, param) \ ssize_t name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \ return scnprintf(buf, PAGE_SIZE, "%s\n", \ usb_speed_string(udc->gadget->param)); \ } \ static DEVICE_ATTR_RO(name) static USB_UDC_SPEED_ATTR(current_speed, speed); static USB_UDC_SPEED_ATTR(maximum_speed, max_speed); #define USB_UDC_ATTR(name) \ ssize_t name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \ struct usb_gadget *gadget = udc->gadget; \ \ return scnprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \ } \ static DEVICE_ATTR_RO(name) static USB_UDC_ATTR(is_otg); static USB_UDC_ATTR(is_a_peripheral); static USB_UDC_ATTR(b_hnp_enable); static USB_UDC_ATTR(a_hnp_support); static USB_UDC_ATTR(a_alt_hnp_support); static USB_UDC_ATTR(is_selfpowered); static struct attribute 
*usb_udc_attrs[] = { &dev_attr_srp.attr, &dev_attr_soft_connect.attr, &dev_attr_state.attr, &dev_attr_function.attr, &dev_attr_current_speed.attr, &dev_attr_maximum_speed.attr, &dev_attr_is_otg.attr, &dev_attr_is_a_peripheral.attr, &dev_attr_b_hnp_enable.attr, &dev_attr_a_hnp_support.attr, &dev_attr_a_alt_hnp_support.attr, &dev_attr_is_selfpowered.attr, NULL, }; static const struct attribute_group usb_udc_attr_group = { .attrs = usb_udc_attrs, }; static const struct attribute_group *usb_udc_attr_groups[] = { &usb_udc_attr_group, NULL, }; static int usb_udc_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct usb_udc *udc = container_of(dev, struct usb_udc, dev); int ret; ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name); if (ret) { dev_err(dev, "failed to add uevent USB_UDC_NAME\n"); return ret; } mutex_lock(&udc_lock); if (udc->driver) ret = add_uevent_var(env, "USB_UDC_DRIVER=%s", udc->driver->function); mutex_unlock(&udc_lock); if (ret) { dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n"); return ret; } return 0; } static const struct class udc_class = { .name = "udc", .dev_uevent = usb_udc_uevent, }; static const struct bus_type gadget_bus_type = { .name = "gadget", .probe = gadget_bind_driver, .remove = gadget_unbind_driver, .match = gadget_match_driver, }; static int __init usb_udc_init(void) { int rc; rc = class_register(&udc_class); if (rc) return rc; rc = bus_register(&gadget_bus_type); if (rc) class_unregister(&udc_class); return rc; } subsys_initcall(usb_udc_init); static void __exit usb_udc_exit(void) { bus_unregister(&gadget_bus_type); class_unregister(&udc_class); } module_exit(usb_udc_exit); MODULE_DESCRIPTION("UDC Framework"); MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); MODULE_LICENSE("GPL v2");
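/*
 * Editor's sketch (not from the kernel tree): the registration flow a
 * controller driver would use against the framework above. All demo_*
 * names are hypothetical; usb_add_gadget_udc(), usb_del_gadget_udc() and
 * the usb_gadget_ops callbacks are the real entry points. Endpoint setup,
 * hardware access and error handling are trimmed to keep the shape visible.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/usb/gadget.h>

struct demo_udc {
	struct usb_gadget gadget;
};

/* Reached through usb_gadget_udc_start_locked() once a function driver
 * binds; a real driver would power up and arm the controller here. */
static int demo_udc_start(struct usb_gadget *g,
			  struct usb_gadget_driver *driver)
{
	return 0;
}

/* Counterpart reached from usb_gadget_udc_stop_locked() on unbind. */
static int demo_udc_stop(struct usb_gadget *g)
{
	return 0;
}

static const struct usb_gadget_ops demo_udc_ops = {
	.udc_start = demo_udc_start,
	.udc_stop = demo_udc_stop,
};

static int demo_udc_probe(struct platform_device *pdev)
{
	struct demo_udc *du;

	du = devm_kzalloc(&pdev->dev, sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->gadget.ops = &demo_udc_ops;
	du->gadget.max_speed = USB_SPEED_HIGH;
	du->gadget.name = "demo_udc";
	platform_set_drvdata(pdev, du);

	/* Creates the "udc" class device and the gadget.%d device on
	 * gadget_bus_type, as usb_add_gadget() above implements. */
	return usb_add_gadget_udc(&pdev->dev, &du->gadget);
}

static void demo_udc_remove(struct platform_device *pdev)
{
	struct demo_udc *du = platform_get_drvdata(pdev);

	/* usb_del_gadget() plus the final usb_put_gadget(). */
	usb_del_gadget_udc(&du->gadget);
}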
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HID driver for Aureal Cy se W-01RN USB_V3.1 devices
 *
 * Copyright (c) 2010 Franco Catrin <fcatrin@gmail.com>
 * Copyright (c) 2010 Ben Cropley <bcropley@internode.on.net>
 *
 * Based on HID sunplus driver by
 * Copyright (c) 1999 Andreas Gal
 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 * Copyright (c) 2006-2007 Jiri Kosina
 * Copyright (c) 2008 Jiri Slaby
 */
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

static const __u8 *aureal_report_fixup(struct hid_device *hdev, __u8 *rdesc,
		unsigned int *rsize)
{
	if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) {
		dev_info(&hdev->dev, "fixing Aureal Cy se W-01RN USB_V3.1 report descriptor.\n");
		rdesc[53] = 0x65;
	}
	return rdesc;
}

static const struct hid_device_id aureal_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
	{ }
};
MODULE_DEVICE_TABLE(hid, aureal_devices);

static struct hid_driver aureal_driver = {
	.name = "aureal",
	.id_table = aureal_devices,
	.report_fixup = aureal_report_fixup,
};
module_hid_driver(aureal_driver);

MODULE_DESCRIPTION("HID driver for Aureal Cy se W-01RN USB_V3.1 devices");
MODULE_LICENSE("GPL");
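/*
 * Editor's note on the quirk above: 0x25 is the one-byte "Logical
 * Maximum" global item in a HID report descriptor, so the byte pair
 * 0x25 0x01 at offsets 52/53 declares a logical maximum of 1; the fixup
 * raises it to 0x65 (101) to match the values the device really sends.
 * Below is a standalone sketch of the same check, handy for experimenting
 * with a descriptor dump (the fragment here is hypothetical, not a real
 * dump from the device).
 */
#include <stdio.h>

static unsigned char rdesc[54] = { [52] = 0x25, [53] = 0x01 };

int main(void)
{
	/* Same predicate as aureal_report_fixup(). */
	if (rdesc[52] == 0x25 && rdesc[53] == 0x01) {
		printf("bogus Logical Maximum(1), patching to 0x65\n");
		rdesc[53] = 0x65;
	}
	printf("Logical Maximum is now %#x\n", (unsigned)rdesc[53]);
	return 0;
}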
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * acpi.h - ACPI Interface
 *
 * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 */

#ifndef _LINUX_ACPI_H
#define _LINUX_ACPI_H

#include
<linux/errno.h> #include <linux/ioport.h> /* for struct resource */ #include <linux/resource_ext.h> #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/property.h> #include <linux/uuid.h> #include <linux/node.h> struct irq_domain; struct irq_domain_ops; #ifndef _LINUX #define _LINUX #endif #include <acpi/acpi.h> #include <acpi/acpi_numa.h> #ifdef CONFIG_ACPI #include <linux/list.h> #include <linux/dynamic_debug.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/fw_table.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <acpi/acpi_io.h> #include <asm/acpi.h> #ifdef CONFIG_ACPI_TABLE_LIB #define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, "ACPI") #define __init_or_acpilib #define __initdata_or_acpilib #else #define EXPORT_SYMBOL_ACPI_LIB(x) #define __init_or_acpilib __init #define __initdata_or_acpilib __initdata #endif static inline acpi_handle acpi_device_handle(struct acpi_device *adev) { return adev ? adev->handle : NULL; } #define ACPI_COMPANION(dev) to_acpi_device_node((dev)->fwnode) #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ acpi_fwnode_handle(adev) : NULL) #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) #define ACPI_HANDLE_FWNODE(fwnode) \ acpi_device_handle(to_acpi_device_node(fwnode)) static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) { struct fwnode_handle *fwnode; fwnode = kzalloc(sizeof(struct fwnode_handle), GFP_KERNEL); if (!fwnode) return NULL; fwnode_init(fwnode, &acpi_static_fwnode_ops); return fwnode; } static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode) { if (WARN_ON(!is_acpi_static_node(fwnode))) return; kfree(fwnode); } static inline bool has_acpi_companion(struct device *dev) { return is_acpi_device_node(dev->fwnode); } static inline void acpi_preset_companion(struct device *dev, struct acpi_device *parent, u64 addr) { ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false)); } static inline const char *acpi_dev_name(struct acpi_device *adev) { return dev_name(&adev->dev); } struct device *acpi_get_first_physical_node(struct acpi_device *adev); enum acpi_irq_model_id { ACPI_IRQ_MODEL_PIC = 0, ACPI_IRQ_MODEL_IOAPIC, ACPI_IRQ_MODEL_IOSAPIC, ACPI_IRQ_MODEL_PLATFORM, ACPI_IRQ_MODEL_GIC, ACPI_IRQ_MODEL_LPIC, ACPI_IRQ_MODEL_RINTC, ACPI_IRQ_MODEL_COUNT }; extern enum acpi_irq_model_id acpi_irq_model; enum acpi_interrupt_id { ACPI_INTERRUPT_PMI = 1, ACPI_INTERRUPT_INIT, ACPI_INTERRUPT_CPEI, ACPI_INTERRUPT_COUNT }; #define ACPI_SPACE_MEM 0 enum acpi_address_range_id { ACPI_ADDRESS_RANGE_MEMORY = 1, ACPI_ADDRESS_RANGE_RESERVED = 2, ACPI_ADDRESS_RANGE_ACPI = 3, ACPI_ADDRESS_RANGE_NVS = 4, ACPI_ADDRESS_RANGE_COUNT }; /* Table Handlers */ typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); /* Debugger support */ struct acpi_debugger_ops { int (*create_thread)(acpi_osd_exec_callback function, void *context); ssize_t (*write_log)(const char *msg); ssize_t (*read_cmd)(char *buffer, size_t length); int (*wait_command_ready)(bool single_step, char *buffer, size_t length); int (*notify_command_complete)(void); }; struct acpi_debugger { const struct acpi_debugger_ops *ops; struct module *owner; struct mutex lock; }; #ifdef CONFIG_ACPI_DEBUGGER int __init acpi_debugger_init(void); int acpi_register_debugger(struct module *owner, const struct acpi_debugger_ops *ops); void acpi_unregister_debugger(const struct acpi_debugger_ops *ops); int acpi_debugger_create_thread(acpi_osd_exec_callback function, void 
*context); ssize_t acpi_debugger_write_log(const char *msg); ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length); int acpi_debugger_wait_command_ready(void); int acpi_debugger_notify_command_complete(void); #else static inline int acpi_debugger_init(void) { return -ENODEV; } static inline int acpi_register_debugger(struct module *owner, const struct acpi_debugger_ops *ops) { return -ENODEV; } static inline void acpi_unregister_debugger(const struct acpi_debugger_ops *ops) { } static inline int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context) { return -ENODEV; } static inline int acpi_debugger_write_log(const char *msg) { return -ENODEV; } static inline int acpi_debugger_read_cmd(char *buffer, u32 buffer_length) { return -ENODEV; } static inline int acpi_debugger_wait_command_ready(void) { return -ENODEV; } static inline int acpi_debugger_notify_command_complete(void) { return -ENODEV; } #endif #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) void __iomem *__acpi_map_table(unsigned long phys, unsigned long size); void __acpi_unmap_table(void __iomem *map, unsigned long size); int early_acpi_boot_init(void); int acpi_boot_init (void); void acpi_boot_table_prepare (void); void acpi_boot_table_init (void); int acpi_mps_check (void); int acpi_numa_init (void); int acpi_locate_initial_tables (void); void acpi_reserve_initial_tables (void); void acpi_table_init_complete (void); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); int __init_or_acpilib acpi_table_parse_entries(char *id, unsigned long table_size, int entry_id, acpi_tbl_entry_handler handler, unsigned int max_entries); int __init_or_acpilib acpi_table_parse_entries_array(char *id, unsigned long table_size, struct acpi_subtable_proc *proc, int proc_num, unsigned int max_entries); int acpi_table_parse_madt(enum acpi_madt_type id, acpi_tbl_entry_handler handler, unsigned int max_entries); int __init_or_acpilib acpi_table_parse_cedt(enum acpi_cedt_type id, acpi_tbl_entry_handler_arg handler_arg, void *arg); int acpi_parse_mcfg (struct acpi_table_header *header); void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); #else static inline void acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { } #endif void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); #if defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) void acpi_arch_dma_setup(struct device *dev); #else static inline void acpi_arch_dma_setup(struct device *dev) { } #endif #ifdef CONFIG_ARM64 void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); #else static inline void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } #endif #ifdef CONFIG_RISCV void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa); #else static inline void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa) { } #endif #ifndef PHYS_CPUID_INVALID typedef u32 phys_cpuid_t; #define PHYS_CPUID_INVALID (phys_cpuid_t)(-1) #endif static inline bool invalid_logical_cpuid(u32 cpuid) { return (int)cpuid < 0; } static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) { return phys_id == PHYS_CPUID_INVALID; } int __init acpi_get_madt_revision(void); /* Validate the processor 
object's proc_id */ bool acpi_duplicate_processor_id(int proc_id); /* Processor _CTS control */ struct acpi_processor_power; #ifdef CONFIG_ACPI_PROCESSOR_CSTATE bool acpi_processor_claim_cst_control(void); int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, struct acpi_processor_power *info); #else static inline bool acpi_processor_claim_cst_control(void) { return false; } static inline int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, struct acpi_processor_power *info) { return -ENODEV; } #endif #ifdef CONFIG_ACPI_HOTPLUG_CPU /* Arch dependent functions for cpu hotplug support */ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu); int acpi_unmap_cpu(int cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ acpi_handle acpi_get_processor_handle(int cpu); #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); #endif int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); void acpi_irq_stats_init(void); extern u32 acpi_irq_handled; extern u32 acpi_irq_not_handled; extern unsigned int acpi_sci_irq; extern bool acpi_no_s5; #define INVALID_ACPI_IRQ ((unsigned)-1) static inline bool acpi_sci_irq_valid(void) { return acpi_sci_irq != INVALID_ACPI_IRQ; } extern int sbf_port; int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); void acpi_set_irq_model(enum acpi_irq_model_id model, struct fwnode_handle *(*)(u32)); void acpi_set_gsi_to_irq_fallback(u32 (*)(u32)); struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, unsigned int size, struct fwnode_handle *fwnode, const struct irq_domain_ops *ops, void *host_data); #ifdef CONFIG_X86_IO_APIC extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); #else static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) { return -1; } #endif /* * This function undoes the effect of one call to acpi_register_gsi(). * If this matches the last registration, any IRQ resources for gsi * are freed. 
*/ void acpi_unregister_gsi (u32 gsi); struct pci_dev; struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin); int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); bool acpi_isa_irq_available(int irq); #ifdef CONFIG_PCI void acpi_penalize_sci_irq(int irq, int trigger, int polarity); #else static inline void acpi_penalize_sci_irq(int irq, int trigger, int polarity) { } #endif void acpi_pci_irq_disable (struct pci_dev *dev); extern int ec_read(u8 addr, u8 *val); extern int ec_write(u8 addr, u8 val); extern int ec_transaction(u8 command, const u8 *wdata, unsigned wdata_len, u8 *rdata, unsigned rdata_len); extern acpi_handle ec_get_handle(void); extern bool acpi_is_pnp_device(struct acpi_device *); #if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) typedef void (*wmi_notify_handler) (union acpi_object *data, void *context); int wmi_instance_count(const char *guid); extern acpi_status wmi_evaluate_method(const char *guid, u8 instance, u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out); extern acpi_status wmi_query_block(const char *guid, u8 instance, struct acpi_buffer *out); extern acpi_status wmi_set_block(const char *guid, u8 instance, const struct acpi_buffer *in); extern acpi_status wmi_install_notify_handler(const char *guid, wmi_notify_handler handler, void *data); extern acpi_status wmi_remove_notify_handler(const char *guid); extern bool wmi_has_guid(const char *guid); extern char *wmi_get_acpi_device_uid(const char *guid); #endif /* CONFIG_ACPI_WMI */ #define ACPI_VIDEO_OUTPUT_SWITCHING 0x0001 #define ACPI_VIDEO_DEVICE_POSTING 0x0002 #define ACPI_VIDEO_ROM_AVAILABLE 0x0004 #define ACPI_VIDEO_BACKLIGHT 0x0008 #define ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR 0x0010 #define ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO 0x0020 #define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR 0x0040 #define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO 0x0080 #define ACPI_VIDEO_BACKLIGHT_DMI_VENDOR 0x0100 #define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800 extern char acpi_video_backlight_string[]; extern long acpi_is_video_device(acpi_handle handle); extern void acpi_osi_setup(char *str); extern bool acpi_osi_is_win8(void); #ifdef CONFIG_ACPI_THERMAL_LIB int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp); int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp); int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp); int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp); #endif #ifdef CONFIG_ACPI_HMAT int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord); #else static inline int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord) { return -EOPNOTSUPP; } #endif #ifdef CONFIG_ACPI_NUMA int acpi_map_pxm_to_node(int pxm); int acpi_get_node(acpi_handle handle); /** * pxm_to_online_node - Map proximity ID to online node * @pxm: ACPI proximity ID * * This is similar to pxm_to_node(), but always returns an online * node. When the mapped node from a given proximity ID is offline, it * looks up the node distance table and returns the nearest online node. * * ACPI device drivers, which are called after the NUMA initialization has * completed in the kernel, can call this interface to obtain their device * NUMA topology from ACPI tables. Such drivers do not have to deal with * offline nodes. 
A node may be offline when SRAT memory entry does not exist, * or NUMA is disabled, ex. "numa=off" on x86. */ static inline int pxm_to_online_node(int pxm) { int node = pxm_to_node(pxm); return numa_map_to_online_node(node); } #else static inline int pxm_to_online_node(int pxm) { return 0; } static inline int acpi_map_pxm_to_node(int pxm) { return 0; } static inline int acpi_get_node(acpi_handle handle) { return 0; } #endif extern int pnpacpi_disabled; #define PXM_INVAL (-1) bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res); bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res); bool acpi_dev_resource_address_space(struct acpi_resource *ares, struct resource_win *win); bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, struct resource_win *win); unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable); unsigned int acpi_dev_get_irq_type(int triggering, int polarity); bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, struct resource *res); void acpi_dev_free_resource_list(struct list_head *list); int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, int (*preproc)(struct acpi_resource *, void *), void *preproc_data); int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list); int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list); int acpi_dev_filter_resource_type(struct acpi_resource *ares, unsigned long types); static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares, void *arg) { return acpi_dev_filter_resource_type(ares, (unsigned long)arg); } struct acpi_device *acpi_resource_consumer(struct resource *res); int acpi_check_resource_conflict(const struct resource *res); int acpi_check_region(resource_size_t start, resource_size_t n, const char *name); int acpi_resources_are_enforced(void); #ifdef CONFIG_HIBERNATION extern int acpi_check_s4_hw_signature; #endif #ifdef CONFIG_PM_SLEEP void __init acpi_old_suspend_ordering(void); void __init acpi_nvs_nosave(void); void __init acpi_nvs_nosave_s3(void); void __init acpi_sleep_no_blacklist(void); #endif /* CONFIG_PM_SLEEP */ int acpi_register_wakeup_handler( int wake_irq, bool (*wakeup)(void *context), void *context); void acpi_unregister_wakeup_handler( bool (*wakeup)(void *context), void *context); struct acpi_osc_context { char *uuid_str; /* UUID string */ int rev; struct acpi_buffer cap; /* list of DWORD capabilities */ struct acpi_buffer ret; /* free by caller if success */ }; acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); /* Number of _OSC capability DWORDS depends on bridge type */ #define OSC_PCI_CAPABILITY_DWORDS 3 #define OSC_CXL_CAPABILITY_DWORDS 5 /* Indexes into _OSC Capabilities Buffer (DWORDs 2 to 5 are device-specific) */ #define OSC_QUERY_DWORD 0 /* DWORD 1 */ #define OSC_SUPPORT_DWORD 1 /* DWORD 2 */ #define OSC_CONTROL_DWORD 2 /* DWORD 3 */ #define OSC_EXT_SUPPORT_DWORD 3 /* DWORD 4 */ #define OSC_EXT_CONTROL_DWORD 4 /* DWORD 5 */ /* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */ #define OSC_QUERY_ENABLE 0x00000001 /* input */ #define OSC_REQUEST_ERROR 0x00000002 /* return */ #define OSC_INVALID_UUID_ERROR 0x00000004 /* return */ #define OSC_INVALID_REVISION_ERROR 0x00000008 /* return */ #define OSC_CAPABILITIES_MASK_ERROR 0x00000010 /* return */ /* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */ #define OSC_SB_PAD_SUPPORT 
0x00000001 #define OSC_SB_PPC_OST_SUPPORT 0x00000002 #define OSC_SB_PR3_SUPPORT 0x00000004 #define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008 #define OSC_SB_APEI_SUPPORT 0x00000010 #define OSC_SB_CPC_SUPPORT 0x00000020 #define OSC_SB_CPCV2_SUPPORT 0x00000040 #define OSC_SB_PCLPI_SUPPORT 0x00000080 #define OSC_SB_OSLPI_SUPPORT 0x00000100 #define OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT 0x00000200 #define OSC_SB_OVER_16_PSTATES_SUPPORT 0x00000400 #define OSC_SB_GED_SUPPORT 0x00000800 #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 #define OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT 0x00002000 #define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000 #define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00020000 #define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000 #define OSC_SB_BATTERY_CHARGE_LIMITING_SUPPORT 0x00080000 #define OSC_SB_PRM_SUPPORT 0x00200000 #define OSC_SB_FFH_OPR_SUPPORT 0x00400000 extern bool osc_sb_apei_support_acked; extern bool osc_pc_lpi_support_confirmed; extern bool osc_sb_native_usb4_support_confirmed; extern bool osc_sb_cppc2_support_acked; extern bool osc_cpc_flexible_adr_space_confirmed; /* USB4 Capabilities */ #define OSC_USB_USB3_TUNNELING 0x00000001 #define OSC_USB_DP_TUNNELING 0x00000002 #define OSC_USB_PCIE_TUNNELING 0x00000004 #define OSC_USB_XDOMAIN 0x00000008 extern u32 osc_sb_native_usb4_control; /* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */ #define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001 #define OSC_PCI_ASPM_SUPPORT 0x00000002 #define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004 #define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008 #define OSC_PCI_MSI_SUPPORT 0x00000010 #define OSC_PCI_EDR_SUPPORT 0x00000080 #define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100 /* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 #define OSC_PCI_SHPC_NATIVE_HP_CONTROL 0x00000002 #define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004 #define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008 #define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 #define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020 #define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080 /* CXL _OSC: Capabilities DWORD 4: Support Field */ #define OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT 0x00000001 #define OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT 0x00000002 #define OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT 0x00000004 #define OSC_CXL_NATIVE_HP_SUPPORT 0x00000008 /* CXL _OSC: Capabilities DWORD 5: Control Field */ #define OSC_CXL_ERROR_REPORTING_CONTROL 0x00000001 static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context) { u32 *ret = context->ret.pointer; return ret[OSC_CONTROL_DWORD]; } static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context) { u32 *ret = context->ret.pointer; return ret[OSC_EXT_CONTROL_DWORD]; } #define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002 #define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004 #define ACPI_GSB_ACCESS_ATTRIB_BYTE 0x00000006 #define ACPI_GSB_ACCESS_ATTRIB_WORD 0x00000008 #define ACPI_GSB_ACCESS_ATTRIB_BLOCK 0x0000000A #define ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE 0x0000000B #define ACPI_GSB_ACCESS_ATTRIB_WORD_CALL 0x0000000C #define ACPI_GSB_ACCESS_ATTRIB_BLOCK_CALL 0x0000000D #define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E #define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F /* Enable _OST when all relevant hotplug operations are enabled */ #if defined(CONFIG_ACPI_HOTPLUG_CPU) && \ defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \ defined(CONFIG_ACPI_CONTAINER) #define ACPI_HOTPLUG_OST #endif /* _OST Source Event Code (OSPM Action) */ #define 
ACPI_OST_EC_OSPM_SHUTDOWN 0x100 #define ACPI_OST_EC_OSPM_EJECT 0x103 #define ACPI_OST_EC_OSPM_INSERTION 0x200 /* _OST General Processing Status Code */ #define ACPI_OST_SC_SUCCESS 0x0 #define ACPI_OST_SC_NON_SPECIFIC_FAILURE 0x1 #define ACPI_OST_SC_UNRECOGNIZED_NOTIFY 0x2 /* _OST OS Shutdown Processing (0x100) Status Code */ #define ACPI_OST_SC_OS_SHUTDOWN_DENIED 0x80 #define ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS 0x81 #define ACPI_OST_SC_OS_SHUTDOWN_COMPLETED 0x82 #define ACPI_OST_SC_OS_SHUTDOWN_NOT_SUPPORTED 0x83 /* _OST Ejection Request (0x3, 0x103) Status Code */ #define ACPI_OST_SC_EJECT_NOT_SUPPORTED 0x80 #define ACPI_OST_SC_DEVICE_IN_USE 0x81 #define ACPI_OST_SC_DEVICE_BUSY 0x82 #define ACPI_OST_SC_EJECT_DEPENDENCY_BUSY 0x83 #define ACPI_OST_SC_EJECT_IN_PROGRESS 0x84 /* _OST Insertion Request (0x200) Status Code */ #define ACPI_OST_SC_INSERT_IN_PROGRESS 0x80 #define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81 #define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82 enum acpi_predicate { all_versions, less_than_or_equal, equal, greater_than_or_equal, }; /* Table must be terminted by a NULL entry */ struct acpi_platform_list { char oem_id[ACPI_OEM_ID_SIZE+1]; char oem_table_id[ACPI_OEM_TABLE_ID_SIZE+1]; u32 oem_revision; char *table; enum acpi_predicate pred; char *reason; u32 data; }; int acpi_match_platform_list(const struct acpi_platform_list *plat); extern void acpi_early_init(void); extern void acpi_subsystem_init(void); extern int acpi_nvs_register(__u64 start, __u64 size); extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), void *data); const struct acpi_device_id *acpi_match_acpi_device(const struct acpi_device_id *ids, const struct acpi_device *adev); const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, const struct device *dev); const void *acpi_device_get_match_data(const struct device *dev); extern bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv); int acpi_device_uevent_modalias(const struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); struct platform_device *acpi_create_platform_device(struct acpi_device *, const struct property_entry *); #define ACPI_PTR(_ptr) (_ptr) static inline void acpi_device_set_enumerated(struct acpi_device *adev) { adev->flags.visited = true; } static inline void acpi_device_clear_enumerated(struct acpi_device *adev) { adev->flags.visited = false; } enum acpi_reconfig_event { ACPI_RECONFIG_DEVICE_ADD = 0, ACPI_RECONFIG_DEVICE_REMOVE, }; int acpi_reconfig_notifier_register(struct notifier_block *nb); int acpi_reconfig_notifier_unregister(struct notifier_block *nb); #ifdef CONFIG_ACPI_GTDT int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count); int acpi_gtdt_map_ppi(int type); bool acpi_gtdt_c3stop(int type); int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count); #endif #ifndef ACPI_HAVE_ARCH_SET_ROOT_POINTER static inline void acpi_arch_set_root_pointer(u64 addr) { } #endif #ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER static inline u64 acpi_arch_get_root_pointer(void) { return 0; } #endif int acpi_get_local_u64_address(acpi_handle handle, u64 *addr); int acpi_get_local_address(acpi_handle handle, u32 *addr); const char *acpi_get_subsystem_id(acpi_handle handle); #else /* !CONFIG_ACPI */ #define acpi_disabled 1 #define ACPI_COMPANION(dev) (NULL) #define ACPI_COMPANION_SET(dev, adev) do { } while (0) #define ACPI_HANDLE(dev) (NULL) #define ACPI_HANDLE_FWNODE(fwnode) (NULL) /* Get rid of the 
-Wunused-variable for adev */ #define acpi_dev_uid_match(adev, uid2) (adev && false) #define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false) struct fwnode_handle; static inline bool acpi_dev_found(const char *hid) { return false; } static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) { return false; } struct acpi_device; static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer) { return -ENODEV; } static inline struct acpi_device * acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv) { return NULL; } static inline bool acpi_reduced_hardware(void) { return false; } static inline void acpi_dev_put(struct acpi_device *adev) {} static inline bool is_acpi_node(const struct fwnode_handle *fwnode) { return false; } static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode) { return false; } static inline struct acpi_device *to_acpi_device_node(const struct fwnode_handle *fwnode) { return NULL; } static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode) { return false; } static inline struct acpi_data_node *to_acpi_data_node(const struct fwnode_handle *fwnode) { return NULL; } static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode, const char *name) { return false; } static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) { return NULL; } static inline acpi_handle acpi_device_handle(struct acpi_device *adev) { return NULL; } static inline bool has_acpi_companion(struct device *dev) { return false; } static inline void acpi_preset_companion(struct device *dev, struct acpi_device *parent, u64 addr) { } static inline const char *acpi_dev_name(struct acpi_device *adev) { return NULL; } static inline struct device *acpi_get_first_physical_node(struct acpi_device *adev) { return NULL; } static inline void acpi_early_init(void) { } static inline void acpi_subsystem_init(void) { } static inline int early_acpi_boot_init(void) { return 0; } static inline int acpi_boot_init(void) { return 0; } static inline void acpi_boot_table_prepare(void) { } static inline void acpi_boot_table_init(void) { } static inline int acpi_mps_check(void) { return 0; } static inline int acpi_check_resource_conflict(struct resource *res) { return 0; } static inline int acpi_check_region(resource_size_t start, resource_size_t n, const char *name) { return 0; } struct acpi_table_header; static inline int acpi_table_parse(char *id, int (*handler)(struct acpi_table_header *)) { return -ENODEV; } static inline int acpi_nvs_register(__u64 start, __u64 size) { return 0; } static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), void *data) { return 0; } struct acpi_device_id; static inline const struct acpi_device_id *acpi_match_acpi_device( const struct acpi_device_id *ids, const struct acpi_device *adev) { return NULL; } static inline const struct acpi_device_id *acpi_match_device( const struct acpi_device_id *ids, const struct device *dev) { return NULL; } static inline const void *acpi_device_get_match_data(const struct device *dev) { return NULL; } static inline bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv) { return false; } static inline bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs) { return false; } static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4) { return NULL; } static inline union acpi_object 
*acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4, acpi_object_type type) { return NULL; } static inline int acpi_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env) { return -ENODEV; } static inline int acpi_device_modalias(struct device *dev, char *buf, int size) { return -ENODEV; } static inline struct platform_device * acpi_create_platform_device(struct acpi_device *adev, const struct property_entry *properties) { return NULL; } static inline bool acpi_dma_supported(const struct acpi_device *adev) { return false; } static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) { return DEV_DMA_NOT_SUPPORTED; } static inline int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map) { return -ENODEV; } static inline int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) { return 0; } static inline int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, const u32 *input_id) { return 0; } #define ACPI_PTR(_ptr) (NULL) static inline void acpi_device_set_enumerated(struct acpi_device *adev) { } static inline void acpi_device_clear_enumerated(struct acpi_device *adev) { } static inline int acpi_reconfig_notifier_register(struct notifier_block *nb) { return -EINVAL; } static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb) { return -EINVAL; } static inline struct acpi_device *acpi_resource_consumer(struct resource *res) { return NULL; } static inline int acpi_get_local_address(acpi_handle handle, u32 *addr) { return -ENODEV; } static inline const char *acpi_get_subsystem_id(acpi_handle handle) { return ERR_PTR(-ENODEV); } static inline int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context), void *context) { return -ENXIO; } static inline void acpi_unregister_wakeup_handler( bool (*wakeup)(void *context), void *context) { } struct acpi_osc_context; static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context) { return 0; } static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context) { return 0; } static inline bool acpi_sleep_state_supported(u8 sleep_state) { return false; } static inline acpi_handle acpi_get_processor_handle(int cpu) { return NULL; } #endif /* !CONFIG_ACPI */ #ifdef CONFIG_ACPI_HMAT int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid, resource_size_t *size); #else static inline int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid, resource_size_t *size) { return -EOPNOTSUPP; } #endif extern void arch_post_acpi_subsys_init(void); #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int acpi_ioapic_add(acpi_handle root); #else static inline int acpi_ioapic_add(acpi_handle root) { return 0; } #endif #ifdef CONFIG_ACPI void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, u32 pm1a_ctrl, u32 pm1b_ctrl)); acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control); void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, u32 val_a, u32 val_b)); acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b); #if defined(CONFIG_SUSPEND) && defined(CONFIG_X86) struct acpi_s2idle_dev_ops { struct list_head list_node; void (*prepare)(void); void (*check)(void); void (*restore)(void); }; int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg); void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg); int acpi_get_lps0_constraint(struct acpi_device *adev); #else /* 
CONFIG_SUSPEND && CONFIG_X86 */ static inline int acpi_get_lps0_constraint(struct device *dev) { return ACPI_STATE_UNKNOWN; } #endif /* CONFIG_SUSPEND && CONFIG_X86 */ void arch_reserve_mem_area(acpi_physical_address addr, size_t size); #else #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM) int acpi_dev_suspend(struct device *dev, bool wakeup); int acpi_dev_resume(struct device *dev); int acpi_subsys_runtime_suspend(struct device *dev); int acpi_subsys_runtime_resume(struct device *dev); int acpi_dev_pm_attach(struct device *dev, bool power_on); bool acpi_storage_d3(struct device *dev); bool acpi_dev_state_d0(struct device *dev); #else static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) { return 0; } static inline bool acpi_storage_d3(struct device *dev) { return false; } static inline bool acpi_dev_state_d0(struct device *dev) { return true; } #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) int acpi_subsys_prepare(struct device *dev); void acpi_subsys_complete(struct device *dev); int acpi_subsys_suspend_late(struct device *dev); int acpi_subsys_suspend_noirq(struct device *dev); int acpi_subsys_suspend(struct device *dev); int acpi_subsys_freeze(struct device *dev); int acpi_subsys_poweroff(struct device *dev); int acpi_subsys_restore_early(struct device *dev); #else static inline int acpi_subsys_prepare(struct device *dev) { return 0; } static inline void acpi_subsys_complete(struct device *dev) {} static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; } static inline int acpi_subsys_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_freeze(struct device *dev) { return 0; } static inline int acpi_subsys_poweroff(struct device *dev) { return 0; } static inline int acpi_subsys_restore_early(struct device *dev) { return 0; } #endif #if defined(CONFIG_ACPI_EC) && defined(CONFIG_PM_SLEEP) void acpi_ec_mark_gpe_for_wake(void); void acpi_ec_set_gpe_wake_mask(u8 action); #else static inline void acpi_ec_mark_gpe_for_wake(void) {} static inline void acpi_ec_set_gpe_wake_mask(u8 action) {} #endif #ifdef CONFIG_ACPI char *acpi_handle_path(acpi_handle handle); __printf(3, 4) void acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...); void acpi_evaluation_failure_warn(acpi_handle handle, const char *name, acpi_status status); #else /* !CONFIG_ACPI */ static inline __printf(3, 4) void acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {} static inline void acpi_evaluation_failure_warn(acpi_handle handle, const char *name, acpi_status status) {} #endif /* !CONFIG_ACPI */ #if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG) __printf(3, 4) void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...); #endif /* * acpi_handle_<level>: Print message with ACPI prefix and object path * * These interfaces acquire the global namespace mutex to obtain an object * path. In interrupt context, it shows the object path as <n/a>. */ #define acpi_handle_emerg(handle, fmt, ...) \ acpi_handle_printk(KERN_EMERG, handle, fmt, ##__VA_ARGS__) #define acpi_handle_alert(handle, fmt, ...) 
\ acpi_handle_printk(KERN_ALERT, handle, fmt, ##__VA_ARGS__) #define acpi_handle_crit(handle, fmt, ...) \ acpi_handle_printk(KERN_CRIT, handle, fmt, ##__VA_ARGS__) #define acpi_handle_err(handle, fmt, ...) \ acpi_handle_printk(KERN_ERR, handle, fmt, ##__VA_ARGS__) #define acpi_handle_warn(handle, fmt, ...) \ acpi_handle_printk(KERN_WARNING, handle, fmt, ##__VA_ARGS__) #define acpi_handle_notice(handle, fmt, ...) \ acpi_handle_printk(KERN_NOTICE, handle, fmt, ##__VA_ARGS__) #define acpi_handle_info(handle, fmt, ...) \ acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__) #if defined(DEBUG) #define acpi_handle_debug(handle, fmt, ...) \ acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__) #else #if defined(CONFIG_DYNAMIC_DEBUG) #define acpi_handle_debug(handle, fmt, ...) \ _dynamic_func_call(fmt, __acpi_handle_debug, \ handle, pr_fmt(fmt), ##__VA_ARGS__) #else #define acpi_handle_debug(handle, fmt, ...) \ ({ \ if (0) \ acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); \ 0; \ }) #endif #endif #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); bool acpi_gpio_get_io_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, bool *wake_capable); #else static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio) { return false; } static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio) { return false; } static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, bool *wake_capable) { return -ENXIO; } #endif static inline int acpi_dev_gpio_irq_wake_get(struct acpi_device *adev, int index, bool *wake_capable) { return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, wake_capable); } static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *con_id, int index) { return acpi_dev_gpio_irq_wake_get_by(adev, con_id, index, NULL); } static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) { return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, NULL); } /* Device properties */ #ifdef CONFIG_ACPI int acpi_dev_get_property(const struct acpi_device *adev, const char *name, acpi_object_type type, const union acpi_object **obj); int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, const char *name, size_t index, size_t num_args, struct fwnode_reference_args *args); static inline int acpi_node_get_property_reference( const struct fwnode_handle *fwnode, const char *name, size_t index, struct fwnode_reference_args *args) { return __acpi_node_get_property_reference(fwnode, name, index, NR_FWNODE_REFERENCE_ARGS, args); } static inline bool acpi_dev_has_props(const struct acpi_device *adev) { return !list_empty(&adev->data.properties); } struct acpi_device_properties * acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid, union acpi_object *properties); int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, void **valptr); struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child); struct acpi_probe_entry; typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, struct acpi_probe_entry *); #define ACPI_TABLE_ID_LEN 5 /** * struct acpi_probe_entry - boot-time probing entry * @id: 
ACPI table name * @type: Optional subtable type to match * (if @id contains subtables) * @subtable_valid: Optional callback to check the validity of * the subtable * @probe_table: Callback to the driver being probed when table * match is successful * @probe_subtbl: Callback to the driver being probed when table and * subtable match (and optional callback is successful) * @driver_data: Sideband data provided back to the driver */ struct acpi_probe_entry { __u8 id[ACPI_TABLE_ID_LEN]; __u8 type; acpi_probe_entry_validate_subtbl subtable_valid; union { acpi_tbl_table_handler probe_table; acpi_tbl_entry_handler probe_subtbl; }; kernel_ulong_t driver_data; }; void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr); #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \ valid, data, fn) \ static const struct acpi_probe_entry __acpi_probe_##name \ __used __section("__" #table "_acpi_probe_table") = { \ .id = table_id, \ .type = subtable, \ .subtable_valid = valid, \ .probe_table = fn, \ .driver_data = data, \ } #define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \ subtable, valid, data, fn) \ static const struct acpi_probe_entry __acpi_probe_##name \ __used __section("__" #table "_acpi_probe_table") = { \ .id = table_id, \ .type = subtable, \ .subtable_valid = valid, \ .probe_subtbl = fn, \ .driver_data = data, \ } #define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table #define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end int __acpi_probe_device_table(struct acpi_probe_entry *start, int nr); #define acpi_probe_device_table(t) \ ({ \ extern struct acpi_probe_entry ACPI_PROBE_TABLE(t), \ ACPI_PROBE_TABLE_END(t); \ __acpi_probe_device_table(&ACPI_PROBE_TABLE(t), \ (&ACPI_PROBE_TABLE_END(t) - \ &ACPI_PROBE_TABLE(t))); \ }) #else static inline int acpi_dev_get_property(struct acpi_device *adev, const char *name, acpi_object_type type, const union acpi_object **obj) { return -ENXIO; } static inline int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, const char *name, size_t index, size_t num_args, struct fwnode_reference_args *args) { return -ENXIO; } static inline int acpi_node_get_property_reference(const struct fwnode_handle *fwnode, const char *name, size_t index, struct fwnode_reference_args *args) { return -ENXIO; } static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, void **valptr) { return -ENXIO; } static inline struct fwnode_handle * acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child) { return NULL; } static inline struct fwnode_handle * acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle *prev) { return ERR_PTR(-ENXIO); } static inline int acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle **remote, struct fwnode_handle **port, struct fwnode_handle **endpoint) { return -ENXIO; } #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ static const void * __acpi_table_##name[] \ __attribute__((unused)) \ = { (void *) table_id, \ (void *) subtable, \ (void *) valid, \ (void *) fn, \ (void *) data } #define acpi_probe_device_table(t) ({ int __r = 0; __r;}) #endif #ifdef CONFIG_ACPI_TABLE_UPGRADE void acpi_table_upgrade(void); #else static inline void acpi_table_upgrade(void) { } #endif #if defined(CONFIG_ACPI) && defined(CONFIG_ACPI_WATCHDOG) extern bool acpi_has_watchdog(void); #else static inline bool acpi_has_watchdog(void) { return false; 
} #endif #ifdef CONFIG_ACPI_SPCR_TABLE extern bool qdf2400_e44_present; int acpi_parse_spcr(bool enable_earlycon, bool enable_console); #else static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console) { return 0; } #endif #if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI) int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res); #else static inline int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res) { return -EINVAL; } #endif #ifdef CONFIG_ACPI_LPIT int lpit_read_residency_count_address(u64 *address); #else static inline int lpit_read_residency_count_address(u64 *address) { return -EINVAL; } #endif #ifdef CONFIG_ACPI_PROCESSOR_IDLE #ifndef arch_get_idle_state_flags static inline unsigned int arch_get_idle_state_flags(u32 arch_flags) { return 0; } #endif #endif /* CONFIG_ACPI_PROCESSOR_IDLE */ #ifdef CONFIG_ACPI_PPTT int acpi_pptt_cpu_is_thread(unsigned int cpu); int find_acpi_cpu_topology(unsigned int cpu, int level); int find_acpi_cpu_topology_cluster(unsigned int cpu); int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { return -EINVAL; } static inline int find_acpi_cpu_topology(unsigned int cpu, int level) { return -EINVAL; } static inline int find_acpi_cpu_topology_cluster(unsigned int cpu) { return -EINVAL; } static inline int find_acpi_cpu_topology_package(unsigned int cpu) { return -EINVAL; } static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) { return -EINVAL; } #endif void acpi_arch_init(void); #ifdef CONFIG_ACPI_PCC void acpi_init_pcc(void); #else static inline void acpi_init_pcc(void) { } #endif #ifdef CONFIG_ACPI_FFH void acpi_init_ffh(void); extern int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt); extern int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context); #else static inline void acpi_init_ffh(void) { } #endif #ifdef CONFIG_ACPI extern void acpi_device_notify(struct device *dev); extern void acpi_device_notify_remove(struct device *dev); #else static inline void acpi_device_notify(struct device *dev) { } static inline void acpi_device_notify_remove(struct device *dev) { } #endif static inline void acpi_use_parent_companion(struct device *dev) { ACPI_COMPANION_SET(dev, ACPI_COMPANION(dev->parent)); } #ifdef CONFIG_ACPI_HMAT int hmat_update_target_coordinates(int nid, struct access_coordinate *coord, enum access_coordinate_class access); #else static inline int hmat_update_target_coordinates(int nid, struct access_coordinate *coord, enum access_coordinate_class access) { return -EOPNOTSUPP; } #endif #ifdef CONFIG_ACPI_NUMA bool acpi_node_backed_by_real_pxm(int nid); #else static inline bool acpi_node_backed_by_real_pxm(int nid) { return false; } #endif #endif /*_LINUX_ACPI_H*/
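/*
 * Illustrative sketch, not part of <linux/acpi.h>: how a platform driver
 * might combine the helpers declared above. The driver-side names here
 * (my_probe and its platform_device) are hypothetical. Thanks to the
 * stubs above, the same code also builds with CONFIG_ACPI or
 * CONFIG_GPIOLIB disabled, where ACPI_COMPANION() yields NULL and
 * acpi_dev_gpio_irq_get() returns -ENXIO.
 */
#include <linux/acpi.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
	int irq;

	if (!adev)
		return -ENODEV;

	/* Resolve the first GpioInt resource of the companion device */
	irq = acpi_dev_gpio_irq_get(adev, 0);
	if (irq < 0)
		return irq;

	/* Prefixed with the ACPI object path (<n/a> in interrupt context) */
	acpi_handle_info(ACPI_HANDLE(&pdev->dev), "GpioInt IRQ %d\n", irq);
	return 0;
}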
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright(c) 2018-2019 Realtek Corporation */
#include <linux/devcoredump.h> #include "main.h" #include "regd.h" #include "fw.h" #include "ps.h" #include "sec.h" #include "mac.h" #include "coex.h" #include "phy.h" #include "reg.h" #include "efuse.h" #include "tx.h" #include "debug.h" #include "bf.h" #include "sar.h" #include "sdio.h" #include "led.h"
bool rtw_disable_lps_deep_mode; EXPORT_SYMBOL(rtw_disable_lps_deep_mode); bool rtw_bf_support = true; unsigned int rtw_debug_mask; EXPORT_SYMBOL(rtw_debug_mask);
/* EDCCA is enabled during normal behavior. For debugging purposes in * a noisy environment, it can be disabled via the edcca debugfs entry. * Because all rtw88 devices will probably be affected if the environment * is noisy, rtw_edcca_enabled is declared by the driver instead of per * device. So, turning it off takes effect for all rtw88 devices, until * there is a compelling reason to maintain rtw_edcca_enabled per device. */ bool rtw_edcca_enabled = true;
module_param_named(disable_lps_deep, rtw_disable_lps_deep_mode, bool, 0644); module_param_named(support_bf, rtw_bf_support, bool, 0644); module_param_named(debug_mask, rtw_debug_mask, uint, 0644); MODULE_PARM_DESC(disable_lps_deep, "Set Y to disable Deep PS"); MODULE_PARM_DESC(support_bf, "Set Y to enable beamformee support"); MODULE_PARM_DESC(debug_mask, "Debugging mask");
static struct ieee80211_channel rtw_channeltable_2g[] = { {.center_freq = 2412, .hw_value = 1,}, {.center_freq = 2417, .hw_value = 2,}, {.center_freq = 2422, .hw_value = 3,}, {.center_freq = 2427, .hw_value = 4,}, {.center_freq = 2432, .hw_value = 5,}, {.center_freq = 2437, .hw_value = 6,}, {.center_freq = 2442, .hw_value = 7,}, {.center_freq = 2447, .hw_value = 8,}, {.center_freq = 2452, .hw_value = 9,}, {.center_freq = 2457, .hw_value = 10,}, {.center_freq = 2462, .hw_value = 11,}, {.center_freq = 2467, .hw_value = 12,}, {.center_freq = 2472, .hw_value = 13,}, {.center_freq = 2484, .hw_value = 14,}, };
static struct ieee80211_channel rtw_channeltable_5g[] = { {.center_freq = 5180, .hw_value = 36,}, {.center_freq = 5200, .hw_value = 40,}, {.center_freq = 5220, .hw_value = 44,}, {.center_freq = 5240, .hw_value = 48,}, {.center_freq = 5260, .hw_value = 52,}, {.center_freq = 5280, .hw_value = 56,}, {.center_freq = 5300, .hw_value = 60,}, {.center_freq = 5320, .hw_value = 64,}, {.center_freq = 5500, .hw_value = 100,}, {.center_freq = 5520, .hw_value = 104,}, {.center_freq = 5540, .hw_value = 108,}, {.center_freq = 5560, .hw_value = 112,}, {.center_freq = 5580, .hw_value = 116,}, {.center_freq = 5600, .hw_value = 120,}, {.center_freq = 5620, .hw_value = 124,}, {.center_freq = 5640, .hw_value = 128,}, {.center_freq = 5660, .hw_value = 132,}, {.center_freq = 5680, .hw_value = 136,}, {.center_freq = 5700, .hw_value = 140,}, {.center_freq = 5720, .hw_value = 144,}, {.center_freq = 5745, .hw_value = 149,}, {.center_freq = 5765, .hw_value = 153,}, {.center_freq = 5785, .hw_value 
= 157,}, {.center_freq = 5805, .hw_value = 161,}, {.center_freq = 5825, .hw_value = 165, .flags = IEEE80211_CHAN_NO_HT40MINUS}, }; static struct ieee80211_rate rtw_ratetable[] = { {.bitrate = 10, .hw_value = 0x00,}, {.bitrate = 20, .hw_value = 0x01,}, {.bitrate = 55, .hw_value = 0x02,}, {.bitrate = 110, .hw_value = 0x03,}, {.bitrate = 60, .hw_value = 0x04,}, {.bitrate = 90, .hw_value = 0x05,}, {.bitrate = 120, .hw_value = 0x06,}, {.bitrate = 180, .hw_value = 0x07,}, {.bitrate = 240, .hw_value = 0x08,}, {.bitrate = 360, .hw_value = 0x09,}, {.bitrate = 480, .hw_value = 0x0a,}, {.bitrate = 540, .hw_value = 0x0b,}, }; static const struct ieee80211_iface_limit rtw_iface_limits[] = { { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, { .max = 1, .types = BIT(NL80211_IFTYPE_AP), } }; static const struct ieee80211_iface_combination rtw_iface_combs[] = { { .limits = rtw_iface_limits, .n_limits = ARRAY_SIZE(rtw_iface_limits), .max_interfaces = 2, .num_different_channels = 1, } }; u16 rtw_desc_to_bitrate(u8 desc_rate) { struct ieee80211_rate rate; if (WARN(desc_rate >= ARRAY_SIZE(rtw_ratetable), "invalid desc rate\n")) return 0; rate = rtw_ratetable[desc_rate]; return rate.bitrate; } static const struct ieee80211_supported_band rtw_band_2ghz = { .band = NL80211_BAND_2GHZ, .channels = rtw_channeltable_2g, .n_channels = ARRAY_SIZE(rtw_channeltable_2g), .bitrates = rtw_ratetable, .n_bitrates = ARRAY_SIZE(rtw_ratetable), .ht_cap = {0}, .vht_cap = {0}, }; static const struct ieee80211_supported_band rtw_band_5ghz = { .band = NL80211_BAND_5GHZ, .channels = rtw_channeltable_5g, .n_channels = ARRAY_SIZE(rtw_channeltable_5g), /* 5G has no CCK rates */ .bitrates = rtw_ratetable + 4, .n_bitrates = ARRAY_SIZE(rtw_ratetable) - 4, .ht_cap = {0}, .vht_cap = {0}, }; struct rtw_watch_dog_iter_data { struct rtw_dev *rtwdev; struct rtw_vif *rtwvif; }; static void rtw_dynamic_csi_rate(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif) { struct rtw_bf_info *bf_info = &rtwdev->bf_info; u8 fix_rate_enable = 0; u8 new_csi_rate_idx; if (rtwvif->bfee.role != RTW_BFEE_SU && rtwvif->bfee.role != RTW_BFEE_MU) return; rtw_chip_cfg_csi_rate(rtwdev, rtwdev->dm_info.min_rssi, bf_info->cur_csi_rpt_rate, fix_rate_enable, &new_csi_rate_idx); if (new_csi_rate_idx != bf_info->cur_csi_rpt_rate) bf_info->cur_csi_rpt_rate = new_csi_rate_idx; } static void rtw_vif_watch_dog_iter(void *data, struct ieee80211_vif *vif) { struct rtw_watch_dog_iter_data *iter_data = data; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; if (vif->type == NL80211_IFTYPE_STATION) if (vif->cfg.assoc) iter_data->rtwvif = rtwvif; rtw_dynamic_csi_rate(iter_data->rtwdev, rtwvif); rtwvif->stats.tx_unicast = 0; rtwvif->stats.rx_unicast = 0; rtwvif->stats.tx_cnt = 0; rtwvif->stats.rx_cnt = 0; } static void rtw_sw_beacon_loss_check(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, int received_beacons) { int watchdog_delay = 2000000 / 1024; /* TU */ int beacon_int, expected_beacons; if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER) || !rtwvif) return; beacon_int = rtwvif_to_vif(rtwvif)->bss_conf.beacon_int; expected_beacons = DIV_ROUND_UP(watchdog_delay, beacon_int); rtwdev->beacon_loss = received_beacons < expected_beacons / 2; } /* process TX/RX statistics periodically for hardware, * the information helps hardware to enhance performance */ static void rtw_watch_dog_work(struct work_struct *work) { struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, watch_dog_work.work); struct rtw_traffic_stats *stats = &rtwdev->stats; struct 
rtw_watch_dog_iter_data data = {}; bool busy_traffic = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); int received_beacons = rtwdev->dm_info.cur_pkt_count.num_bcn_pkt; u32 tx_unicast_mbps, rx_unicast_mbps; bool ps_active; mutex_lock(&rtwdev->mutex); if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags)) goto unlock; ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work, RTW_WATCH_DOG_DELAY_TIME); if (rtwdev->stats.tx_cnt > 100 || rtwdev->stats.rx_cnt > 100) set_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); else clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags)) rtw_coex_wl_status_change_notify(rtwdev, 0); if (stats->tx_cnt > RTW_LPS_THRESHOLD || stats->rx_cnt > RTW_LPS_THRESHOLD) ps_active = true; else ps_active = false; tx_unicast_mbps = stats->tx_unicast >> RTW_TP_SHIFT; rx_unicast_mbps = stats->rx_unicast >> RTW_TP_SHIFT; ewma_tp_add(&stats->tx_ewma_tp, tx_unicast_mbps); ewma_tp_add(&stats->rx_ewma_tp, rx_unicast_mbps); stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp); stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp); /* reset tx/rx statistics */ stats->tx_unicast = 0; stats->rx_unicast = 0; stats->tx_cnt = 0; stats->rx_cnt = 0; if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) goto unlock; /* make sure BB/RF is working for dynamic mech */ rtw_leave_lps(rtwdev); rtw_coex_wl_status_check(rtwdev); rtw_coex_query_bt_hid_list(rtwdev); rtw_coex_active_query_bt_info(rtwdev); rtw_phy_dynamic_mechanism(rtwdev); rtw_hci_dynamic_rx_agg(rtwdev, tx_unicast_mbps >= 1 || rx_unicast_mbps >= 1); data.rtwdev = rtwdev; /* rtw_iterate_vifs internally uses an atomic iterator, which is needed * to avoid taking the local->iflist_mtx mutex */ rtw_iterate_vifs(rtwdev, rtw_vif_watch_dog_iter, &data); rtw_sw_beacon_loss_check(rtwdev, data.rtwvif, received_beacons); /* fw supports entering lps with only one station associated; if more * than one station is associated to the AP, then we can not enter * lps, because fw does not handle the overlapped beacon intervals * * rtw_recalc_lps() iterates vifs and determines whether the driver can * enter ps based on vif->type and vif->cfg.ps; all we need to do here * is to get that vif and check whether the device has traffic above * the threshold. 
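 * In short: rtw_enter_lps() below runs only when ps_enabled is set, the
 * iteration above found an associated station vif, traffic stayed under
 * the threshold, and neither beacon loss nor an active AP was detected.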
*/ if (rtwdev->ps_enabled && data.rtwvif && !ps_active && !rtwdev->beacon_loss && !rtwdev->ap_active) rtw_enter_lps(rtwdev, data.rtwvif->port); rtwdev->watch_dog_cnt++; unlock: mutex_unlock(&rtwdev->mutex); } static void rtw_c2h_work(struct work_struct *work) { struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, c2h_work); struct sk_buff *skb, *tmp; skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { skb_unlink(skb, &rtwdev->c2h_queue); rtw_fw_c2h_cmd_handle(rtwdev, skb); dev_kfree_skb_any(skb); } } static void rtw_ips_work(struct work_struct *work) { struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ips_work); mutex_lock(&rtwdev->mutex); if (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE) rtw_enter_ips(rtwdev); mutex_unlock(&rtwdev->mutex); } static void rtw_sta_rc_work(struct work_struct *work) { struct rtw_sta_info *si = container_of(work, struct rtw_sta_info, rc_work); struct rtw_dev *rtwdev = si->rtwdev; mutex_lock(&rtwdev->mutex); rtw_update_sta_info(rtwdev, si, true); mutex_unlock(&rtwdev->mutex); } int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, struct ieee80211_vif *vif) { struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; int i; if (vif->type == NL80211_IFTYPE_STATION) { si->mac_id = rtwvif->mac_id; } else { si->mac_id = rtw_acquire_macid(rtwdev); if (si->mac_id >= RTW_MAX_MAC_ID_NUM) return -ENOSPC; } si->rtwdev = rtwdev; si->sta = sta; si->vif = vif; si->init_ra_lv = 1; ewma_rssi_init(&si->avg_rssi); for (i = 0; i < ARRAY_SIZE(sta->txq); i++) rtw_txq_init(rtwdev, sta->txq[i]); INIT_WORK(&si->rc_work, rtw_sta_rc_work); rtw_update_sta_info(rtwdev, si, true); rtw_fw_media_status_report(rtwdev, si->mac_id, true); rtwdev->sta_cnt++; rtwdev->beacon_loss = false; rtw_dbg(rtwdev, RTW_DBG_STATE, "sta %pM joined with macid %d\n", sta->addr, si->mac_id); return 0; } void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, bool fw_exist) { struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; struct ieee80211_vif *vif = si->vif; int i; cancel_work_sync(&si->rc_work); if (vif->type != NL80211_IFTYPE_STATION) rtw_release_macid(rtwdev, si->mac_id); if (fw_exist) rtw_fw_media_status_report(rtwdev, si->mac_id, false); for (i = 0; i < ARRAY_SIZE(sta->txq); i++) rtw_txq_cleanup(rtwdev, sta->txq[i]); kfree(si->mask); rtwdev->sta_cnt--; rtw_dbg(rtwdev, RTW_DBG_STATE, "sta %pM with macid %d left\n", sta->addr, si->mac_id); } struct rtw_fwcd_hdr { u32 item; u32 size; u32 padding1; u32 padding2; } __packed; static int rtw_fwcd_prep(struct rtw_dev *rtwdev) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; const struct rtw_fwcd_segs *segs = chip->fwcd_segs; u32 prep_size = chip->fw_rxff_size + sizeof(struct rtw_fwcd_hdr); u8 i; if (segs) { prep_size += segs->num * sizeof(struct rtw_fwcd_hdr); for (i = 0; i < segs->num; i++) prep_size += segs->segs[i]; } desc->data = vmalloc(prep_size); if (!desc->data) return -ENOMEM; desc->size = prep_size; desc->next = desc->data; return 0; } static u8 *rtw_fwcd_next(struct rtw_dev *rtwdev, u32 item, u32 size) { struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; struct rtw_fwcd_hdr *hdr; u8 *next; if (!desc->data) { rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd isn't prepared successfully\n"); return NULL; } next = desc->next + sizeof(struct rtw_fwcd_hdr); if (next - desc->data + size > desc->size) { rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd isn't prepared enough\n"); return NULL; } hdr = (struct 
rtw_fwcd_hdr *)(desc->next); hdr->item = item; hdr->size = size; hdr->padding1 = 0x01234567; hdr->padding2 = 0x89abcdef; desc->next = next + size; return next; } static void rtw_fwcd_dump(struct rtw_dev *rtwdev) { struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; rtw_dbg(rtwdev, RTW_DBG_FW, "dump fwcd\n"); /* Data will be freed after lifetime of device coredump. After calling * dev_coredump, data is supposed to be handled by the device coredump * framework. Note that a new dump will be discarded if a previous one * hasn't been released yet. */ dev_coredumpv(rtwdev->dev, desc->data, desc->size, GFP_KERNEL); } static void rtw_fwcd_free(struct rtw_dev *rtwdev, bool free_self) { struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; if (free_self) { rtw_dbg(rtwdev, RTW_DBG_FW, "free fwcd by self\n"); vfree(desc->data); } desc->data = NULL; desc->next = NULL; } static int rtw_fw_dump_crash_log(struct rtw_dev *rtwdev) { u32 size = rtwdev->chip->fw_rxff_size; u32 *buf; u8 seq; buf = (u32 *)rtw_fwcd_next(rtwdev, RTW_FWCD_TLV, size); if (!buf) return -ENOMEM; if (rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, size, buf)) { rtw_dbg(rtwdev, RTW_DBG_FW, "dump fw fifo fail\n"); return -EINVAL; } if (GET_FW_DUMP_LEN(buf) == 0) { rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's length is 0\n"); return -EINVAL; } seq = GET_FW_DUMP_SEQ(buf); if (seq > 0) { rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's seq is wrong: %d\n", seq); return -EINVAL; } return 0; } int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size, u32 fwcd_item) { u32 rxff = rtwdev->chip->fw_rxff_size; u32 dump_size, done_size = 0; u8 *buf; int ret; buf = rtw_fwcd_next(rtwdev, fwcd_item, size); if (!buf) return -ENOMEM; while (size) { dump_size = size > rxff ? rxff : size; ret = rtw_ddma_to_fw_fifo(rtwdev, ocp_src + done_size, dump_size); if (ret) { rtw_err(rtwdev, "ddma fw 0x%x [+0x%x] to fw fifo fail\n", ocp_src, done_size); return ret; } ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, dump_size, (u32 *)(buf + done_size)); if (ret) { rtw_err(rtwdev, "dump fw 0x%x [+0x%x] from fw fifo fail\n", ocp_src, done_size); return ret; } size -= dump_size; done_size += dump_size; } return 0; } EXPORT_SYMBOL(rtw_dump_fw); int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size) { u8 *buf; u32 i; if (addr & 0x3) { WARN(1, "should be 4-byte aligned, addr = 0x%08x\n", addr); return -EINVAL; } buf = rtw_fwcd_next(rtwdev, RTW_FWCD_REG, size); if (!buf) return -ENOMEM; for (i = 0; i < size; i += 4) *(u32 *)(buf + i) = rtw_read32(rtwdev, addr + i); return 0; } EXPORT_SYMBOL(rtw_dump_reg); void rtw_vif_assoc_changed(struct rtw_vif *rtwvif, struct ieee80211_bss_conf *conf) { struct ieee80211_vif *vif = NULL; if (conf) vif = container_of(conf, struct ieee80211_vif, bss_conf); if (conf && vif->cfg.assoc) { rtwvif->aid = vif->cfg.aid; rtwvif->net_type = RTW_NET_MGD_LINKED; } else { rtwvif->aid = 0; rtwvif->net_type = RTW_NET_NO_LINK; } } static void rtw_reset_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data) { struct rtw_dev *rtwdev = (struct rtw_dev *)data; struct rtw_sec_desc *sec = &rtwdev->sec; rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx); } static void rtw_reset_sta_iter(void *data, struct ieee80211_sta *sta) { struct rtw_dev *rtwdev = (struct rtw_dev *)data; if (rtwdev->sta_cnt == 0) { rtw_warn(rtwdev, "sta count before reset should not be 0\n"); return; } rtw_sta_remove(rtwdev, sta, false); } static void rtw_reset_vif_iter(void 
*data, u8 *mac, struct ieee80211_vif *vif) { struct rtw_dev *rtwdev = (struct rtw_dev *)data; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; rtw_bf_disassoc(rtwdev, vif, NULL); rtw_vif_assoc_changed(rtwvif, NULL); rtw_txq_cleanup(rtwdev, vif->txq); rtw_release_macid(rtwdev, rtwvif->mac_id); } void rtw_fw_recovery(struct rtw_dev *rtwdev) { if (!test_bit(RTW_FLAG_RESTARTING, rtwdev->flags)) ieee80211_queue_work(rtwdev->hw, &rtwdev->fw_recovery_work); } static void __fw_recovery_work(struct rtw_dev *rtwdev) { int ret = 0; set_bit(RTW_FLAG_RESTARTING, rtwdev->flags); clear_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags); ret = rtw_fwcd_prep(rtwdev); if (ret) goto free; ret = rtw_fw_dump_crash_log(rtwdev); if (ret) goto free; ret = rtw_chip_dump_fw_crash(rtwdev); if (ret) goto free; rtw_fwcd_dump(rtwdev); free: rtw_fwcd_free(rtwdev, !!ret); rtw_write8(rtwdev, REG_MCU_TST_CFG, 0); WARN(1, "firmware crash, start reset and recover\n"); rcu_read_lock(); rtw_iterate_keys_rcu(rtwdev, NULL, rtw_reset_key_iter, rtwdev); rcu_read_unlock(); rtw_iterate_stas_atomic(rtwdev, rtw_reset_sta_iter, rtwdev); rtw_iterate_vifs_atomic(rtwdev, rtw_reset_vif_iter, rtwdev); bitmap_zero(rtwdev->hw_port, RTW_PORT_NUM); rtw_enter_ips(rtwdev); } static void rtw_fw_recovery_work(struct work_struct *work) { struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, fw_recovery_work); mutex_lock(&rtwdev->mutex); __fw_recovery_work(rtwdev); mutex_unlock(&rtwdev->mutex); ieee80211_restart_hw(rtwdev->hw); } struct rtw_txq_ba_iter_data { }; static void rtw_txq_ba_iter(void *data, struct ieee80211_sta *sta) { struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; int ret; u8 tid; tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS); while (tid != IEEE80211_NUM_TIDS) { clear_bit(tid, si->tid_ba); ret = ieee80211_start_tx_ba_session(sta, tid, 0); if (ret == -EINVAL) { struct ieee80211_txq *txq; struct rtw_txq *rtwtxq; txq = sta->txq[tid]; rtwtxq = (struct rtw_txq *)txq->drv_priv; set_bit(RTW_TXQ_BLOCK_BA, &rtwtxq->flags); } tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS); } } static void rtw_txq_ba_work(struct work_struct *work) { struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ba_work); struct rtw_txq_ba_iter_data data; rtw_iterate_stas_atomic(rtwdev, rtw_txq_ba_iter, &data); } void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel) { if (IS_CH_2G_BAND(channel)) pkt_stat->band = NL80211_BAND_2GHZ; else if (IS_CH_5G_BAND(channel)) pkt_stat->band = NL80211_BAND_5GHZ; else return; pkt_stat->freq = ieee80211_channel_to_frequency(channel, pkt_stat->band); } EXPORT_SYMBOL(rtw_set_rx_freq_band); void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period) { rtw_write32_set(rtwdev, REG_TCR, BIT_TCR_UPDATE_TIMIE); rtw_write8(rtwdev, REG_DTIM_COUNTER_ROOT, dtim_period - 1); } void rtw_update_channel(struct rtw_dev *rtwdev, u8 center_channel, u8 primary_channel, enum rtw_supported_band band, enum rtw_bandwidth bandwidth) { enum nl80211_band nl_band = rtw_hw_to_nl80211_band(band); struct rtw_hal *hal = &rtwdev->hal; u8 *cch_by_bw = hal->cch_by_bw; u32 center_freq, primary_freq; enum rtw_sar_bands sar_band; u8 primary_channel_idx; center_freq = ieee80211_channel_to_frequency(center_channel, nl_band); primary_freq = ieee80211_channel_to_frequency(primary_channel, nl_band); /* assign the center channel used while 20M bw is selected */ cch_by_bw[RTW_CHANNEL_WIDTH_20] = primary_channel; /* assign the center channel used while current bw is selected */ cch_by_bw[bandwidth] = center_channel; 
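/*
 * Worked example for the switch below: an 80 MHz channel with
 * center_channel 42 (5210 MHz) and primary_channel 36 (5180 MHz) gives
 * center_freq - primary_freq == 30, so the RTW_CHANNEL_WIDTH_80 case
 * picks RTW_SC_20_LOWEST and records 42 - 4 = 38 as the 40 MHz center
 * in cch_by_bw[RTW_CHANNEL_WIDTH_40].
 */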
switch (bandwidth) { case RTW_CHANNEL_WIDTH_20: default: primary_channel_idx = RTW_SC_DONT_CARE; break; case RTW_CHANNEL_WIDTH_40: if (primary_freq > center_freq) primary_channel_idx = RTW_SC_20_UPPER; else primary_channel_idx = RTW_SC_20_LOWER; break; case RTW_CHANNEL_WIDTH_80: if (primary_freq > center_freq) { if (primary_freq - center_freq == 10) primary_channel_idx = RTW_SC_20_UPPER; else primary_channel_idx = RTW_SC_20_UPMOST; /* assign the center channel used * while 40M bw is selected */ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_channel + 4; } else { if (center_freq - primary_freq == 10) primary_channel_idx = RTW_SC_20_LOWER; else primary_channel_idx = RTW_SC_20_LOWEST; /* assign the center channel used * while 40M bw is selected */ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_channel - 4; } break; } switch (center_channel) { case 1 ... 14: sar_band = RTW_SAR_BAND_0; break; case 36 ... 64: sar_band = RTW_SAR_BAND_1; break; case 100 ... 144: sar_band = RTW_SAR_BAND_3; break; case 149 ... 177: sar_band = RTW_SAR_BAND_4; break; default: WARN(1, "unknown ch(%u) to SAR band\n", center_channel); sar_band = RTW_SAR_BAND_0; break; } hal->current_primary_channel_index = primary_channel_idx; hal->current_band_width = bandwidth; hal->primary_channel = primary_channel; hal->current_channel = center_channel; hal->current_band_type = band; hal->sar_band = sar_band; } void rtw_get_channel_params(struct cfg80211_chan_def *chandef, struct rtw_channel_params *chan_params) { struct ieee80211_channel *channel = chandef->chan; enum nl80211_chan_width width = chandef->width; u32 primary_freq, center_freq; u8 center_chan; u8 bandwidth = RTW_CHANNEL_WIDTH_20; center_chan = channel->hw_value; primary_freq = channel->center_freq; center_freq = chandef->center_freq1; switch (width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: bandwidth = RTW_CHANNEL_WIDTH_20; break; case NL80211_CHAN_WIDTH_40: bandwidth = RTW_CHANNEL_WIDTH_40; if (primary_freq > center_freq) center_chan -= 2; else center_chan += 2; break; case NL80211_CHAN_WIDTH_80: bandwidth = RTW_CHANNEL_WIDTH_80; if (primary_freq > center_freq) { if (primary_freq - center_freq == 10) center_chan -= 2; else center_chan -= 6; } else { if (center_freq - primary_freq == 10) center_chan += 2; else center_chan += 6; } break; default: center_chan = 0; break; } chan_params->center_chan = center_chan; chan_params->bandwidth = bandwidth; chan_params->primary_chan = channel->hw_value; } void rtw_set_channel(struct rtw_dev *rtwdev) { const struct rtw_chip_info *chip = rtwdev->chip; struct ieee80211_hw *hw = rtwdev->hw; struct rtw_hal *hal = &rtwdev->hal; struct rtw_channel_params ch_param; u8 center_chan, primary_chan, bandwidth, band; rtw_get_channel_params(&hw->conf.chandef, &ch_param); if (WARN(ch_param.center_chan == 0, "Invalid channel\n")) return; center_chan = ch_param.center_chan; primary_chan = ch_param.primary_chan; bandwidth = ch_param.bandwidth; band = ch_param.center_chan > 14 ? 
RTW_BAND_5G : RTW_BAND_2G; rtw_update_channel(rtwdev, center_chan, primary_chan, band, bandwidth); if (rtwdev->scan_info.op_chan) rtw_store_op_chan(rtwdev, true); chip->ops->set_channel(rtwdev, center_chan, bandwidth, hal->current_primary_channel_index); if (hal->current_band_type == RTW_BAND_5G) { rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G); } else { if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G); else rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G_NOFORSCAN); } rtw_phy_set_tx_power_level(rtwdev, center_chan); /* if the channel isn't set for scanning, we will do RF calibration * in ieee80211_ops::mgd_prepare_tx(). Performing the calibration * during scanning on each channel takes too long. */ if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) rtwdev->need_rfk = true; } void rtw_chip_prepare_tx(struct rtw_dev *rtwdev) { const struct rtw_chip_info *chip = rtwdev->chip; if (rtwdev->need_rfk) { rtwdev->need_rfk = false; chip->ops->phy_calibration(rtwdev); } } static void rtw_vif_write_addr(struct rtw_dev *rtwdev, u32 start, u8 *addr) { int i; for (i = 0; i < ETH_ALEN; i++) rtw_write8(rtwdev, start + i, addr[i]); } void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, u32 config) { u32 addr, mask; if (config & PORT_SET_MAC_ADDR) { addr = rtwvif->conf->mac_addr.addr; rtw_vif_write_addr(rtwdev, addr, rtwvif->mac_addr); } if (config & PORT_SET_BSSID) { addr = rtwvif->conf->bssid.addr; rtw_vif_write_addr(rtwdev, addr, rtwvif->bssid); } if (config & PORT_SET_NET_TYPE) { addr = rtwvif->conf->net_type.addr; mask = rtwvif->conf->net_type.mask; rtw_write32_mask(rtwdev, addr, mask, rtwvif->net_type); } if (config & PORT_SET_AID) { addr = rtwvif->conf->aid.addr; mask = rtwvif->conf->aid.mask; rtw_write32_mask(rtwdev, addr, mask, rtwvif->aid); } if (config & PORT_SET_BCN_CTRL) { addr = rtwvif->conf->bcn_ctrl.addr; mask = rtwvif->conf->bcn_ctrl.mask; rtw_write8_mask(rtwdev, addr, mask, rtwvif->bcn_ctrl); } } static u8 hw_bw_cap_to_bitamp(u8 bw_cap) { u8 bw = 0; switch (bw_cap) { case EFUSE_HW_CAP_IGNORE: case EFUSE_HW_CAP_SUPP_BW80: bw |= BIT(RTW_CHANNEL_WIDTH_80); fallthrough; case EFUSE_HW_CAP_SUPP_BW40: bw |= BIT(RTW_CHANNEL_WIDTH_40); fallthrough; default: bw |= BIT(RTW_CHANNEL_WIDTH_20); break; } return bw; } static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_hal *hal = &rtwdev->hal; if (hw_ant_num == EFUSE_HW_CAP_IGNORE || hw_ant_num >= hal->rf_path_num) return; switch (hw_ant_num) { case 1: hal->rf_type = RF_1T1R; hal->rf_path_num = 1; if (!chip->fix_rf_phy_num) hal->rf_phy_num = hal->rf_path_num; hal->antenna_tx = BB_PATH_A; hal->antenna_rx = BB_PATH_A; break; default: WARN(1, "invalid hw configuration from efuse\n"); break; } } static u64 get_vht_ra_mask(struct ieee80211_sta *sta) { u64 ra_mask = 0; u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map); u8 vht_mcs_cap; int i, nss; /* 4SS, every two bits for MCS7/8/9 */ for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 10) { vht_mcs_cap = mcs_map & 0x3; switch (vht_mcs_cap) { case 2: /* MCS9 */ ra_mask |= 0x3ffULL << nss; break; case 1: /* MCS8 */ ra_mask |= 0x1ffULL << nss; break; case 0: /* MCS7 */ ra_mask |= 0x0ffULL << nss; break; default: break; } } return ra_mask; } static u8 get_rate_id(u8 wireless_set, enum rtw_bandwidth bw_mode, u8 tx_num) { u8 rate_id = 0; switch (wireless_set) { case WIRELESS_CCK: rate_id = RTW_RATEID_B_20M; break; case 
WIRELESS_OFDM: rate_id = RTW_RATEID_G; break; case WIRELESS_CCK | WIRELESS_OFDM: rate_id = RTW_RATEID_BG; break; case WIRELESS_OFDM | WIRELESS_HT: if (tx_num == 1) rate_id = RTW_RATEID_GN_N1SS; else if (tx_num == 2) rate_id = RTW_RATEID_GN_N2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR5_N_3SS; break; case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT: if (bw_mode == RTW_CHANNEL_WIDTH_40) { if (tx_num == 1) rate_id = RTW_RATEID_BGN_40M_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_BGN_40M_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR5_N_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR7_N_4SS; } else { if (tx_num == 1) rate_id = RTW_RATEID_BGN_20M_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_BGN_20M_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR5_N_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR7_N_4SS; } break; case WIRELESS_OFDM | WIRELESS_VHT: if (tx_num == 1) rate_id = RTW_RATEID_ARFR1_AC_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_ARFR0_AC_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR4_AC_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR6_AC_4SS; break; case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_VHT: if (bw_mode >= RTW_CHANNEL_WIDTH_80) { if (tx_num == 1) rate_id = RTW_RATEID_ARFR1_AC_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_ARFR0_AC_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR4_AC_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR6_AC_4SS; } else { if (tx_num == 1) rate_id = RTW_RATEID_ARFR2_AC_2G_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_ARFR3_AC_2G_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR4_AC_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR6_AC_4SS; } break; default: break; } return rate_id; } #define RA_MASK_CCK_RATES 0x0000f #define RA_MASK_OFDM_RATES 0x00ff0 #define RA_MASK_HT_RATES_1SS (0xff000ULL << 0) #define RA_MASK_HT_RATES_2SS (0xff000ULL << 8) #define RA_MASK_HT_RATES_3SS (0xff000ULL << 16) #define RA_MASK_HT_RATES (RA_MASK_HT_RATES_1SS | \ RA_MASK_HT_RATES_2SS | \ RA_MASK_HT_RATES_3SS) #define RA_MASK_VHT_RATES_1SS (0x3ff000ULL << 0) #define RA_MASK_VHT_RATES_2SS (0x3ff000ULL << 10) #define RA_MASK_VHT_RATES_3SS (0x3ff000ULL << 20) #define RA_MASK_VHT_RATES (RA_MASK_VHT_RATES_1SS | \ RA_MASK_VHT_RATES_2SS | \ RA_MASK_VHT_RATES_3SS) #define RA_MASK_CCK_IN_BG 0x00005 #define RA_MASK_CCK_IN_HT 0x00005 #define RA_MASK_CCK_IN_VHT 0x00005 #define RA_MASK_OFDM_IN_VHT 0x00010 #define RA_MASK_OFDM_IN_HT_2G 0x00010 #define RA_MASK_OFDM_IN_HT_5G 0x00030 static u64 rtw_rate_mask_rssi(struct rtw_sta_info *si, u8 wireless_set) { u8 rssi_level = si->rssi_level; if (wireless_set == WIRELESS_CCK) return 0xffffffffffffffffULL; if (rssi_level == 0) return 0xffffffffffffffffULL; else if (rssi_level == 1) return 0xfffffffffffffff0ULL; else if (rssi_level == 2) return 0xffffffffffffefe0ULL; else if (rssi_level == 3) return 0xffffffffffffcfc0ULL; else if (rssi_level == 4) return 0xffffffffffff8f80ULL; else return 0xffffffffffff0f00ULL; } static u64 rtw_rate_mask_recover(u64 ra_mask, u64 ra_mask_bak) { if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0) ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)); if (ra_mask == 0) ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)); return ra_mask; } static u64 rtw_rate_mask_cfg(struct rtw_dev *rtwdev, struct rtw_sta_info *si, u64 ra_mask, bool is_vht_enable) { struct rtw_hal *hal = &rtwdev->hal; const struct cfg80211_bitrate_mask *mask = si->mask; u64 cfg_mask = GENMASK_ULL(63, 0); u8 band; if (!si->use_cfg_mask) return 
ra_mask; band = hal->current_band_type; if (band == RTW_BAND_2G) { band = NL80211_BAND_2GHZ; cfg_mask = mask->control[band].legacy; } else if (band == RTW_BAND_5G) { band = NL80211_BAND_5GHZ; cfg_mask = u64_encode_bits(mask->control[band].legacy, RA_MASK_OFDM_RATES); } if (!is_vht_enable) { if (ra_mask & RA_MASK_HT_RATES_1SS) cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0], RA_MASK_HT_RATES_1SS); if (ra_mask & RA_MASK_HT_RATES_2SS) cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1], RA_MASK_HT_RATES_2SS); } else { if (ra_mask & RA_MASK_VHT_RATES_1SS) cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0], RA_MASK_VHT_RATES_1SS); if (ra_mask & RA_MASK_VHT_RATES_2SS) cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1], RA_MASK_VHT_RATES_2SS); } ra_mask &= cfg_mask; return ra_mask; } void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si, bool reset_ra_mask) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; struct ieee80211_sta *sta = si->sta; struct rtw_efuse *efuse = &rtwdev->efuse; struct rtw_hal *hal = &rtwdev->hal; u8 wireless_set; u8 bw_mode; u8 rate_id; u8 stbc_en = 0; u8 ldpc_en = 0; u8 tx_num = 1; u64 ra_mask = 0; u64 ra_mask_bak = 0; bool is_vht_enable = false; bool is_support_sgi = false; if (sta->deflink.vht_cap.vht_supported) { is_vht_enable = true; ra_mask |= get_vht_ra_mask(sta); if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) stbc_en = VHT_STBC_EN; if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) ldpc_en = VHT_LDPC_EN; } else if (sta->deflink.ht_cap.ht_supported) { ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 36) | ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 28) | (sta->deflink.ht_cap.mcs.rx_mask[1] << 20) | (sta->deflink.ht_cap.mcs.rx_mask[0] << 12); if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) stbc_en = HT_STBC_EN; if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) ldpc_en = HT_LDPC_EN; } if (efuse->hw_cap.nss == 1 || rtwdev->hal.txrx_1ss) ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS; else if (efuse->hw_cap.nss == 2) ra_mask &= RA_MASK_VHT_RATES_2SS | RA_MASK_HT_RATES_2SS | RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS; if (hal->current_band_type == RTW_BAND_5G) { ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4; ra_mask_bak = ra_mask; if (sta->deflink.vht_cap.vht_supported) { ra_mask &= RA_MASK_VHT_RATES | RA_MASK_OFDM_IN_VHT; wireless_set = WIRELESS_OFDM | WIRELESS_VHT; } else if (sta->deflink.ht_cap.ht_supported) { ra_mask &= RA_MASK_HT_RATES | RA_MASK_OFDM_IN_HT_5G; wireless_set = WIRELESS_OFDM | WIRELESS_HT; } else { wireless_set = WIRELESS_OFDM; } dm_info->rrsr_val_init = RRSR_INIT_5G; } else if (hal->current_band_type == RTW_BAND_2G) { ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ]; ra_mask_bak = ra_mask; if (sta->deflink.vht_cap.vht_supported) { ra_mask &= RA_MASK_VHT_RATES | RA_MASK_CCK_IN_VHT | RA_MASK_OFDM_IN_VHT; wireless_set = WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT | WIRELESS_VHT; } else if (sta->deflink.ht_cap.ht_supported) { ra_mask &= RA_MASK_HT_RATES | RA_MASK_CCK_IN_HT | RA_MASK_OFDM_IN_HT_2G; wireless_set = WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT; } else if (sta->deflink.supp_rates[0] <= 0xf) { wireless_set = WIRELESS_CCK; } else { ra_mask &= RA_MASK_OFDM_RATES | RA_MASK_CCK_IN_BG; wireless_set = WIRELESS_CCK | WIRELESS_OFDM; } dm_info->rrsr_val_init = RRSR_INIT_2G; } else { rtw_err(rtwdev, "Unknown band type\n"); ra_mask_bak = ra_mask; wireless_set = 0; } switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_80: 
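/* Short GI capability is taken from the VHT caps at 80 MHz, and from
 * the HT caps for the 40/20 MHz cases below.
 */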
bw_mode = RTW_CHANNEL_WIDTH_80; is_support_sgi = sta->deflink.vht_cap.vht_supported && (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); break; case IEEE80211_STA_RX_BW_40: bw_mode = RTW_CHANNEL_WIDTH_40; is_support_sgi = sta->deflink.ht_cap.ht_supported && (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40); break; default: bw_mode = RTW_CHANNEL_WIDTH_20; is_support_sgi = sta->deflink.ht_cap.ht_supported && (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20); break; } if (sta->deflink.vht_cap.vht_supported || sta->deflink.ht_cap.ht_supported) tx_num = efuse->hw_cap.nss; rate_id = get_rate_id(wireless_set, bw_mode, tx_num); ra_mask &= rtw_rate_mask_rssi(si, wireless_set); ra_mask = rtw_rate_mask_recover(ra_mask, ra_mask_bak); ra_mask = rtw_rate_mask_cfg(rtwdev, si, ra_mask, is_vht_enable); si->bw_mode = bw_mode; si->stbc_en = stbc_en; si->ldpc_en = ldpc_en; si->sgi_enable = is_support_sgi; si->vht_enable = is_vht_enable; si->ra_mask = ra_mask; si->rate_id = rate_id; rtw_fw_send_ra_info(rtwdev, si, reset_ra_mask); } int rtw_wait_firmware_completion(struct rtw_dev *rtwdev) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fw_state *fw; int ret = 0; fw = &rtwdev->fw; wait_for_completion(&fw->completion); if (!fw->firmware) ret = -EINVAL; if (chip->wow_fw_name) { fw = &rtwdev->wow_fw; wait_for_completion(&fw->completion); if (!fw->firmware) ret = -EINVAL; } return ret; } EXPORT_SYMBOL(rtw_wait_firmware_completion); static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { const struct rtw_chip_info *chip = rtwdev->chip; if (rtw_disable_lps_deep_mode || !chip->lps_deep_mode_supported || !fw->feature) return LPS_DEEP_MODE_NONE; if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_PG)) && rtw_fw_feature_check(fw, FW_FEATURE_PG)) return LPS_DEEP_MODE_PG; if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_LCLK)) && rtw_fw_feature_check(fw, FW_FEATURE_LCLK)) return LPS_DEEP_MODE_LCLK; return LPS_DEEP_MODE_NONE; } int rtw_power_on(struct rtw_dev *rtwdev) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fw_state *fw = &rtwdev->fw; bool wifi_only; int ret; ret = rtw_hci_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup hci\n"); goto err; } /* power on MAC before firmware downloaded */ ret = rtw_mac_power_on(rtwdev); if (ret) { rtw_err(rtwdev, "failed to power on mac\n"); goto err; } ret = rtw_wait_firmware_completion(rtwdev); if (ret) { rtw_err(rtwdev, "failed to wait firmware completion\n"); goto err_off; } ret = rtw_download_firmware(rtwdev, fw); if (ret) { rtw_err(rtwdev, "failed to download firmware\n"); goto err_off; } /* config mac after firmware downloaded */ ret = rtw_mac_init(rtwdev); if (ret) { rtw_err(rtwdev, "failed to configure mac\n"); goto err_off; } chip->ops->phy_set_param(rtwdev); ret = rtw_hci_start(rtwdev); if (ret) { rtw_err(rtwdev, "failed to start hci\n"); goto err_off; } /* send H2C after HCI has started */ rtw_fw_send_general_info(rtwdev); rtw_fw_send_phydm_info(rtwdev); wifi_only = !rtwdev->efuse.btcoex; rtw_coex_power_on_setting(rtwdev); rtw_coex_init_hw_config(rtwdev, wifi_only); return 0; err_off: rtw_mac_power_off(rtwdev); err: return ret; } EXPORT_SYMBOL(rtw_power_on); void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start) { if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_NOTIFY_SCAN)) return; if (start) { rtw_fw_scan_notify(rtwdev, true); } else { reinit_completion(&rtwdev->fw_scan_density); rtw_fw_scan_notify(rtwdev, false); if 
(!wait_for_completion_timeout(&rtwdev->fw_scan_density, SCAN_NOTIFY_TIMEOUT)) rtw_warn(rtwdev, "firmware failed to report density after scan\n"); } } void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, const u8 *mac_addr, bool hw_scan) { u32 config = 0; int ret = 0; rtw_leave_lps(rtwdev); if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)) { ret = rtw_leave_ips(rtwdev); if (ret) { rtw_err(rtwdev, "failed to leave idle state\n"); return; } } ether_addr_copy(rtwvif->mac_addr, mac_addr); config |= PORT_SET_MAC_ADDR; rtw_vif_port_config(rtwdev, rtwvif, config); rtw_coex_scan_notify(rtwdev, COEX_SCAN_START); rtw_core_fw_scan_notify(rtwdev, true); set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags); set_bit(RTW_FLAG_SCANNING, rtwdev->flags); } void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, bool hw_scan) { struct rtw_vif *rtwvif = vif ? (struct rtw_vif *)vif->drv_priv : NULL; u32 config = 0; if (!rtwvif) return; clear_bit(RTW_FLAG_SCANNING, rtwdev->flags); clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags); rtw_core_fw_scan_notify(rtwdev, false); ether_addr_copy(rtwvif->mac_addr, vif->addr); config |= PORT_SET_MAC_ADDR; rtw_vif_port_config(rtwdev, rtwvif, config); rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH); if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)) ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work); } int rtw_core_start(struct rtw_dev *rtwdev) { int ret; ret = rtwdev->chip->ops->power_on(rtwdev); if (ret) return ret; rtw_sec_enable_sec_engine(rtwdev); rtwdev->lps_conf.deep_mode = rtw_update_lps_deep_mode(rtwdev, &rtwdev->fw); rtwdev->lps_conf.wow_deep_mode = rtw_update_lps_deep_mode(rtwdev, &rtwdev->wow_fw); /* rcr reset after powered on */ rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr); ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work, RTW_WATCH_DOG_DELAY_TIME); set_bit(RTW_FLAG_RUNNING, rtwdev->flags); return 0; } void rtw_power_off(struct rtw_dev *rtwdev) { rtw_hci_stop(rtwdev); rtw_coex_power_off_setting(rtwdev); rtw_mac_power_off(rtwdev); } EXPORT_SYMBOL(rtw_power_off); void rtw_core_stop(struct rtw_dev *rtwdev) { struct rtw_coex *coex = &rtwdev->coex; clear_bit(RTW_FLAG_RUNNING, rtwdev->flags); clear_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags); mutex_unlock(&rtwdev->mutex); cancel_work_sync(&rtwdev->c2h_work); cancel_work_sync(&rtwdev->update_beacon_work); cancel_delayed_work_sync(&rtwdev->watch_dog_work); cancel_delayed_work_sync(&coex->bt_relink_work); cancel_delayed_work_sync(&coex->bt_reenable_work); cancel_delayed_work_sync(&coex->defreeze_work); cancel_delayed_work_sync(&coex->wl_remain_work); cancel_delayed_work_sync(&coex->bt_remain_work); cancel_delayed_work_sync(&coex->wl_connecting_work); cancel_delayed_work_sync(&coex->bt_multi_link_remain_work); cancel_delayed_work_sync(&coex->wl_ccklock_work); mutex_lock(&rtwdev->mutex); rtwdev->chip->ops->power_off(rtwdev); } static void rtw_init_ht_cap(struct rtw_dev *rtwdev, struct ieee80211_sta_ht_cap *ht_cap) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_efuse *efuse = &rtwdev->efuse; int i; ht_cap->ht_supported = true; ht_cap->cap = 0; ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_MAX_AMSDU | (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); if (rtw_chip_has_rx_ldpc(rtwdev)) ht_cap->cap |= IEEE80211_HT_CAP_LDPC_CODING; if (rtw_chip_has_tx_stbc(rtwdev)) ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC; if (efuse->hw_cap.bw & BIT(RTW_CHANNEL_WIDTH_40)) ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40 | 
IEEE80211_HT_CAP_SGI_40; ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_cap->ampdu_density = chip->ampdu_density; ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; for (i = 0; i < efuse->hw_cap.nss; i++) ht_cap->mcs.rx_mask[i] = 0xFF; ht_cap->mcs.rx_mask[4] = 0x01; ht_cap->mcs.rx_highest = cpu_to_le16(150 * efuse->hw_cap.nss); } static void rtw_init_vht_cap(struct rtw_dev *rtwdev, struct ieee80211_sta_vht_cap *vht_cap) { struct rtw_efuse *efuse = &rtwdev->efuse; u16 mcs_map = 0; __le16 highest; int i; if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE && efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT) return; vht_cap->vht_supported = true; vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_RXSTBC_1 | IEEE80211_VHT_CAP_HTC_VHT | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | 0; if (rtwdev->hal.rf_path_num > 1) vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; vht_cap->cap |= (rtwdev->hal.bfee_sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT); if (rtw_chip_has_rx_ldpc(rtwdev)) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; for (i = 0; i < 8; i++) { if (i < efuse->hw_cap.nss) mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); else mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); } highest = cpu_to_le16(390 * efuse->hw_cap.nss); vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.rx_highest = highest; vht_cap->vht_mcs.tx_highest = highest; } static u16 rtw_get_max_scan_ie_len(struct rtw_dev *rtwdev) { u16 len; len = rtwdev->chip->max_scan_ie_len; if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD) && rtwdev->chip->id == RTW_CHIP_TYPE_8822C) len = IEEE80211_MAX_DATA_LEN; else if (rtw_fw_feature_ext_check(&rtwdev->fw, FW_FEATURE_EXT_OLD_PAGE_NUM)) len -= RTW_OLD_PROBE_PG_CNT * TX_PAGE_SIZE; return len; } static void rtw_set_supported_band(struct ieee80211_hw *hw, const struct rtw_chip_info *chip) { struct rtw_dev *rtwdev = hw->priv; struct ieee80211_supported_band *sband; if (chip->band & RTW_BAND_2G) { sband = kmemdup(&rtw_band_2ghz, sizeof(*sband), GFP_KERNEL); if (!sband) goto err_out; if (chip->ht_supported) rtw_init_ht_cap(rtwdev, &sband->ht_cap); hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; } if (chip->band & RTW_BAND_5G) { sband = kmemdup(&rtw_band_5ghz, sizeof(*sband), GFP_KERNEL); if (!sband) goto err_out; if (chip->ht_supported) rtw_init_ht_cap(rtwdev, &sband->ht_cap); if (chip->vht_supported) rtw_init_vht_cap(rtwdev, &sband->vht_cap); hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; } return; err_out: rtw_err(rtwdev, "failed to set supported band\n"); } static void rtw_unset_supported_band(struct ieee80211_hw *hw, const struct rtw_chip_info *chip) { kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]); kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]); } static void rtw_vif_smps_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rtw_dev *rtwdev = (struct rtw_dev *)data; if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc) return; if (rtwdev->hal.txrx_1ss) ieee80211_request_smps(vif, 0, IEEE80211_SMPS_STATIC); else ieee80211_request_smps(vif, 0, IEEE80211_SMPS_OFF); } void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool txrx_1ss) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_hal *hal = &rtwdev->hal; if (!chip->ops->config_txrx_mode || rtwdev->hal.txrx_1ss == txrx_1ss) return; rtwdev->hal.txrx_1ss = txrx_1ss; if (txrx_1ss) 
chip->ops->config_txrx_mode(rtwdev, BB_PATH_A, BB_PATH_A, false); else chip->ops->config_txrx_mode(rtwdev, hal->antenna_tx, hal->antenna_rx, false); rtw_iterate_vifs_atomic(rtwdev, rtw_vif_smps_iter, rtwdev); } static void __update_firmware_feature(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { u32 feature; const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)fw->firmware->data; feature = le32_to_cpu(fw_hdr->feature); fw->feature = feature & FW_FEATURE_SIG ? feature : 0; if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C && RTW_FW_SUIT_VER_CODE(rtwdev->fw) < RTW_FW_VER_CODE(9, 9, 13)) fw->feature_ext |= FW_FEATURE_EXT_OLD_PAGE_NUM; } static void __update_firmware_info(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)fw->firmware->data; fw->h2c_version = le16_to_cpu(fw_hdr->h2c_fmt_ver); fw->version = le16_to_cpu(fw_hdr->version); fw->sub_version = fw_hdr->subversion; fw->sub_index = fw_hdr->subindex; __update_firmware_feature(rtwdev, fw); } static void __update_firmware_info_legacy(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { struct rtw_fw_hdr_legacy *legacy = (struct rtw_fw_hdr_legacy *)fw->firmware->data; fw->h2c_version = 0; fw->version = le16_to_cpu(legacy->version); fw->sub_version = legacy->subversion1; fw->sub_index = legacy->subversion2; } static void update_firmware_info(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { if (rtw_chip_wcpu_11n(rtwdev)) __update_firmware_info_legacy(rtwdev, fw); else __update_firmware_info(rtwdev, fw); } static void rtw_load_firmware_cb(const struct firmware *firmware, void *context) { struct rtw_fw_state *fw = context; struct rtw_dev *rtwdev = fw->rtwdev; if (!firmware || !firmware->data) { rtw_err(rtwdev, "failed to request firmware\n"); complete_all(&fw->completion); return; } fw->firmware = firmware; update_firmware_info(rtwdev, fw); complete_all(&fw->completion); rtw_info(rtwdev, "%sFirmware version %u.%u.%u, H2C version %u\n", fw->type == RTW_WOWLAN_FW ? "WOW " : "", fw->version, fw->sub_version, fw->sub_index, fw->h2c_version); } static int rtw_load_firmware(struct rtw_dev *rtwdev, enum rtw_fw_type type) { const char *fw_name; struct rtw_fw_state *fw; int ret; switch (type) { case RTW_WOWLAN_FW: fw = &rtwdev->wow_fw; fw_name = rtwdev->chip->wow_fw_name; break; case RTW_NORMAL_FW: fw = &rtwdev->fw; fw_name = rtwdev->chip->fw_name; break; default: rtw_warn(rtwdev, "unsupported firmware type\n"); return -ENOENT; } fw->type = type; fw->rtwdev = rtwdev; init_completion(&fw->completion); ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev, GFP_KERNEL, fw, rtw_load_firmware_cb); if (ret) { rtw_err(rtwdev, "failed to async firmware request\n"); return ret; } return 0; } static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_hal *hal = &rtwdev->hal; struct rtw_efuse *efuse = &rtwdev->efuse; switch (rtw_hci_type(rtwdev)) { case RTW_HCI_TYPE_PCIE: rtwdev->hci.rpwm_addr = 0x03d9; rtwdev->hci.cpwm_addr = 0x03da; break; case RTW_HCI_TYPE_SDIO: rtwdev->hci.rpwm_addr = REG_SDIO_HRPWM1; rtwdev->hci.cpwm_addr = REG_SDIO_HCPWM1_V2; break; case RTW_HCI_TYPE_USB: rtwdev->hci.rpwm_addr = 0xfe58; rtwdev->hci.cpwm_addr = 0xfe57; break; default: rtw_err(rtwdev, "unsupported hci type\n"); return -EINVAL; } hal->chip_version = rtw_read32(rtwdev, REG_SYS_CFG1); hal->cut_version = BIT_GET_CHIP_VER(hal->chip_version); hal->mp_chip = (hal->chip_version & BIT_RTL_ID) ? 
0 : 1; if (hal->chip_version & BIT_RF_TYPE_ID) { hal->rf_type = RF_2T2R; hal->rf_path_num = 2; hal->antenna_tx = BB_PATH_AB; hal->antenna_rx = BB_PATH_AB; } else { hal->rf_type = RF_1T1R; hal->rf_path_num = 1; hal->antenna_tx = BB_PATH_A; hal->antenna_rx = BB_PATH_A; } hal->rf_phy_num = chip->fix_rf_phy_num ? chip->fix_rf_phy_num : hal->rf_path_num; efuse->physical_size = chip->phy_efuse_size; efuse->logical_size = chip->log_efuse_size; efuse->protect_size = chip->ptct_efuse_size; /* default use ack */ rtwdev->hal.rcr |= BIT_VHT_DACK; hal->bfee_sts_cap = 3; return 0; } static int rtw_chip_efuse_enable(struct rtw_dev *rtwdev) { struct rtw_fw_state *fw = &rtwdev->fw; int ret; ret = rtw_hci_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup hci\n"); goto err; } ret = rtw_mac_power_on(rtwdev); if (ret) { rtw_err(rtwdev, "failed to power on mac\n"); goto err; } rtw_write8(rtwdev, REG_C2HEVT, C2H_HW_FEATURE_DUMP); wait_for_completion(&fw->completion); if (!fw->firmware) { ret = -EINVAL; rtw_err(rtwdev, "failed to load firmware\n"); goto err; } ret = rtw_download_firmware(rtwdev, fw); if (ret) { rtw_err(rtwdev, "failed to download firmware\n"); goto err_off; } return 0; err_off: rtw_mac_power_off(rtwdev); err: return ret; } static int rtw_dump_hw_feature(struct rtw_dev *rtwdev) { struct rtw_efuse *efuse = &rtwdev->efuse; u8 hw_feature[HW_FEATURE_LEN]; u8 id; u8 bw; int i; if (!rtwdev->chip->hw_feature_report) return 0; id = rtw_read8(rtwdev, REG_C2HEVT); if (id != C2H_HW_FEATURE_REPORT) { rtw_err(rtwdev, "failed to read hw feature report\n"); return -EBUSY; } for (i = 0; i < HW_FEATURE_LEN; i++) hw_feature[i] = rtw_read8(rtwdev, REG_C2HEVT + 2 + i); rtw_write8(rtwdev, REG_C2HEVT, 0); bw = GET_EFUSE_HW_CAP_BW(hw_feature); efuse->hw_cap.bw = hw_bw_cap_to_bitamp(bw); efuse->hw_cap.hci = GET_EFUSE_HW_CAP_HCI(hw_feature); efuse->hw_cap.nss = GET_EFUSE_HW_CAP_NSS(hw_feature); efuse->hw_cap.ptcl = GET_EFUSE_HW_CAP_PTCL(hw_feature); efuse->hw_cap.ant_num = GET_EFUSE_HW_CAP_ANT_NUM(hw_feature); rtw_hw_config_rf_ant_num(rtwdev, efuse->hw_cap.ant_num); if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE || efuse->hw_cap.nss > rtwdev->hal.rf_path_num) efuse->hw_cap.nss = rtwdev->hal.rf_path_num; rtw_dbg(rtwdev, RTW_DBG_EFUSE, "hw cap: hci=0x%02x, bw=0x%02x, ptcl=0x%02x, ant_num=%d, nss=%d\n", efuse->hw_cap.hci, efuse->hw_cap.bw, efuse->hw_cap.ptcl, efuse->hw_cap.ant_num, efuse->hw_cap.nss); return 0; } static void rtw_chip_efuse_disable(struct rtw_dev *rtwdev) { rtw_hci_stop(rtwdev); rtw_mac_power_off(rtwdev); } static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev) { struct rtw_efuse *efuse = &rtwdev->efuse; int ret; mutex_lock(&rtwdev->mutex); /* power on mac to read efuse */ ret = rtw_chip_efuse_enable(rtwdev); if (ret) goto out_unlock; ret = rtw_parse_efuse_map(rtwdev); if (ret) goto out_disable; ret = rtw_dump_hw_feature(rtwdev); if (ret) goto out_disable; ret = rtw_check_supported_rfe(rtwdev); if (ret) goto out_disable; if (efuse->crystal_cap == 0xff) efuse->crystal_cap = 0; if (efuse->pa_type_2g == 0xff) efuse->pa_type_2g = 0; if (efuse->pa_type_5g == 0xff) efuse->pa_type_5g = 0; if (efuse->lna_type_2g == 0xff) efuse->lna_type_2g = 0; if (efuse->lna_type_5g == 0xff) efuse->lna_type_5g = 0; if (efuse->channel_plan == 0xff) efuse->channel_plan = 0x7f; if (efuse->rf_board_option == 0xff) efuse->rf_board_option = 0; if (efuse->bt_setting & BIT(0)) efuse->share_ant = true; if (efuse->regd == 0xff) efuse->regd = 0; if (efuse->tx_bb_swing_setting_2g == 0xff) efuse->tx_bb_swing_setting_2g = 0; if 
(efuse->tx_bb_swing_setting_5g == 0xff) efuse->tx_bb_swing_setting_5g = 0; efuse->btcoex = (efuse->rf_board_option & 0xe0) == 0x20; efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0; efuse->ext_lna_2g = efuse->lna_type_2g & BIT(3) ? 1 : 0; efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0; efuse->ext_lna_5g = efuse->lna_type_5g & BIT(3) ? 1 : 0; if (!is_valid_ether_addr(efuse->addr)) { eth_random_addr(efuse->addr); dev_warn(rtwdev->dev, "efuse MAC invalid, using random\n"); } out_disable: rtw_chip_efuse_disable(rtwdev); out_unlock: mutex_unlock(&rtwdev->mutex); return ret; } static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev) { struct rtw_hal *hal = &rtwdev->hal; const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev); if (!rfe_def) return -ENODEV; rtw_phy_setup_phy_cond(rtwdev, hal->pkg_type); rtw_phy_init_tx_power(rtwdev); rtw_load_table(rtwdev, rfe_def->phy_pg_tbl); rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl); rtw_phy_tx_power_by_rate_config(hal); rtw_phy_tx_power_limit_config(hal); return 0; } int rtw_chip_info_setup(struct rtw_dev *rtwdev) { int ret; ret = rtw_chip_parameter_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup chip parameters\n"); goto err_out; } ret = rtw_chip_efuse_info_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup chip efuse info\n"); goto err_out; } ret = rtw_chip_board_info_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup chip board info\n"); goto err_out; } return 0; err_out: return ret; } EXPORT_SYMBOL(rtw_chip_info_setup); static void rtw_stats_init(struct rtw_dev *rtwdev) { struct rtw_traffic_stats *stats = &rtwdev->stats; struct rtw_dm_info *dm_info = &rtwdev->dm_info; int i; ewma_tp_init(&stats->tx_ewma_tp); ewma_tp_init(&stats->rx_ewma_tp); for (i = 0; i < RTW_EVM_NUM; i++) ewma_evm_init(&dm_info->ewma_evm[i]); for (i = 0; i < RTW_SNR_NUM; i++) ewma_snr_init(&dm_info->ewma_snr[i]); } int rtw_core_init(struct rtw_dev *rtwdev) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_coex *coex = &rtwdev->coex; int ret; INIT_LIST_HEAD(&rtwdev->rsvd_page_list); INIT_LIST_HEAD(&rtwdev->txqs); timer_setup(&rtwdev->tx_report.purge_timer, rtw_tx_report_purge_timer, 0); rtwdev->tx_wq = alloc_workqueue("rtw_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0); if (!rtwdev->tx_wq) { rtw_warn(rtwdev, "alloc_workqueue rtw_tx_wq failed\n"); return -ENOMEM; } INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work); INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work); INIT_DELAYED_WORK(&coex->bt_reenable_work, rtw_coex_bt_reenable_work); INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work); INIT_DELAYED_WORK(&coex->wl_remain_work, rtw_coex_wl_remain_work); INIT_DELAYED_WORK(&coex->bt_remain_work, rtw_coex_bt_remain_work); INIT_DELAYED_WORK(&coex->wl_connecting_work, rtw_coex_wl_connecting_work); INIT_DELAYED_WORK(&coex->bt_multi_link_remain_work, rtw_coex_bt_multi_link_remain_work); INIT_DELAYED_WORK(&coex->wl_ccklock_work, rtw_coex_wl_ccklock_work); INIT_WORK(&rtwdev->tx_work, rtw_tx_work); INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work); INIT_WORK(&rtwdev->ips_work, rtw_ips_work); INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work); INIT_WORK(&rtwdev->update_beacon_work, rtw_fw_update_beacon_work); INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work); skb_queue_head_init(&rtwdev->c2h_queue); skb_queue_head_init(&rtwdev->coex.queue); skb_queue_head_init(&rtwdev->tx_report.queue); spin_lock_init(&rtwdev->txq_lock); spin_lock_init(&rtwdev->tx_report.q_lock); mutex_init(&rtwdev->mutex); 
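	/*
	 * rtwdev->mutex is the driver-wide state lock; the tx_power_mutex
	 * initialized next presumably serializes TX power reconfiguration.
	 */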
mutex_init(&rtwdev->hal.tx_power_mutex); init_waitqueue_head(&rtwdev->coex.wait); init_completion(&rtwdev->lps_leave_check); init_completion(&rtwdev->fw_scan_density); rtwdev->sec.total_cam_num = 32; rtwdev->hal.current_channel = 1; rtwdev->dm_info.fix_rate = U8_MAX; rtw_stats_init(rtwdev); /* default rx filter setting */ rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV | BIT_PKTCTL_DLEN | BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS | BIT_AB | BIT_AM | BIT_APM; ret = rtw_load_firmware(rtwdev, RTW_NORMAL_FW); if (ret) { rtw_warn(rtwdev, "no firmware loaded\n"); goto out; } if (chip->wow_fw_name) { ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW); if (ret) { rtw_warn(rtwdev, "no wow firmware loaded\n"); wait_for_completion(&rtwdev->fw.completion); if (rtwdev->fw.firmware) release_firmware(rtwdev->fw.firmware); goto out; } } return 0; out: destroy_workqueue(rtwdev->tx_wq); return ret; } EXPORT_SYMBOL(rtw_core_init); void rtw_core_deinit(struct rtw_dev *rtwdev) { struct rtw_fw_state *fw = &rtwdev->fw; struct rtw_fw_state *wow_fw = &rtwdev->wow_fw; struct rtw_rsvd_page *rsvd_pkt, *tmp; unsigned long flags; rtw_wait_firmware_completion(rtwdev); if (fw->firmware) release_firmware(fw->firmware); if (wow_fw->firmware) release_firmware(wow_fw->firmware); destroy_workqueue(rtwdev->tx_wq); timer_delete_sync(&rtwdev->tx_report.purge_timer); spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags); skb_queue_purge(&rtwdev->tx_report.queue); spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags); skb_queue_purge(&rtwdev->coex.queue); skb_queue_purge(&rtwdev->c2h_queue); list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, build_list) { list_del(&rsvd_pkt->build_list); kfree(rsvd_pkt); } mutex_destroy(&rtwdev->mutex); mutex_destroy(&rtwdev->hal.tx_power_mutex); } EXPORT_SYMBOL(rtw_core_deinit); int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) { bool sta_mode_only = rtwdev->hci.type == RTW_HCI_TYPE_SDIO; struct rtw_hal *hal = &rtwdev->hal; int max_tx_headroom = 0; int ret; max_tx_headroom = rtwdev->chip->tx_pkt_desc_sz; if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO) max_tx_headroom += RTW_SDIO_DATA_PTR_ALIGN; hw->extra_tx_headroom = max_tx_headroom; hw->queues = IEEE80211_NUM_ACS; hw->txq_data_size = sizeof(struct rtw_txq); hw->sta_data_size = sizeof(struct rtw_sta_info); hw->vif_data_size = sizeof(struct rtw_vif); ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); if (sta_mode_only) hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); else hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_ADHOC); hw->wiphy->available_antennas_tx = hal->antenna_tx; hw->wiphy->available_antennas_rx = hal->antenna_rx; hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | WIPHY_FLAG_TDLS_EXTERNAL_SETUP; hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; hw->wiphy->max_scan_ssids = RTW_SCAN_MAX_SSIDS; hw->wiphy->max_scan_ie_len = rtw_get_max_scan_ie_len(rtwdev); if (!sta_mode_only && rtwdev->chip->id == RTW_CHIP_TYPE_8822C) { hw->wiphy->iface_combinations = rtw_iface_combs; hw->wiphy->n_iface_combinations = 
ARRAY_SIZE(rtw_iface_combs); } wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); #ifdef CONFIG_PM hw->wiphy->wowlan = rtwdev->chip->wowlan_stub; hw->wiphy->max_sched_scan_ssids = rtwdev->chip->max_sched_scan_ssids; #endif rtw_set_supported_band(hw, rtwdev->chip); SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr); hw->wiphy->sar_capa = &rtw_sar_capa; ret = rtw_regd_init(rtwdev); if (ret) { rtw_err(rtwdev, "failed to init regd\n"); return ret; } rtw_led_init(rtwdev); ret = ieee80211_register_hw(hw); if (ret) { rtw_err(rtwdev, "failed to register hw\n"); goto led_deinit; } ret = rtw_regd_hint(rtwdev); if (ret) { rtw_err(rtwdev, "failed to hint regd\n"); goto led_deinit; } rtw_debugfs_init(rtwdev); rtwdev->bf_info.bfer_mu_cnt = 0; rtwdev->bf_info.bfer_su_cnt = 0; return 0; led_deinit: rtw_led_deinit(rtwdev); return ret; } EXPORT_SYMBOL(rtw_register_hw); void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) { const struct rtw_chip_info *chip = rtwdev->chip; ieee80211_unregister_hw(hw); rtw_unset_supported_band(hw, chip); rtw_debugfs_deinit(rtwdev); rtw_led_deinit(rtwdev); } EXPORT_SYMBOL(rtw_unregister_hw); static void rtw_swap_reg_nbytes(struct rtw_dev *rtwdev, const struct rtw_hw_reg *reg1, const struct rtw_hw_reg *reg2, u8 nbytes) { u8 i; for (i = 0; i < nbytes; i++) { u8 v1 = rtw_read8(rtwdev, reg1->addr + i); u8 v2 = rtw_read8(rtwdev, reg2->addr + i); rtw_write8(rtwdev, reg1->addr + i, v2); rtw_write8(rtwdev, reg2->addr + i, v1); } } static void rtw_swap_reg_mask(struct rtw_dev *rtwdev, const struct rtw_hw_reg *reg1, const struct rtw_hw_reg *reg2) { u32 v1, v2; v1 = rtw_read32_mask(rtwdev, reg1->addr, reg1->mask); v2 = rtw_read32_mask(rtwdev, reg2->addr, reg2->mask); rtw_write32_mask(rtwdev, reg2->addr, reg2->mask, v1); rtw_write32_mask(rtwdev, reg1->addr, reg1->mask, v2); } struct rtw_iter_port_switch_data { struct rtw_dev *rtwdev; struct rtw_vif *rtwvif_ap; }; static void rtw_port_switch_iter(void *data, struct ieee80211_vif *vif) { struct rtw_iter_port_switch_data *iter_data = data; struct rtw_dev *rtwdev = iter_data->rtwdev; struct rtw_vif *rtwvif_target = (struct rtw_vif *)vif->drv_priv; struct rtw_vif *rtwvif_ap = iter_data->rtwvif_ap; const struct rtw_hw_reg *reg1, *reg2; if (rtwvif_target->port != RTW_PORT_0) return; rtw_dbg(rtwdev, RTW_DBG_STATE, "AP port switch from %d -> %d\n", rtwvif_ap->port, rtwvif_target->port); /* Leave LPS so the value swapped are not in PS mode */ rtw_leave_lps(rtwdev); reg1 = &rtwvif_ap->conf->net_type; reg2 = &rtwvif_target->conf->net_type; rtw_swap_reg_mask(rtwdev, reg1, reg2); reg1 = &rtwvif_ap->conf->mac_addr; reg2 = &rtwvif_target->conf->mac_addr; rtw_swap_reg_nbytes(rtwdev, reg1, reg2, ETH_ALEN); reg1 = &rtwvif_ap->conf->bssid; reg2 = &rtwvif_target->conf->bssid; rtw_swap_reg_nbytes(rtwdev, reg1, reg2, ETH_ALEN); reg1 = &rtwvif_ap->conf->bcn_ctrl; reg2 = &rtwvif_target->conf->bcn_ctrl; rtw_swap_reg_nbytes(rtwdev, reg1, reg2, 1); swap(rtwvif_target->port, rtwvif_ap->port); swap(rtwvif_target->conf, rtwvif_ap->conf); rtw_fw_default_port(rtwdev, rtwvif_target); } void rtw_core_port_switch(struct rtw_dev *rtwdev, struct ieee80211_vif *vif) { struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; struct rtw_iter_port_switch_data iter_data; if (vif->type != NL80211_IFTYPE_AP || rtwvif->port == RTW_PORT_0) return; iter_data.rtwdev = rtwdev; iter_data.rtwvif_ap = rtwvif; 
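	/*
	 * Walk all vifs looking for the one currently occupying hardware
	 * port 0 and swap its port registers with this AP vif, so the AP
	 * ends up on port 0.
	 */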
	rtw_iterate_vifs(rtwdev, rtw_port_switch_iter, &iter_data);
}

static void rtw_check_sta_active_iter(void *data, struct ieee80211_vif *vif)
{
	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
	bool *active = data;

	if (*active)
		return;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (vif->cfg.assoc || !is_zero_ether_addr(rtwvif->bssid))
		*active = true;
}

bool rtw_core_check_sta_active(struct rtw_dev *rtwdev)
{
	bool sta_active = false;

	rtw_iterate_vifs(rtwdev, rtw_check_sta_active_iter, &sta_active);

	return rtwdev->ap_active || sta_active;
}

void rtw_core_enable_beacon(struct rtw_dev *rtwdev, bool enable)
{
	if (!rtwdev->ap_active)
		return;

	if (enable) {
		rtw_write32_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
		rtw_write32_clr(rtwdev, REG_TXPAUSE, BIT_HIGH_QUEUE);
	} else {
		rtw_write32_clr(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
		rtw_write32_set(rtwdev, REG_TXPAUSE, BIT_HIGH_QUEUE);
	}
}

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless core module");
MODULE_LICENSE("Dual BSD/GPL");
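/*
 * Illustrative sketch (not part of the driver): how a composed rate bitmap
 * such as ra_mask in rtw_rate_mask_cfg() above is built. u64_encode_bits()
 * shifts a value into the bit span described by a mask; the standalone
 * reimplementation below uses plain shifts. The ex_* names and the field
 * layout are invented for this example.
 */
#include <stdint.h>

#define EX_MASK_LEGACY	0x0000000000000fffULL	/* hypothetical bits 0..11 */
#define EX_MASK_HT_1SS	0x00000000000ff000ULL	/* hypothetical bits 12..19 */

static inline uint64_t ex_encode_bits(uint64_t val, uint64_t mask)
{
	/* place val at the lowest set bit of mask, like u64_encode_bits() */
	return (val << __builtin_ctzll(mask)) & mask;
}

static inline uint64_t ex_build_ra_mask(uint16_t legacy, uint8_t ht_mcs0)
{
	uint64_t m = 0;

	m |= ex_encode_bits(legacy, EX_MASK_LEGACY);
	m |= ex_encode_bits(ht_mcs0, EX_MASK_HT_1SS);
	return m;	/* e.g. legacy=0xff5, ht_mcs0=0xff -> 0xffff5 */
}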
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IguanaWorks USB IR Transceiver support
 *
 * Copyright (C) 2012 Sean Young <sean@mess.org>
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <media/rc-core.h>

#define BUF_SIZE 152

struct iguanair {
	struct rc_dev *rc;

	struct device *dev;
	struct usb_device *udev;

	uint16_t version;
	uint8_t bufsize;
	uint8_t cycle_overhead;

	/* receiver support */
	bool receiver_on;
	dma_addr_t dma_in, dma_out;
	uint8_t *buf_in;
	struct urb *urb_in, *urb_out;
	struct completion completion;

	/* transmit support */
	bool tx_overflow;
	uint32_t carrier;
	struct send_packet *packet;

	char name[64];
	char phys[64];
};

#define CMD_NOP			0x00
#define CMD_GET_VERSION		0x01
#define CMD_GET_BUFSIZE		0x11
#define CMD_GET_FEATURES	0x10
#define CMD_SEND		0x15
#define CMD_EXECUTE		0x1f
#define CMD_RX_OVERFLOW		0x31
#define CMD_TX_OVERFLOW		0x32
#define CMD_RECEIVER_ON		0x12
#define CMD_RECEIVER_OFF	0x14

#define DIR_IN			0xdc
#define DIR_OUT			0xcd

#define MAX_IN_PACKET		8u
#define MAX_OUT_PACKET		(sizeof(struct send_packet) + BUF_SIZE)
#define TIMEOUT			1000
#define RX_RESOLUTION		21

struct packet {
	uint16_t start;
	uint8_t direction;
	uint8_t cmd;
};

struct send_packet
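/*
 * Layout of a transmit request on the wire: the generic header is followed
 * by the payload length, the active channel mask, and the two busy-loop
 * counts that set the carrier (computed in iguanair_set_tx_carrier()
 * below), then the pulse/space payload.
 */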
{ struct packet header; uint8_t length; uint8_t channels; uint8_t busy7; uint8_t busy4; uint8_t payload[]; }; static void process_ir_data(struct iguanair *ir, unsigned len) { if (len >= 4 && ir->buf_in[0] == 0 && ir->buf_in[1] == 0) { switch (ir->buf_in[3]) { case CMD_GET_VERSION: if (len == 6) { ir->version = (ir->buf_in[5] << 8) | ir->buf_in[4]; complete(&ir->completion); } break; case CMD_GET_BUFSIZE: if (len >= 5) { ir->bufsize = ir->buf_in[4]; complete(&ir->completion); } break; case CMD_GET_FEATURES: if (len > 5) { ir->cycle_overhead = ir->buf_in[5]; complete(&ir->completion); } break; case CMD_TX_OVERFLOW: ir->tx_overflow = true; fallthrough; case CMD_RECEIVER_OFF: case CMD_RECEIVER_ON: case CMD_SEND: complete(&ir->completion); break; case CMD_RX_OVERFLOW: dev_warn(ir->dev, "receive overflow\n"); ir_raw_event_overflow(ir->rc); break; default: dev_warn(ir->dev, "control code %02x received\n", ir->buf_in[3]); break; } } else if (len >= 7) { struct ir_raw_event rawir = {}; unsigned i; bool event = false; for (i = 0; i < 7; i++) { if (ir->buf_in[i] == 0x80) { rawir.pulse = false; rawir.duration = 21845; } else { rawir.pulse = (ir->buf_in[i] & 0x80) == 0; rawir.duration = ((ir->buf_in[i] & 0x7f) + 1) * RX_RESOLUTION; } if (ir_raw_event_store_with_filter(ir->rc, &rawir)) event = true; } if (event) ir_raw_event_handle(ir->rc); } } static void iguanair_rx(struct urb *urb) { struct iguanair *ir; int rc; if (!urb) return; ir = urb->context; if (!ir) return; switch (urb->status) { case 0: process_ir_data(ir, urb->actual_length); break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: return; case -EPIPE: default: dev_dbg(ir->dev, "Error: urb status = %d\n", urb->status); break; } rc = usb_submit_urb(urb, GFP_ATOMIC); if (rc && rc != -ENODEV) dev_warn(ir->dev, "failed to resubmit urb: %d\n", rc); } static void iguanair_irq_out(struct urb *urb) { struct iguanair *ir = urb->context; if (urb->status) dev_dbg(ir->dev, "Error: out urb status = %d\n", urb->status); /* if we sent an nop packet, do not expect a response */ if (urb->status == 0 && ir->packet->header.cmd == CMD_NOP) complete(&ir->completion); } static int iguanair_send(struct iguanair *ir, unsigned size) { int rc; reinit_completion(&ir->completion); ir->urb_out->transfer_buffer_length = size; rc = usb_submit_urb(ir->urb_out, GFP_KERNEL); if (rc) return rc; if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0) { usb_kill_urb(ir->urb_out); return -ETIMEDOUT; } return rc; } static int iguanair_get_features(struct iguanair *ir) { int rc; /* * On cold boot, the iguanair initializes on the first packet * received but does not process that packet. Send an empty * packet. 
 */
	ir->packet->header.start = 0;
	ir->packet->header.direction = DIR_OUT;
	ir->packet->header.cmd = CMD_NOP;
	iguanair_send(ir, sizeof(ir->packet->header));

	ir->packet->header.cmd = CMD_GET_VERSION;
	rc = iguanair_send(ir, sizeof(ir->packet->header));
	if (rc) {
		dev_info(ir->dev, "failed to get version\n");
		goto out;
	}

	if (ir->version < 0x205) {
		dev_err(ir->dev, "firmware 0x%04x is too old\n", ir->version);
		rc = -ENODEV;
		goto out;
	}

	ir->bufsize = 150;
	ir->cycle_overhead = 65;

	ir->packet->header.cmd = CMD_GET_BUFSIZE;
	rc = iguanair_send(ir, sizeof(ir->packet->header));
	if (rc) {
		dev_info(ir->dev, "failed to get buffer size\n");
		goto out;
	}

	if (ir->bufsize > BUF_SIZE) {
		dev_info(ir->dev, "buffer size %u larger than expected\n",
			 ir->bufsize);
		ir->bufsize = BUF_SIZE;
	}

	ir->packet->header.cmd = CMD_GET_FEATURES;
	rc = iguanair_send(ir, sizeof(ir->packet->header));
	if (rc)
		dev_info(ir->dev, "failed to get features\n");
out:
	return rc;
}

static int iguanair_receiver(struct iguanair *ir, bool enable)
{
	ir->packet->header.start = 0;
	ir->packet->header.direction = DIR_OUT;
	ir->packet->header.cmd = enable ? CMD_RECEIVER_ON : CMD_RECEIVER_OFF;

	return iguanair_send(ir, sizeof(ir->packet->header));
}

/*
 * The iguanair creates the carrier by busy spinning after each half period.
 * This is counted in CPU cycles, with the CPU running at 24MHz. It is
 * broken down into 7-cycle and 4-cycle delays, with a preference for
 * 4-cycle delays, minus the overhead of the loop itself (cycle_overhead).
 */
static int iguanair_set_tx_carrier(struct rc_dev *dev, uint32_t carrier)
{
	struct iguanair *ir = dev->priv;

	if (carrier < 25000 || carrier > 150000)
		return -EINVAL;

	if (carrier != ir->carrier) {
		uint32_t cycles, fours, sevens;

		ir->carrier = carrier;

		cycles = DIV_ROUND_CLOSEST(24000000, carrier * 2) -
							ir->cycle_overhead;

		/*
		 * Calculate minimum number of 7 cycles needed so
		 * we are left with a multiple of 4; so we want to have
		 * (sevens * 7) & 3 == cycles & 3
		 */
		sevens = (4 - cycles) & 3;
		fours = (cycles - sevens * 7) / 4;

		/*
		 * The firmware interprets these values as a relative offset
		 * for a branch. Immediately following the branches, there are
		 * 4 instructions of 7 cycles (2 bytes each) and 110
		 * instructions of 4 cycles (1 byte each). A relative branch
		 * of 0 will execute all of them, branch further for less
		 * cycle burning.
		 */
		ir->packet->busy7 = (4 - sevens) * 2;
		ir->packet->busy4 = 110 - fours;
	}

	return 0;
}

static int iguanair_set_tx_mask(struct rc_dev *dev, uint32_t mask)
{
	struct iguanair *ir = dev->priv;

	if (mask > 15)
		return 4;

	ir->packet->channels = mask << 4;

	return 0;
}

static int iguanair_tx(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
	struct iguanair *ir = dev->priv;
	unsigned int i, size, p, periods;
	int rc;

	/* convert from us to carrier periods */
	for (i = size = 0; i < count; i++) {
		periods = DIV_ROUND_CLOSEST(txbuf[i] * ir->carrier, 1000000);
		while (periods) {
			p = min(periods, 127u);
			if (size >= ir->bufsize) {
				rc = -EINVAL;
				goto out;
			}
			ir->packet->payload[size++] = p | ((i & 1) ? 0x80 : 0);
			periods -= p;
		}
	}

	ir->packet->header.start = 0;
	ir->packet->header.direction = DIR_OUT;
	ir->packet->header.cmd = CMD_SEND;
	ir->packet->length = size;

	ir->tx_overflow = false;
	rc = iguanair_send(ir, sizeof(*ir->packet) + size);
	if (rc == 0 && ir->tx_overflow)
		rc = -EOVERFLOW;

out:
	return rc ?
rc : count; } static int iguanair_open(struct rc_dev *rdev) { struct iguanair *ir = rdev->priv; int rc; rc = iguanair_receiver(ir, true); if (rc == 0) ir->receiver_on = true; return rc; } static void iguanair_close(struct rc_dev *rdev) { struct iguanair *ir = rdev->priv; int rc; rc = iguanair_receiver(ir, false); ir->receiver_on = false; if (rc && rc != -ENODEV) dev_warn(ir->dev, "failed to disable receiver: %d\n", rc); } static int iguanair_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct iguanair *ir; struct rc_dev *rc; int ret, pipein, pipeout; struct usb_host_interface *idesc; idesc = intf->cur_altsetting; if (idesc->desc.bNumEndpoints < 2) return -ENODEV; ir = kzalloc(sizeof(*ir), GFP_KERNEL); rc = rc_allocate_device(RC_DRIVER_IR_RAW); if (!ir || !rc) { ret = -ENOMEM; goto out; } ir->buf_in = usb_alloc_coherent(udev, MAX_IN_PACKET, GFP_KERNEL, &ir->dma_in); ir->packet = usb_alloc_coherent(udev, MAX_OUT_PACKET, GFP_KERNEL, &ir->dma_out); ir->urb_in = usb_alloc_urb(0, GFP_KERNEL); ir->urb_out = usb_alloc_urb(0, GFP_KERNEL); if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out || !usb_endpoint_is_int_in(&idesc->endpoint[0].desc) || !usb_endpoint_is_int_out(&idesc->endpoint[1].desc)) { ret = -ENOMEM; goto out; } ir->rc = rc; ir->dev = &intf->dev; ir->udev = udev; init_completion(&ir->completion); pipeout = usb_sndintpipe(udev, idesc->endpoint[1].desc.bEndpointAddress); usb_fill_int_urb(ir->urb_out, udev, pipeout, ir->packet, MAX_OUT_PACKET, iguanair_irq_out, ir, 1); ir->urb_out->transfer_dma = ir->dma_out; ir->urb_out->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; pipein = usb_rcvintpipe(udev, idesc->endpoint[0].desc.bEndpointAddress); usb_fill_int_urb(ir->urb_in, udev, pipein, ir->buf_in, MAX_IN_PACKET, iguanair_rx, ir, 1); ir->urb_in->transfer_dma = ir->dma_in; ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; ret = usb_submit_urb(ir->urb_in, GFP_KERNEL); if (ret) { dev_warn(&intf->dev, "failed to submit urb: %d\n", ret); goto out; } ret = iguanair_get_features(ir); if (ret) goto out2; snprintf(ir->name, sizeof(ir->name), "IguanaWorks USB IR Transceiver version 0x%04x", ir->version); usb_make_path(ir->udev, ir->phys, sizeof(ir->phys)); rc->device_name = ir->name; rc->input_phys = ir->phys; usb_to_input_id(ir->udev, &rc->input_id); rc->dev.parent = &intf->dev; rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rc->priv = ir; rc->open = iguanair_open; rc->close = iguanair_close; rc->s_tx_mask = iguanair_set_tx_mask; rc->s_tx_carrier = iguanair_set_tx_carrier; rc->tx_ir = iguanair_tx; rc->driver_name = KBUILD_MODNAME; rc->map_name = RC_MAP_RC6_MCE; rc->min_timeout = 1; rc->timeout = IR_DEFAULT_TIMEOUT; rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT; rc->rx_resolution = RX_RESOLUTION; iguanair_set_tx_carrier(rc, 38000); iguanair_set_tx_mask(rc, 0); ret = rc_register_device(rc); if (ret < 0) { dev_err(&intf->dev, "failed to register rc device %d", ret); goto out2; } usb_set_intfdata(intf, ir); return 0; out2: usb_kill_urb(ir->urb_in); usb_kill_urb(ir->urb_out); out: if (ir) { usb_free_urb(ir->urb_in); usb_free_urb(ir->urb_out); usb_free_coherent(udev, MAX_IN_PACKET, ir->buf_in, ir->dma_in); usb_free_coherent(udev, MAX_OUT_PACKET, ir->packet, ir->dma_out); } rc_free_device(rc); kfree(ir); return ret; } static void iguanair_disconnect(struct usb_interface *intf) { struct iguanair *ir = usb_get_intfdata(intf); rc_unregister_device(ir->rc); usb_set_intfdata(intf, NULL); usb_kill_urb(ir->urb_in); 
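	/* stop both URBs before their buffers are freed below */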
	usb_kill_urb(ir->urb_out);
	usb_free_urb(ir->urb_in);
	usb_free_urb(ir->urb_out);
	usb_free_coherent(ir->udev, MAX_IN_PACKET, ir->buf_in, ir->dma_in);
	usb_free_coherent(ir->udev, MAX_OUT_PACKET, ir->packet, ir->dma_out);
	kfree(ir);
}

static int iguanair_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct iguanair *ir = usb_get_intfdata(intf);
	int rc = 0;

	if (ir->receiver_on) {
		rc = iguanair_receiver(ir, false);
		if (rc)
			dev_warn(ir->dev, "failed to disable receiver for suspend\n");
	}

	usb_kill_urb(ir->urb_in);
	usb_kill_urb(ir->urb_out);

	return rc;
}

static int iguanair_resume(struct usb_interface *intf)
{
	struct iguanair *ir = usb_get_intfdata(intf);
	int rc;

	rc = usb_submit_urb(ir->urb_in, GFP_KERNEL);
	if (rc)
		dev_warn(&intf->dev, "failed to submit urb: %d\n", rc);

	if (ir->receiver_on) {
		rc = iguanair_receiver(ir, true);
		if (rc)
			dev_warn(ir->dev, "failed to enable receiver after resume\n");
	}

	return rc;
}

static const struct usb_device_id iguanair_table[] = {
	{ USB_DEVICE(0x1781, 0x0938) },
	{ }
};

static struct usb_driver iguanair_driver = {
	.name = KBUILD_MODNAME,
	.probe = iguanair_probe,
	.disconnect = iguanair_disconnect,
	.suspend = iguanair_suspend,
	.resume = iguanair_resume,
	.reset_resume = iguanair_resume,
	.id_table = iguanair_table,
	.soft_unbind = 1 /* we want to disable receiver on unbind */
};

module_usb_driver(iguanair_driver);

MODULE_DESCRIPTION("IguanaWorks USB IR Transceiver");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, iguanair_table);
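/*
 * Worked example (not driver code): the busy7/busy4 split computed by
 * iguanair_set_tx_carrier() above, reimplemented standalone so the
 * arithmetic can be checked. A cycle_overhead of 65 (the driver's default)
 * is assumed; ex_carrier_delays() is an invented name.
 */
#include <stdint.h>
#include <stdio.h>

static void ex_carrier_delays(uint32_t carrier)
{
	uint32_t cycles, fours, sevens;

	/* DIV_ROUND_CLOSEST(24000000, carrier * 2) - cycle_overhead */
	cycles = (24000000 + carrier) / (carrier * 2) - 65;
	/* minimum number of 7-cycle delays leaving a multiple of 4 */
	sevens = (4 - cycles) & 3;
	fours = (cycles - sevens * 7) / 4;
	printf("carrier=%u busy7=%u busy4=%u\n",
	       carrier, (4 - sevens) * 2, 110 - fours);
}

int main(void)
{
	ex_carrier_delays(38000);	/* prints busy7=6 busy4=49 */
	return 0;
}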
// SPDX-License-Identifier: GPL-2.0
/*
  USB Driver layer for GSM modems

  Copyright (C) 2005  Matthias Urlichs <smurf@smurf.noris.de>

  Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org>

  History: see the git log.

  Work sponsored by: Sigos GmbH, Germany <info@sigos.de>

  This driver exists because the "normal" serial driver doesn't work too well
  with GSM modems.
Issues: - data loss -- one single Receive URB is not nearly enough - controlling the baud rate doesn't make sense */ #define DRIVER_AUTHOR "Matthias Urlichs <smurf@smurf.noris.de>" #define DRIVER_DESC "USB Driver for GSM modems" #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <linux/usb/serial.h> #include <linux/serial.h> #include "usb-wwan.h" /* * Generate DTR/RTS signals on the port using the SET_CONTROL_LINE_STATE request * in CDC ACM. */ static int usb_wwan_send_setup(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct usb_wwan_port_private *portdata; int val = 0; int ifnum; int res; portdata = usb_get_serial_port_data(port); if (portdata->dtr_state) val |= USB_CDC_CTRL_DTR; if (portdata->rts_state) val |= USB_CDC_CTRL_RTS; ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; res = usb_autopm_get_interface(serial->interface); if (res) return res; res = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), USB_CDC_REQ_SET_CONTROL_LINE_STATE, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, val, ifnum, NULL, 0, USB_CTRL_SET_TIMEOUT); usb_autopm_put_interface(port->serial->interface); return res; } void usb_wwan_dtr_rts(struct usb_serial_port *port, int on) { struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; intfdata = usb_get_serial_data(port->serial); if (!intfdata->use_send_setup) return; portdata = usb_get_serial_port_data(port); /* FIXME: locking */ portdata->rts_state = on; portdata->dtr_state = on; usb_wwan_send_setup(port); } EXPORT_SYMBOL(usb_wwan_dtr_rts); int usb_wwan_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; unsigned int value; struct usb_wwan_port_private *portdata; portdata = usb_get_serial_port_data(port); value = ((portdata->rts_state) ? TIOCM_RTS : 0) | ((portdata->dtr_state) ? TIOCM_DTR : 0) | ((portdata->cts_state) ? TIOCM_CTS : 0) | ((portdata->dsr_state) ? TIOCM_DSR : 0) | ((portdata->dcd_state) ? TIOCM_CAR : 0) | ((portdata->ri_state) ? TIOCM_RNG : 0); return value; } EXPORT_SYMBOL(usb_wwan_tiocmget); int usb_wwan_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; portdata = usb_get_serial_port_data(port); intfdata = usb_get_serial_data(port->serial); if (!intfdata->use_send_setup) return -EINVAL; /* FIXME: what locks portdata fields ? 
*/ if (set & TIOCM_RTS) portdata->rts_state = 1; if (set & TIOCM_DTR) portdata->dtr_state = 1; if (clear & TIOCM_RTS) portdata->rts_state = 0; if (clear & TIOCM_DTR) portdata->dtr_state = 0; return usb_wwan_send_setup(port); } EXPORT_SYMBOL(usb_wwan_tiocmset); int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; int i; int left, todo; struct urb *this_urb = NULL; /* spurious */ int err; unsigned long flags; portdata = usb_get_serial_port_data(port); intfdata = usb_get_serial_data(port->serial); dev_dbg(&port->dev, "%s: write (%d chars)\n", __func__, count); left = count; for (i = 0; left > 0 && i < N_OUT_URB; i++) { todo = left; if (todo > OUT_BUFLEN) todo = OUT_BUFLEN; this_urb = portdata->out_urbs[i]; if (test_and_set_bit(i, &portdata->out_busy)) { if (time_before(jiffies, portdata->tx_start_time[i] + 10 * HZ)) continue; usb_unlink_urb(this_urb); continue; } dev_dbg(&port->dev, "%s: endpoint %d buf %d\n", __func__, usb_pipeendpoint(this_urb->pipe), i); err = usb_autopm_get_interface_async(port->serial->interface); if (err < 0) { clear_bit(i, &portdata->out_busy); break; } /* send the data */ memcpy(this_urb->transfer_buffer, buf, todo); this_urb->transfer_buffer_length = todo; spin_lock_irqsave(&intfdata->susp_lock, flags); if (intfdata->suspended) { usb_anchor_urb(this_urb, &portdata->delayed); spin_unlock_irqrestore(&intfdata->susp_lock, flags); } else { intfdata->in_flight++; spin_unlock_irqrestore(&intfdata->susp_lock, flags); err = usb_submit_urb(this_urb, GFP_ATOMIC); if (err) { dev_err(&port->dev, "%s: submit urb %d failed: %d\n", __func__, i, err); clear_bit(i, &portdata->out_busy); spin_lock_irqsave(&intfdata->susp_lock, flags); intfdata->in_flight--; spin_unlock_irqrestore(&intfdata->susp_lock, flags); usb_autopm_put_interface_async(port->serial->interface); break; } } portdata->tx_start_time[i] = jiffies; buf += todo; left -= todo; } count -= left; dev_dbg(&port->dev, "%s: wrote (did %d)\n", __func__, count); return count; } EXPORT_SYMBOL(usb_wwan_write); static void usb_wwan_indat_callback(struct urb *urb) { int err; int endpoint; struct usb_serial_port *port; struct device *dev; unsigned char *data = urb->transfer_buffer; int status = urb->status; endpoint = usb_pipeendpoint(urb->pipe); port = urb->context; dev = &port->dev; if (status) { dev_dbg(dev, "%s: nonzero status: %d on endpoint %02x.\n", __func__, status, endpoint); /* don't resubmit on fatal errors */ if (status == -ESHUTDOWN || status == -ENOENT) return; } else { if (urb->actual_length) { tty_insert_flip_string(&port->port, data, urb->actual_length); tty_flip_buffer_push(&port->port); } else dev_dbg(dev, "%s: empty read urb received\n", __func__); } /* Resubmit urb so we continue receiving */ err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { if (err != -EPERM && err != -ENODEV) { dev_err(dev, "%s: resubmit read urb failed. 
(%d)\n", __func__, err); /* busy also in error unless we are killed */ usb_mark_last_busy(port->serial->dev); } } else { usb_mark_last_busy(port->serial->dev); } } static void usb_wwan_outdat_callback(struct urb *urb) { struct usb_serial_port *port; struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; unsigned long flags; int i; port = urb->context; intfdata = usb_get_serial_data(port->serial); usb_serial_port_softint(port); usb_autopm_put_interface_async(port->serial->interface); portdata = usb_get_serial_port_data(port); spin_lock_irqsave(&intfdata->susp_lock, flags); intfdata->in_flight--; spin_unlock_irqrestore(&intfdata->susp_lock, flags); for (i = 0; i < N_OUT_URB; ++i) { if (portdata->out_urbs[i] == urb) { smp_mb__before_atomic(); clear_bit(i, &portdata->out_busy); break; } } } unsigned int usb_wwan_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_wwan_port_private *portdata; int i; unsigned int data_len = 0; struct urb *this_urb; portdata = usb_get_serial_port_data(port); for (i = 0; i < N_OUT_URB; i++) { this_urb = portdata->out_urbs[i]; if (this_urb && !test_bit(i, &portdata->out_busy)) data_len += OUT_BUFLEN; } dev_dbg(&port->dev, "%s: %u\n", __func__, data_len); return data_len; } EXPORT_SYMBOL(usb_wwan_write_room); unsigned int usb_wwan_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_wwan_port_private *portdata; int i; unsigned int data_len = 0; struct urb *this_urb; portdata = usb_get_serial_port_data(port); for (i = 0; i < N_OUT_URB; i++) { this_urb = portdata->out_urbs[i]; /* FIXME: This locking is insufficient as this_urb may go unused during the test */ if (this_urb && test_bit(i, &portdata->out_busy)) data_len += this_urb->transfer_buffer_length; } dev_dbg(&port->dev, "%s: %u\n", __func__, data_len); return data_len; } EXPORT_SYMBOL(usb_wwan_chars_in_buffer); int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; struct usb_serial *serial = port->serial; int i, err; struct urb *urb; portdata = usb_get_serial_port_data(port); intfdata = usb_get_serial_data(serial); if (port->interrupt_in_urb) { err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (err) { dev_err(&port->dev, "%s: submit int urb failed: %d\n", __func__, err); } } /* Start reading from the IN endpoint */ for (i = 0; i < N_IN_URB; i++) { urb = portdata->in_urbs[i]; if (!urb) continue; err = usb_submit_urb(urb, GFP_KERNEL); if (err) { dev_err(&port->dev, "%s: submit read urb %d failed: %d\n", __func__, i, err); } } spin_lock_irq(&intfdata->susp_lock); if (++intfdata->open_ports == 1) serial->interface->needs_remote_wakeup = 1; spin_unlock_irq(&intfdata->susp_lock); /* this balances a get in the generic USB serial code */ usb_autopm_put_interface(serial->interface); return 0; } EXPORT_SYMBOL(usb_wwan_open); static void unbusy_queued_urb(struct urb *urb, struct usb_wwan_port_private *portdata) { int i; for (i = 0; i < N_OUT_URB; i++) { if (urb == portdata->out_urbs[i]) { clear_bit(i, &portdata->out_busy); break; } } } void usb_wwan_close(struct usb_serial_port *port) { int i; struct usb_serial *serial = port->serial; struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial); struct urb *urb; portdata = usb_get_serial_port_data(port); /* * Need to take susp_lock to make sure port is not already being * resumed, but no need to hold 
it due to the tty-port initialized * flag. */ spin_lock_irq(&intfdata->susp_lock); if (--intfdata->open_ports == 0) serial->interface->needs_remote_wakeup = 0; spin_unlock_irq(&intfdata->susp_lock); for (;;) { urb = usb_get_from_anchor(&portdata->delayed); if (!urb) break; unbusy_queued_urb(urb, portdata); usb_autopm_put_interface_async(serial->interface); } for (i = 0; i < N_IN_URB; i++) usb_kill_urb(portdata->in_urbs[i]); for (i = 0; i < N_OUT_URB; i++) usb_kill_urb(portdata->out_urbs[i]); usb_kill_urb(port->interrupt_in_urb); usb_autopm_get_interface_no_resume(serial->interface); } EXPORT_SYMBOL(usb_wwan_close); static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port, int endpoint, int dir, void *ctx, char *buf, int len, void (*callback) (struct urb *)) { struct usb_serial *serial = port->serial; struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial); struct urb *urb; urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */ if (!urb) return NULL; usb_fill_bulk_urb(urb, serial->dev, usb_sndbulkpipe(serial->dev, endpoint) | dir, buf, len, callback, ctx); if (intfdata->use_zlp && dir == USB_DIR_OUT) urb->transfer_flags |= URB_ZERO_PACKET; return urb; } int usb_wwan_port_probe(struct usb_serial_port *port) { struct usb_wwan_port_private *portdata; struct urb *urb; u8 *buffer; int i; if (!port->bulk_in_size || !port->bulk_out_size) return -ENODEV; portdata = kzalloc(sizeof(*portdata), GFP_KERNEL); if (!portdata) return -ENOMEM; init_usb_anchor(&portdata->delayed); for (i = 0; i < N_IN_URB; i++) { buffer = (u8 *)__get_free_page(GFP_KERNEL); if (!buffer) goto bail_out_error; portdata->in_buffer[i] = buffer; urb = usb_wwan_setup_urb(port, port->bulk_in_endpointAddress, USB_DIR_IN, port, buffer, IN_BUFLEN, usb_wwan_indat_callback); portdata->in_urbs[i] = urb; } for (i = 0; i < N_OUT_URB; i++) { buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL); if (!buffer) goto bail_out_error2; portdata->out_buffer[i] = buffer; urb = usb_wwan_setup_urb(port, port->bulk_out_endpointAddress, USB_DIR_OUT, port, buffer, OUT_BUFLEN, usb_wwan_outdat_callback); portdata->out_urbs[i] = urb; } usb_set_serial_port_data(port, portdata); return 0; bail_out_error2: for (i = 0; i < N_OUT_URB; i++) { usb_free_urb(portdata->out_urbs[i]); kfree(portdata->out_buffer[i]); } bail_out_error: for (i = 0; i < N_IN_URB; i++) { usb_free_urb(portdata->in_urbs[i]); free_page((unsigned long)portdata->in_buffer[i]); } kfree(portdata); return -ENOMEM; } EXPORT_SYMBOL_GPL(usb_wwan_port_probe); void usb_wwan_port_remove(struct usb_serial_port *port) { int i; struct usb_wwan_port_private *portdata; portdata = usb_get_serial_port_data(port); usb_set_serial_port_data(port, NULL); for (i = 0; i < N_IN_URB; i++) { usb_free_urb(portdata->in_urbs[i]); free_page((unsigned long)portdata->in_buffer[i]); } for (i = 0; i < N_OUT_URB; i++) { usb_free_urb(portdata->out_urbs[i]); kfree(portdata->out_buffer[i]); } kfree(portdata); } EXPORT_SYMBOL(usb_wwan_port_remove); #ifdef CONFIG_PM static void stop_urbs(struct usb_serial *serial) { int i, j; struct usb_serial_port *port; struct usb_wwan_port_private *portdata; for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; portdata = usb_get_serial_port_data(port); if (!portdata) continue; for (j = 0; j < N_IN_URB; j++) usb_kill_urb(portdata->in_urbs[j]); for (j = 0; j < N_OUT_URB; j++) usb_kill_urb(portdata->out_urbs[j]); usb_kill_urb(port->interrupt_in_urb); } } int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message) { struct usb_wwan_intf_private *intfdata = 
usb_get_serial_data(serial); spin_lock_irq(&intfdata->susp_lock); if (PMSG_IS_AUTO(message)) { if (intfdata->in_flight) { spin_unlock_irq(&intfdata->susp_lock); return -EBUSY; } } intfdata->suspended = 1; spin_unlock_irq(&intfdata->susp_lock); stop_urbs(serial); return 0; } EXPORT_SYMBOL(usb_wwan_suspend); /* Caller must hold susp_lock. */ static int usb_wwan_submit_delayed_urbs(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct usb_wwan_intf_private *data = usb_get_serial_data(serial); struct usb_wwan_port_private *portdata; struct urb *urb; int err_count = 0; int err; portdata = usb_get_serial_port_data(port); for (;;) { urb = usb_get_from_anchor(&portdata->delayed); if (!urb) break; err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { dev_err(&port->dev, "%s: submit urb failed: %d\n", __func__, err); err_count++; unbusy_queued_urb(urb, portdata); usb_autopm_put_interface_async(serial->interface); continue; } data->in_flight++; } if (err_count) return -EIO; return 0; } int usb_wwan_resume(struct usb_serial *serial) { int i, j; struct usb_serial_port *port; struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial); struct usb_wwan_port_private *portdata; struct urb *urb; int err; int err_count = 0; spin_lock_irq(&intfdata->susp_lock); for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; if (!tty_port_initialized(&port->port)) continue; portdata = usb_get_serial_port_data(port); if (port->interrupt_in_urb) { err = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); if (err) { dev_err(&port->dev, "%s: submit int urb failed: %d\n", __func__, err); err_count++; } } err = usb_wwan_submit_delayed_urbs(port); if (err) err_count++; for (j = 0; j < N_IN_URB; j++) { urb = portdata->in_urbs[j]; err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { dev_err(&port->dev, "%s: submit read urb %d failed: %d\n", __func__, i, err); err_count++; } } } intfdata->suspended = 0; spin_unlock_irq(&intfdata->susp_lock); if (err_count) return -EIO; return 0; } EXPORT_SYMBOL(usb_wwan_resume); #endif MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL v2");
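/*
 * Illustrative model (not driver code) of the suspend bookkeeping used
 * above: a write either goes out immediately (in_flight++) or is parked on
 * the delayed queue while suspended, and resume drains the queue. All ex_*
 * names and the fixed-size queue are invented for this sketch.
 */
#include <stdbool.h>
#include <stdio.h>

#define EX_QUEUE_LEN 8

struct ex_intf {
	bool suspended;
	int in_flight;
	int delayed[EX_QUEUE_LEN];	/* parked writes, modeled as ids */
	int ndelayed;
};

static int ex_write(struct ex_intf *intf, int id)
{
	if (intf->suspended) {
		if (intf->ndelayed == EX_QUEUE_LEN)
			return -1;	/* queue full */
		intf->delayed[intf->ndelayed++] = id;
		return 0;
	}
	intf->in_flight++;
	printf("submitted write %d\n", id);
	return 0;
}

static void ex_resume(struct ex_intf *intf)
{
	int i;

	intf->suspended = false;
	for (i = 0; i < intf->ndelayed; i++) {
		intf->in_flight++;
		printf("submitted delayed write %d\n", intf->delayed[i]);
	}
	intf->ndelayed = 0;
}

int main(void)
{
	struct ex_intf intf = { .suspended = true };

	ex_write(&intf, 1);	/* parked while suspended */
	ex_resume(&intf);	/* drains the delayed queue */
	printf("in_flight=%d\n", intf.in_flight);
	return 0;
}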
// SPDX-License-Identifier: GPL-2.0-or-later /* * Syntek STK1135 subdriver * * Copyright (c) 2013 Ondrej Zary * * Based on Syntekdriver (stk11xx) by Nicolas VIVIEN: * http://syntekdriver.sourceforge.net */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "stk1135" #include "gspca.h" #include "stk1135.h" MODULE_AUTHOR("Ondrej Zary"); MODULE_DESCRIPTION("Syntek STK1135 USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !!
must be the first item */ u8 pkt_seq; u8 sensor_page; bool flip_status; u8 flip_debounce; struct v4l2_ctrl *hflip; struct v4l2_ctrl *vflip; }; static const struct v4l2_pix_format stk1135_modes[] = { /* default mode (this driver supports variable resolution) */ {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB}, }; /* -- read a register -- */ static u8 reg_r(struct gspca_dev *gspca_dev, u16 index) { struct usb_device *dev = gspca_dev->dev; int ret; if (gspca_dev->usb_err < 0) return 0; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x00, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x00, index, gspca_dev->usb_buf, 1, 500); gspca_dbg(gspca_dev, D_USBI, "reg_r 0x%x=0x%02x\n", index, gspca_dev->usb_buf[0]); if (ret < 0) { pr_err("reg_r 0x%x err %d\n", index, ret); gspca_dev->usb_err = ret; return 0; } return gspca_dev->usb_buf[0]; } /* -- write a register -- */ static void reg_w(struct gspca_dev *gspca_dev, u16 index, u8 val) { int ret; struct usb_device *dev = gspca_dev->dev; if (gspca_dev->usb_err < 0) return; ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x01, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, index, NULL, 0, 500); gspca_dbg(gspca_dev, D_USBO, "reg_w 0x%x:=0x%02x\n", index, val); if (ret < 0) { pr_err("reg_w 0x%x err %d\n", index, ret); gspca_dev->usb_err = ret; } } static void reg_w_mask(struct gspca_dev *gspca_dev, u16 index, u8 val, u8 mask) { val = (reg_r(gspca_dev, index) & ~mask) | (val & mask); reg_w(gspca_dev, index, val); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { gspca_dev->cam.cam_mode = stk1135_modes; gspca_dev->cam.nmodes = ARRAY_SIZE(stk1135_modes); return 0; } static int stk1135_serial_wait_ready(struct gspca_dev *gspca_dev) { int i = 0; u8 val; do { val = reg_r(gspca_dev, STK1135_REG_SICTL + 1); if (i++ > 500) { /* maximum retry count */ pr_err("serial bus timeout: status=0x%02x\n", val); return -1; } /* repeat if BUSY or WRITE/READ not finished */ } while ((val & 0x10) || !(val & 0x05)); return 0; } static u8 sensor_read_8(struct gspca_dev *gspca_dev, u8 addr) { reg_w(gspca_dev, STK1135_REG_SBUSR, addr); /* begin read */ reg_w(gspca_dev, STK1135_REG_SICTL, 0x20); /* wait until finished */ if (stk1135_serial_wait_ready(gspca_dev)) { pr_err("Sensor read failed\n"); return 0; } return reg_r(gspca_dev, STK1135_REG_SBUSR + 1); } static u16 sensor_read_16(struct gspca_dev *gspca_dev, u8 addr) { return (sensor_read_8(gspca_dev, addr) << 8) | sensor_read_8(gspca_dev, 0xf1); } static void sensor_write_8(struct gspca_dev *gspca_dev, u8 addr, u8 data) { /* load address and data registers */ reg_w(gspca_dev, STK1135_REG_SBUSW, addr); reg_w(gspca_dev, STK1135_REG_SBUSW + 1, data); /* begin write */ reg_w(gspca_dev, STK1135_REG_SICTL, 0x01); /* wait until finished */ if (stk1135_serial_wait_ready(gspca_dev)) { pr_err("Sensor write failed\n"); return; } } static void sensor_write_16(struct gspca_dev *gspca_dev, u8 addr, u16 data) { sensor_write_8(gspca_dev, addr, data >> 8); sensor_write_8(gspca_dev, 0xf1, data & 0xff); } static void sensor_set_page(struct gspca_dev *gspca_dev, u8 page) { struct sd *sd = (struct sd *) gspca_dev; if (page != sd->sensor_page) { sensor_write_16(gspca_dev, 0xf0, page); sd->sensor_page = page; } } static u16 sensor_read(struct gspca_dev *gspca_dev, u16 reg) { sensor_set_page(gspca_dev, reg >> 8); return sensor_read_16(gspca_dev, reg & 0xff); } static void 
sensor_write(struct gspca_dev *gspca_dev, u16 reg, u16 val) { sensor_set_page(gspca_dev, reg >> 8); sensor_write_16(gspca_dev, reg & 0xff, val); } static void sensor_write_mask(struct gspca_dev *gspca_dev, u16 reg, u16 val, u16 mask) { val = (sensor_read(gspca_dev, reg) & ~mask) | (val & mask); sensor_write(gspca_dev, reg, val); } struct sensor_val { u16 reg; u16 val; }; /* configure MT9M112 sensor */ static void stk1135_configure_mt9m112(struct gspca_dev *gspca_dev) { static const struct sensor_val cfg[] = { /* restart&reset, chip enable, reserved */ { 0x00d, 0x000b }, { 0x00d, 0x0008 }, { 0x035, 0x0022 }, /* mode ctl: AWB on, AE both, clip aper corr, defect corr, AE */ { 0x106, 0x700e }, { 0x2dd, 0x18e0 }, /* B-R thresholds, */ /* AWB */ { 0x21f, 0x0180 }, /* Cb and Cr limits */ { 0x220, 0xc814 }, { 0x221, 0x8080 }, /* lum limits, RGB gain */ { 0x222, 0xa078 }, { 0x223, 0xa078 }, /* R, B limit */ { 0x224, 0x5f20 }, { 0x228, 0xea02 }, /* mtx adj lim, adv ctl */ { 0x229, 0x867a }, /* wide gates */ /* Color correction */ /* imager gains base, delta, delta signs */ { 0x25e, 0x594c }, { 0x25f, 0x4d51 }, { 0x260, 0x0002 }, /* AWB adv ctl 2, gain offs */ { 0x2ef, 0x0008 }, { 0x2f2, 0x0000 }, /* base matrix signs, scale K1-5, K6-9 */ { 0x202, 0x00ee }, { 0x203, 0x3923 }, { 0x204, 0x0724 }, /* base matrix coef */ { 0x209, 0x00cd }, { 0x20a, 0x0093 }, { 0x20b, 0x0004 },/*K1-3*/ { 0x20c, 0x005c }, { 0x20d, 0x00d9 }, { 0x20e, 0x0053 },/*K4-6*/ { 0x20f, 0x0008 }, { 0x210, 0x0091 }, { 0x211, 0x00cf },/*K7-9*/ { 0x215, 0x0000 }, /* delta mtx signs */ /* delta matrix coef */ { 0x216, 0x0000 }, { 0x217, 0x0000 }, { 0x218, 0x0000 },/*D1-3*/ { 0x219, 0x0000 }, { 0x21a, 0x0000 }, { 0x21b, 0x0000 },/*D4-6*/ { 0x21c, 0x0000 }, { 0x21d, 0x0000 }, { 0x21e, 0x0000 },/*D7-9*/ /* enable & disable manual WB to apply color corr. settings */ { 0x106, 0xf00e }, { 0x106, 0x700e }, /* Lens shading correction */ { 0x180, 0x0007 }, /* control */ /* vertical knee 0, 2+1, 4+3 */ { 0x181, 0xde13 }, { 0x182, 0xebe2 }, { 0x183, 0x00f6 }, /* R */ { 0x184, 0xe114 }, { 0x185, 0xeadd }, { 0x186, 0xfdf6 }, /* G */ { 0x187, 0xe511 }, { 0x188, 0xede6 }, { 0x189, 0xfbf7 }, /* B */ /* horizontal knee 0, 2+1, 4+3, 5 */ { 0x18a, 0xd613 }, { 0x18b, 0xedec }, /* R .. */ { 0x18c, 0xf9f2 }, { 0x18d, 0x0000 }, /* .. R */ { 0x18e, 0xd815 }, { 0x18f, 0xe9ea }, /* G .. */ { 0x190, 0xf9f1 }, { 0x191, 0x0002 }, /* .. G */ { 0x192, 0xde10 }, { 0x193, 0xefef }, /* B .. */ { 0x194, 0xfbf4 }, { 0x195, 0x0002 }, /* .. 
B */ /* vertical knee 6+5, 8+7 */ { 0x1b6, 0x0e06 }, { 0x1b7, 0x2713 }, /* R */ { 0x1b8, 0x1106 }, { 0x1b9, 0x2713 }, /* G */ { 0x1ba, 0x0c03 }, { 0x1bb, 0x2a0f }, /* B */ /* horizontal knee 7+6, 9+8, 10 */ { 0x1bc, 0x1208 }, { 0x1bd, 0x1a16 }, { 0x1be, 0x0022 }, /* R */ { 0x1bf, 0x150a }, { 0x1c0, 0x1c1a }, { 0x1c1, 0x002d }, /* G */ { 0x1c2, 0x1109 }, { 0x1c3, 0x1414 }, { 0x1c4, 0x002a }, /* B */ { 0x106, 0x740e }, /* enable lens shading correction */ /* Gamma correction - context A */ { 0x153, 0x0b03 }, { 0x154, 0x4722 }, { 0x155, 0xac82 }, { 0x156, 0xdac7 }, { 0x157, 0xf5e9 }, { 0x158, 0xff00 }, /* Gamma correction - context B */ { 0x1dc, 0x0b03 }, { 0x1dd, 0x4722 }, { 0x1de, 0xac82 }, { 0x1df, 0xdac7 }, { 0x1e0, 0xf5e9 }, { 0x1e1, 0xff00 }, /* output format: RGB, invert output pixclock, output bayer */ { 0x13a, 0x4300 }, { 0x19b, 0x4300 }, /* for context A, B */ { 0x108, 0x0180 }, /* format control - enable bayer row flip */ { 0x22f, 0xd100 }, { 0x29c, 0xd100 }, /* AE A, B */ /* default prg conf, prg ctl - by 0x2d2, prg advance - PA1 */ { 0x2d2, 0x0000 }, { 0x2cc, 0x0004 }, { 0x2cb, 0x0001 }, { 0x22e, 0x0c3c }, { 0x267, 0x1010 }, /* AE tgt ctl, gain lim */ /* PLL */ { 0x065, 0xa000 }, /* clk ctl - enable PLL (clear bit 14) */ { 0x066, 0x2003 }, { 0x067, 0x0501 }, /* PLL M=128, N=3, P=1 */ { 0x065, 0x2000 }, /* disable PLL bypass (clear bit 15) */ { 0x005, 0x01b8 }, { 0x007, 0x00d8 }, /* horiz blanking B, A */ /* AE line size, shutter delay limit */ { 0x239, 0x06c0 }, { 0x23b, 0x040e }, /* for context A */ { 0x23a, 0x06c0 }, { 0x23c, 0x0564 }, /* for context B */ /* shutter width basis 60Hz, 50Hz */ { 0x257, 0x0208 }, { 0x258, 0x0271 }, /* for context A */ { 0x259, 0x0209 }, { 0x25a, 0x0271 }, /* for context B */ { 0x25c, 0x120d }, { 0x25d, 0x1712 }, /* flicker 60Hz, 50Hz */ { 0x264, 0x5e1c }, /* reserved */ /* flicker, AE gain limits, gain zone limits */ { 0x25b, 0x0003 }, { 0x236, 0x7810 }, { 0x237, 0x8304 }, { 0x008, 0x0021 }, /* vert blanking A */ }; int i; u16 width, height; for (i = 0; i < ARRAY_SIZE(cfg); i++) sensor_write(gspca_dev, cfg[i].reg, cfg[i].val); /* set output size */ width = gspca_dev->pixfmt.width; height = gspca_dev->pixfmt.height; if (width <= 640 && height <= 512) { /* context A (half readout speed)*/ sensor_write(gspca_dev, 0x1a7, width); sensor_write(gspca_dev, 0x1aa, height); /* set read mode context A */ sensor_write(gspca_dev, 0x0c8, 0x0000); /* set resize, read mode, vblank, hblank context A */ sensor_write(gspca_dev, 0x2c8, 0x0000); } else { /* context B (full readout speed) */ sensor_write(gspca_dev, 0x1a1, width); sensor_write(gspca_dev, 0x1a4, height); /* set read mode context B */ sensor_write(gspca_dev, 0x0c8, 0x0008); /* set resize, read mode, vblank, hblank context B */ sensor_write(gspca_dev, 0x2c8, 0x040b); } } static void stk1135_configure_clock(struct gspca_dev *gspca_dev) { /* configure SCLKOUT */ reg_w(gspca_dev, STK1135_REG_TMGEN, 0x12); /* set 1 clock per pixel */ /* and positive edge clocked pulse high when pixel counter = 0 */ reg_w(gspca_dev, STK1135_REG_TCP1 + 0, 0x41); reg_w(gspca_dev, STK1135_REG_TCP1 + 1, 0x00); reg_w(gspca_dev, STK1135_REG_TCP1 + 2, 0x00); reg_w(gspca_dev, STK1135_REG_TCP1 + 3, 0x00); /* enable CLKOUT for sensor */ reg_w(gspca_dev, STK1135_REG_SENSO + 0, 0x10); /* disable STOP clock */ reg_w(gspca_dev, STK1135_REG_SENSO + 1, 0x00); /* set lower 8 bits of PLL feedback divider */ reg_w(gspca_dev, STK1135_REG_SENSO + 3, 0x07); /* set other PLL parameters */ reg_w(gspca_dev, STK1135_REG_PLLFD, 0x06); /* enable timing 
generator */ reg_w(gspca_dev, STK1135_REG_TMGEN, 0x80); /* enable PLL */ reg_w(gspca_dev, STK1135_REG_SENSO + 2, 0x04); /* set serial interface clock divider (30MHz / (0x1f*16+2) = 60240 Hz) */ reg_w(gspca_dev, STK1135_REG_SICTL + 2, 0x1f); /* wait a while for sensor to catch up */ udelay(1000); } static void stk1135_camera_disable(struct gspca_dev *gspca_dev) { /* set capture end Y position to 0 */ reg_w(gspca_dev, STK1135_REG_CIEPO + 2, 0x00); reg_w(gspca_dev, STK1135_REG_CIEPO + 3, 0x00); /* disable capture */ reg_w_mask(gspca_dev, STK1135_REG_SCTRL, 0x00, 0x80); /* enable sensor standby and disable chip enable */ sensor_write_mask(gspca_dev, 0x00d, 0x0004, 0x000c); /* disable PLL */ reg_w_mask(gspca_dev, STK1135_REG_SENSO + 2, 0x00, 0x01); /* disable timing generator */ reg_w(gspca_dev, STK1135_REG_TMGEN, 0x00); /* enable STOP clock */ reg_w(gspca_dev, STK1135_REG_SENSO + 1, 0x20); /* disable CLKOUT for sensor */ reg_w(gspca_dev, STK1135_REG_SENSO, 0x00); /* disable sensor (GPIO5) and enable GPIO0,3,6 (?) - sensor standby? */ reg_w(gspca_dev, STK1135_REG_GCTRL, 0x49); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { u16 sensor_id; char *sensor_name; struct sd *sd = (struct sd *) gspca_dev; /* set GPIO3,4,5,6 direction to output */ reg_w(gspca_dev, STK1135_REG_GCTRL + 2, 0x78); /* enable sensor (GPIO5) */ reg_w(gspca_dev, STK1135_REG_GCTRL, (1 << 5)); /* disable ROM interface */ reg_w(gspca_dev, STK1135_REG_GCTRL + 3, 0x80); /* enable interrupts from GPIO8 (flip sensor) and GPIO9 (???) */ reg_w(gspca_dev, STK1135_REG_ICTRL + 1, 0x00); reg_w(gspca_dev, STK1135_REG_ICTRL + 3, 0x03); /* enable remote wakeup from GPIO9 (???) */ reg_w(gspca_dev, STK1135_REG_RMCTL + 1, 0x00); reg_w(gspca_dev, STK1135_REG_RMCTL + 3, 0x02); /* reset serial interface */ reg_w(gspca_dev, STK1135_REG_SICTL, 0x80); reg_w(gspca_dev, STK1135_REG_SICTL, 0x00); /* set sensor address */ reg_w(gspca_dev, STK1135_REG_SICTL + 3, 0xba); /* disable alt 2-wire serial interface */ reg_w(gspca_dev, STK1135_REG_ASIC + 3, 0x00); stk1135_configure_clock(gspca_dev); /* read sensor ID */ sd->sensor_page = 0xff; sensor_id = sensor_read(gspca_dev, 0x000); switch (sensor_id) { case 0x148c: sensor_name = "MT9M112"; break; default: sensor_name = "unknown"; } pr_info("Detected sensor type %s (0x%x)\n", sensor_name, sensor_id); stk1135_camera_disable(gspca_dev); return gspca_dev->usb_err; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 width, height; /* enable sensor (GPIO5) */ reg_w(gspca_dev, STK1135_REG_GCTRL, (1 << 5)); stk1135_configure_clock(gspca_dev); /* set capture start position X = 0, Y = 0 */ reg_w(gspca_dev, STK1135_REG_CISPO + 0, 0x00); reg_w(gspca_dev, STK1135_REG_CISPO + 1, 0x00); reg_w(gspca_dev, STK1135_REG_CISPO + 2, 0x00); reg_w(gspca_dev, STK1135_REG_CISPO + 3, 0x00); /* set capture end position */ width = gspca_dev->pixfmt.width; height = gspca_dev->pixfmt.height; reg_w(gspca_dev, STK1135_REG_CIEPO + 0, width & 0xff); reg_w(gspca_dev, STK1135_REG_CIEPO + 1, width >> 8); reg_w(gspca_dev, STK1135_REG_CIEPO + 2, height & 0xff); reg_w(gspca_dev, STK1135_REG_CIEPO + 3, height >> 8); /* set 8-bit mode */ reg_w(gspca_dev, STK1135_REG_SCTRL, 0x20); stk1135_configure_mt9m112(gspca_dev); /* enable capture */ reg_w_mask(gspca_dev, STK1135_REG_SCTRL, 0x80, 0x80); if (gspca_dev->usb_err >= 0) gspca_dbg(gspca_dev, D_STREAM, "camera started alt: 0x%02x\n", gspca_dev->alt); sd->pkt_seq = 0; return
gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct usb_device *dev = gspca_dev->dev; usb_set_interface(dev, gspca_dev->iface, 0); stk1135_camera_disable(gspca_dev); gspca_dbg(gspca_dev, D_STREAM, "camera stopped\n"); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; int skip = sizeof(struct stk1135_pkt_header); bool flip; enum gspca_packet_type pkt_type = INTER_PACKET; struct stk1135_pkt_header *hdr = (void *)data; u8 seq; if (len < 4) { gspca_dbg(gspca_dev, D_PACK, "received short packet (less than 4 bytes)\n"); return; } /* GPIO 8 is flip sensor (1 = normal position, 0 = flipped to back) */ flip = !(le16_to_cpu(hdr->gpio) & (1 << 8)); /* it's a switch, needs software debounce */ if (sd->flip_status != flip) sd->flip_debounce++; else sd->flip_debounce = 0; /* check sequence number (not present in new frame packets) */ if (!(hdr->flags & STK1135_HDR_FRAME_START)) { seq = hdr->seq & STK1135_HDR_SEQ_MASK; if (seq != sd->pkt_seq) { gspca_dbg(gspca_dev, D_PACK, "received out-of-sequence packet\n"); /* resync sequence and discard packet */ sd->pkt_seq = seq; gspca_dev->last_packet_type = DISCARD_PACKET; return; } } sd->pkt_seq++; if (sd->pkt_seq > STK1135_HDR_SEQ_MASK) sd->pkt_seq = 0; if (len == sizeof(struct stk1135_pkt_header)) return; if (hdr->flags & STK1135_HDR_FRAME_START) { /* new frame */ skip = 8; /* the header is longer */ gspca_frame_add(gspca_dev, LAST_PACKET, data, 0); pkt_type = FIRST_PACKET; } gspca_frame_add(gspca_dev, pkt_type, data + skip, len - skip); } static void sethflip(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; if (sd->flip_status) val = !val; sensor_write_mask(gspca_dev, 0x020, val ? 0x0002 : 0x0000 , 0x0002); } static void setvflip(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; if (sd->flip_status) val = !val; sensor_write_mask(gspca_dev, 0x020, val ? 
0x0001 : 0x0000 , 0x0001); } static void stk1135_dq_callback(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->flip_debounce > 100) { sd->flip_status = !sd->flip_status; sethflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip)); setvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->vflip)); } } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_HFLIP: sethflip(gspca_dev, ctrl->val); break; case V4L2_CID_VFLIP: setvflip(gspca_dev, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 2); sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } static void stk1135_try_fmt(struct gspca_dev *gspca_dev, struct v4l2_format *fmt) { fmt->fmt.pix.width = clamp(fmt->fmt.pix.width, 32U, 1280U); fmt->fmt.pix.height = clamp(fmt->fmt.pix.height, 32U, 1024U); /* round up to even numbers */ fmt->fmt.pix.width += (fmt->fmt.pix.width & 1); fmt->fmt.pix.height += (fmt->fmt.pix.height & 1); fmt->fmt.pix.bytesperline = fmt->fmt.pix.width; fmt->fmt.pix.sizeimage = fmt->fmt.pix.width * fmt->fmt.pix.height; } static int stk1135_enum_framesizes(struct gspca_dev *gspca_dev, struct v4l2_frmsizeenum *fsize) { if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_SBGGR8) return -EINVAL; fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; fsize->stepwise.min_width = 32; fsize->stepwise.min_height = 32; fsize->stepwise.max_width = 1280; fsize->stepwise.max_height = 1024; fsize->stepwise.step_width = 2; fsize->stepwise.step_height = 2; return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = stk1135_dq_callback, .try_fmt = stk1135_try_fmt, .enum_framesizes = stk1135_enum_framesizes, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x174f, 0x6a31)}, /* ASUS laptop, MT9M112 sensor */ {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
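/*
 * Editor's illustrative sketch -- not part of the driver. The MT9M112
 * register map used above is paged: the high byte of a 16-bit register
 * address selects the page (written to sensor register 0xf0) and only
 * the low byte addresses a register within that page. This hypothetical
 * helper restates what sensor_write() does, minus the sd->sensor_page
 * cache that lets repeated same-page accesses skip the 0xf0 write.
 */
static void example_paged_write(struct gspca_dev *gspca_dev, u16 reg, u16 val)
{
	u8 page = reg >> 8;	/* page select, goes to register 0xf0 */
	u8 addr = reg & 0xff;	/* register address within the page */

	sensor_write_16(gspca_dev, 0xf0, page);
	sensor_write_16(gspca_dev, addr, val);
}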
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/usb.h> #include "mt7601u.h" #include "usb.h" #include "trace.h" static const struct usb_device_id mt7601u_device_table[] = { { USB_DEVICE(0x0b05, 0x17d3) }, { USB_DEVICE(0x0e8d, 0x760a) }, { USB_DEVICE(0x0e8d, 0x760b) }, { USB_DEVICE(0x13d3, 0x3431) }, { USB_DEVICE(0x13d3, 0x3434) }, { USB_DEVICE(0x148f, 0x7601) }, { USB_DEVICE(0x148f, 0x760a) }, { USB_DEVICE(0x148f, 0x760b) }, { USB_DEVICE(0x148f, 0x760c) }, { USB_DEVICE(0x148f, 0x760d) }, { USB_DEVICE(0x2001, 0x3d04) }, { USB_DEVICE(0x2717, 0x4106) }, { USB_DEVICE(0x2955, 0x0001) }, { USB_DEVICE(0x2955, 0x1001) }, { USB_DEVICE(0x2955, 0x1003) }, { USB_DEVICE(0x2a5f, 0x1000) }, { USB_DEVICE(0x7392, 0x7710) }, { 0, } }; bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len, struct mt7601u_dma_buf *buf) { struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); buf->len = len; buf->urb = usb_alloc_urb(0, GFP_KERNEL); buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma); return !buf->urb || !buf->buf; } void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf) { struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma); usb_free_urb(buf->urb); } int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx, struct mt7601u_dma_buf *buf, gfp_t gfp, usb_complete_t complete_fn, void *context) { struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); unsigned pipe; int ret; if (dir == USB_DIR_IN) pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[ep_idx]); else pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep_idx]); usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len, complete_fn, context); buf->urb->transfer_dma = buf->dma; buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; trace_mt_submit_urb(dev, buf->urb); ret = usb_submit_urb(buf->urb, gfp); if (ret) dev_err(dev->dev, "Error: submit URB dir:%d ep:%d failed:%d\n", dir, ep_idx, ret); return ret; }
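/*
 * Editor's illustrative sketch -- not driver code. One plausible caller
 * of the helpers above, using mt7601u_complete_urb() defined just below.
 * Note that mt7601u_usb_alloc_buf() returns true on *failure*;
 * mt7601u_usb_free_buf() is safe on a half-allocated buffer because both
 * usb_free_coherent() and usb_free_urb() accept NULL. MT_EP_IN_PKT_RX is
 * assumed to be the bulk-in data endpoint index from usb.h.
 */
static int example_rx_once(struct mt7601u_dev *dev)
{
	struct mt7601u_dma_buf buf;
	DECLARE_COMPLETION_ONSTACK(cmpl);
	int ret;

	if (mt7601u_usb_alloc_buf(dev, 512, &buf)) {
		mt7601u_usb_free_buf(dev, &buf);
		return -ENOMEM;
	}

	ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, &buf,
				     GFP_KERNEL, mt7601u_complete_urb, &cmpl);
	if (!ret && !wait_for_completion_timeout(&cmpl,
						 msecs_to_jiffies(300))) {
		usb_kill_urb(buf.urb);	/* give up on a stuck transfer */
		ret = -ETIMEDOUT;
	}

	mt7601u_usb_free_buf(dev, &buf);
	return ret;
}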
void mt7601u_complete_urb(struct urb *urb) { struct completion *cmpl = urb->context; complete(cmpl); } int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, const u8 direction, const u16 val, const u16 offset, void *buf, const size_t buflen) { int i, ret; struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE; const unsigned int pipe = (direction == USB_DIR_IN) ? usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0); for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) { ret = usb_control_msg(usb_dev, pipe, req, req_type, val, offset, buf, buflen, MT_VEND_REQ_TOUT_MS); trace_mt_vend_req(dev, pipe, req, req_type, val, offset, buf, buflen, ret); if (ret == -ENODEV) set_bit(MT7601U_STATE_REMOVED, &dev->state); if (ret >= 0 || ret == -ENODEV) return ret; msleep(5); } dev_err(dev->dev, "Vendor request req:%02x off:%04x failed:%d\n", req, offset, ret); return ret; } void mt7601u_vendor_reset(struct mt7601u_dev *dev) { mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT, MT_VEND_DEV_MODE_RESET, 0, NULL, 0); } /* should be called with vendor_req_mutex held */ static u32 __mt7601u_rr(struct mt7601u_dev *dev, u32 offset) { int ret; u32 val = ~0; WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset); ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN, 0, offset, dev->vend_buf, MT_VEND_BUF); if (ret == MT_VEND_BUF) val = get_unaligned_le32(dev->vend_buf); else if (ret > 0) dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n", ret, offset); trace_reg_read(dev, offset, val); return val; } u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset) { u32 ret; mutex_lock(&dev->vendor_req_mutex); ret = __mt7601u_rr(dev, offset); mutex_unlock(&dev->vendor_req_mutex); return ret; } /* should be called with vendor_req_mutex held */ static int __mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req, const u16 offset, const u32 val) { int ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT, val & 0xffff, offset, NULL, 0); if (!ret) ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT, val >> 16, offset + 2, NULL, 0); trace_reg_write(dev, offset, val); return ret; } int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req, const u16 offset, const u32 val) { int ret; mutex_lock(&dev->vendor_req_mutex); ret = __mt7601u_vendor_single_wr(dev, req, offset, val); mutex_unlock(&dev->vendor_req_mutex); return ret; } void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val) { WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset); mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val); } u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val) { mutex_lock(&dev->vendor_req_mutex); val |= __mt7601u_rr(dev, offset) & ~mask; __mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val); mutex_unlock(&dev->vendor_req_mutex); return val; } u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val) { u32 reg; mutex_lock(&dev->vendor_req_mutex); reg = __mt7601u_rr(dev, offset); val |= reg & ~mask; if (reg != val) __mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val); mutex_unlock(&dev->vendor_req_mutex); return val; } void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset, const void *data, int len) { WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset); WARN_ONCE(len & 3, "short write copy off:%08x", offset); mt7601u_burst_write_regs(dev, offset, data, len / 4); } void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr) { mt7601u_wr(dev, 
offset, get_unaligned_le32(addr)); mt7601u_wr(dev, offset + 4, addr[4] | addr[5] << 8); } static int mt7601u_assign_pipes(struct usb_interface *usb_intf, struct mt7601u_dev *dev) { struct usb_endpoint_descriptor *ep_desc; struct usb_host_interface *intf_desc = usb_intf->cur_altsetting; unsigned i, ep_i = 0, ep_o = 0; BUILD_BUG_ON(sizeof(dev->in_eps) < __MT_EP_IN_MAX); BUILD_BUG_ON(sizeof(dev->out_eps) < __MT_EP_OUT_MAX); for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { ep_desc = &intf_desc->endpoint[i].desc; if (usb_endpoint_is_bulk_in(ep_desc) && ep_i++ < __MT_EP_IN_MAX) { dev->in_eps[ep_i - 1] = usb_endpoint_num(ep_desc); dev->in_max_packet = usb_endpoint_maxp(ep_desc); /* Note: this is ignored by usb sub-system but vendor * code does it. We can drop this at some point. */ dev->in_eps[ep_i - 1] |= USB_DIR_IN; } else if (usb_endpoint_is_bulk_out(ep_desc) && ep_o++ < __MT_EP_OUT_MAX) { dev->out_eps[ep_o - 1] = usb_endpoint_num(ep_desc); dev->out_max_packet = usb_endpoint_maxp(ep_desc); } } if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) { dev_err(dev->dev, "Error: wrong pipe number in:%d out:%d\n", ep_i, ep_o); return -EINVAL; } return 0; } static int mt7601u_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(usb_intf); struct mt7601u_dev *dev; u32 asic_rev, mac_rev; int ret; dev = mt7601u_alloc_device(&usb_intf->dev); if (!dev) return -ENOMEM; usb_dev = usb_get_dev(usb_dev); usb_reset_device(usb_dev); usb_set_intfdata(usb_intf, dev); dev->vend_buf = devm_kmalloc(dev->dev, MT_VEND_BUF, GFP_KERNEL); if (!dev->vend_buf) { ret = -ENOMEM; goto err; } ret = mt7601u_assign_pipes(usb_intf, dev); if (ret) goto err; ret = mt7601u_wait_asic_ready(dev); if (ret) goto err; asic_rev = mt7601u_rr(dev, MT_ASIC_VERSION); mac_rev = mt7601u_rr(dev, MT_MAC_CSR0); dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n", asic_rev, mac_rev); if ((asic_rev >> 16) != 0x7601) { ret = -ENODEV; goto err; } /* Note: vendor driver skips this check for MT7601U */ if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) dev_warn(dev->dev, "Warning: eFUSE not present\n"); ret = mt7601u_init_hardware(dev); if (ret) goto err; ret = mt7601u_register_device(dev); if (ret) goto err_hw; set_bit(MT7601U_STATE_INITIALIZED, &dev->state); return 0; err_hw: mt7601u_cleanup(dev); err: usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); destroy_workqueue(dev->stat_wq); ieee80211_free_hw(dev->hw); return ret; } static void mt7601u_disconnect(struct usb_interface *usb_intf) { struct mt7601u_dev *dev = usb_get_intfdata(usb_intf); ieee80211_unregister_hw(dev->hw); mt7601u_cleanup(dev); usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); destroy_workqueue(dev->stat_wq); ieee80211_free_hw(dev->hw); } static int mt7601u_suspend(struct usb_interface *usb_intf, pm_message_t state) { struct mt7601u_dev *dev = usb_get_intfdata(usb_intf); mt7601u_cleanup(dev); return 0; } static int mt7601u_resume(struct usb_interface *usb_intf) { struct mt7601u_dev *dev = usb_get_intfdata(usb_intf); int ret; ret = mt7601u_init_hardware(dev); if (ret) return ret; set_bit(MT7601U_STATE_INITIALIZED, &dev->state); return 0; } MODULE_DEVICE_TABLE(usb, mt7601u_device_table); MODULE_FIRMWARE(MT7601U_FIRMWARE); MODULE_DESCRIPTION("MediaTek MT7601U USB Wireless LAN driver"); MODULE_LICENSE("GPL"); static struct usb_driver mt7601u_driver = { .name = KBUILD_MODNAME, .id_table = mt7601u_device_table, .probe = mt7601u_probe, 
.disconnect = mt7601u_disconnect, .suspend = mt7601u_suspend, .resume = mt7601u_resume, .reset_resume = mt7601u_resume, .soft_unbind = 1, .disable_hub_initiated_lpm = 1, }; module_usb_driver(mt7601u_driver);
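/*
 * Editor's illustrative sketch -- not driver code. Updating a register
 * field through mt7601u_rmw(), which holds vendor_req_mutex across the
 * read and the write so the update is atomic with respect to the other
 * accessors above. The EX_* register/field names are invented for the
 * example; FIELD_PREP() requires <linux/bitfield.h>.
 */
#define EX_REG_CFG	0x0440			/* hypothetical register */
#define EX_CFG_EN	BIT(0)			/* hypothetical enable bit */
#define EX_CFG_MODE	GENMASK(3, 1)		/* hypothetical mode field */

static void example_set_mode(struct mt7601u_dev *dev, u32 mode)
{
	u32 val = EX_CFG_EN | FIELD_PREP(EX_CFG_MODE, mode);

	/* clear both fields, then set them to the new value */
	mt7601u_rmw(dev, EX_REG_CFG, EX_CFG_EN | EX_CFG_MODE, val);
}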
// SPDX-License-Identifier: GPL-2.0-or-later /*************************************************************************** * * Copyright (C) 2007-2010 SMSC * *****************************************************************************/ #include <linux/module.h> #include <linux/kmod.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/bitrev.h> #include <linux/crc16.h> #include <linux/crc32.h> #include <linux/usb/usbnet.h> #include <linux/slab.h> #include <linux/of_net.h> #include "smsc75xx.h" #define SMSC_CHIPNAME "smsc75xx" #define SMSC_DRIVER_VERSION "1.0.0" #define HS_USB_PKT_SIZE (512) #define FS_USB_PKT_SIZE (64) #define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE) #define DEFAULT_FS_BURST_CAP_SIZE (6 * 1024 + 33 * FS_USB_PKT_SIZE) #define DEFAULT_BULK_IN_DELAY (0x00002000) #define MAX_SINGLE_PACKET_SIZE (9000) #define LAN75XX_EEPROM_MAGIC (0x7500) #define EEPROM_MAC_OFFSET (0x01) #define DEFAULT_TX_CSUM_ENABLE (true) #define DEFAULT_RX_CSUM_ENABLE (true) #define SMSC75XX_INTERNAL_PHY_ID (1) #define SMSC75XX_TX_OVERHEAD (8) #define MAX_RX_FIFO_SIZE (20 * 1024) #define MAX_TX_FIFO_SIZE (12 * 1024) #define USB_VENDOR_ID_SMSC (0x0424) #define USB_PRODUCT_ID_LAN7500 (0x7500) #define USB_PRODUCT_ID_LAN7505 (0x7505) #define RXW_PADDING 2 #define SUPPORTED_WAKE (WAKE_PHY | WAKE_UCAST | WAKE_BCAST | \ WAKE_MCAST | WAKE_ARP | WAKE_MAGIC) #define SUSPEND_SUSPEND0 (0x01) #define SUSPEND_SUSPEND1 (0x02) #define SUSPEND_SUSPEND2 (0x04) #define SUSPEND_SUSPEND3 (0x08) #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) struct smsc75xx_priv { struct usbnet *dev; u32 rfe_ctl; u32 wolopts; u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN]; struct mutex dataport_mutex; spinlock_t rfe_ctl_lock; struct work_struct set_multicast; u8 suspend_flags; }; static bool turbo_mode = true; module_param(turbo_mode, bool, 0644); MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); static int smsc75xx_link_ok_nopm(struct usbnet *dev); static int smsc75xx_phy_gig_workaround(struct usbnet *dev); static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, u32 *data, int in_pm) { u32 buf; int ret; int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16); BUG_ON(!dev); if (!in_pm) fn = usbnet_read_cmd; else fn = usbnet_read_cmd_nopm; ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); if (unlikely(ret < 4)) { ret = ret < 0 ?
ret : -ENODATA; netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", index, ret); return ret; } le32_to_cpus(&buf); *data = buf; return ret; } static int __must_check __smsc75xx_write_reg(struct usbnet *dev, u32 index, u32 data, int in_pm) { u32 buf; int ret; int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16); BUG_ON(!dev); if (!in_pm) fn = usbnet_write_cmd; else fn = usbnet_write_cmd_nopm; buf = data; cpu_to_le32s(&buf); ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); if (unlikely(ret < 0)) netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n", index, ret); return ret; } static int __must_check smsc75xx_read_reg_nopm(struct usbnet *dev, u32 index, u32 *data) { return __smsc75xx_read_reg(dev, index, data, 1); } static int __must_check smsc75xx_write_reg_nopm(struct usbnet *dev, u32 index, u32 data) { return __smsc75xx_write_reg(dev, index, data, 1); } static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index, u32 *data) { return __smsc75xx_read_reg(dev, index, data, 0); } static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index, u32 data) { return __smsc75xx_write_reg(dev, index, data, 0); } /* Loop until the read is completed with timeout * called with phy_mutex held */ static __must_check int __smsc75xx_phy_wait_not_busy(struct usbnet *dev, int in_pm) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = __smsc75xx_read_reg(dev, MII_ACCESS, &val, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error reading MII_ACCESS\n"); return ret; } if (!(val & MII_ACCESS_BUSY)) return 0; } while (!time_after(jiffies, start_time + HZ)); return -EIO; } static int __smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx, int in_pm) { struct usbnet *dev = netdev_priv(netdev); u32 val, addr; int ret; mutex_lock(&dev->phy_mutex); /* confirm MII not busy */ ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); if (ret < 0) { netdev_warn(dev->net, "MII is busy in smsc75xx_mdio_read\n"); goto done; } /* set the address, index & direction (read from PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) | MII_ACCESS_READ | MII_ACCESS_BUSY; ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error writing MII_ACCESS\n"); goto done; } ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); if (ret < 0) { netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx); goto done; } ret = __smsc75xx_read_reg(dev, MII_DATA, &val, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error reading MII_DATA\n"); goto done; } ret = (u16)(val & 0xFFFF); done: mutex_unlock(&dev->phy_mutex); return ret; } static void __smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx, int regval, int in_pm) { struct usbnet *dev = netdev_priv(netdev); u32 val, addr; int ret; mutex_lock(&dev->phy_mutex); /* confirm MII not busy */ ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); if (ret < 0) { netdev_warn(dev->net, "MII is busy in smsc75xx_mdio_write\n"); goto done; } val = regval; ret = __smsc75xx_write_reg(dev, MII_DATA, val, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error writing MII_DATA\n"); goto done; } /* set the address, index & direction (write to PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) | ((idx << 
MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) | MII_ACCESS_WRITE | MII_ACCESS_BUSY; ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error writing MII_ACCESS\n"); goto done; } ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); if (ret < 0) { netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx); goto done; } done: mutex_unlock(&dev->phy_mutex); } static int smsc75xx_mdio_read_nopm(struct net_device *netdev, int phy_id, int idx) { return __smsc75xx_mdio_read(netdev, phy_id, idx, 1); } static void smsc75xx_mdio_write_nopm(struct net_device *netdev, int phy_id, int idx, int regval) { __smsc75xx_mdio_write(netdev, phy_id, idx, regval, 1); } static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx) { return __smsc75xx_mdio_read(netdev, phy_id, idx, 0); } static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx, int regval) { __smsc75xx_mdio_write(netdev, phy_id, idx, regval, 0); } static int smsc75xx_wait_eeprom(struct usbnet *dev) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = smsc75xx_read_reg(dev, E2P_CMD, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading E2P_CMD\n"); return ret; } if (!(val & E2P_CMD_BUSY) || (val & E2P_CMD_TIMEOUT)) break; udelay(40); } while (!time_after(jiffies, start_time + HZ)); if (val & (E2P_CMD_TIMEOUT | E2P_CMD_BUSY)) { netdev_warn(dev->net, "EEPROM read operation timeout\n"); return -EIO; } return 0; } static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = smsc75xx_read_reg(dev, E2P_CMD, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading E2P_CMD\n"); return ret; } if (!(val & E2P_CMD_BUSY)) return 0; udelay(40); } while (!time_after(jiffies, start_time + HZ)); netdev_warn(dev->net, "EEPROM is busy\n"); return -EIO; } static int smsc75xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length, u8 *data) { u32 val; int i, ret; BUG_ON(!dev); BUG_ON(!data); ret = smsc75xx_eeprom_confirm_not_busy(dev); if (ret) return ret; for (i = 0; i < length; i++) { val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR); ret = smsc75xx_write_reg(dev, E2P_CMD, val); if (ret < 0) { netdev_warn(dev->net, "Error writing E2P_CMD\n"); return ret; } ret = smsc75xx_wait_eeprom(dev); if (ret < 0) return ret; ret = smsc75xx_read_reg(dev, E2P_DATA, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading E2P_DATA\n"); return ret; } data[i] = val & 0xFF; offset++; } return 0; } static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length, u8 *data) { u32 val; int i, ret; BUG_ON(!dev); BUG_ON(!data); ret = smsc75xx_eeprom_confirm_not_busy(dev); if (ret) return ret; /* Issue write/erase enable command */ val = E2P_CMD_BUSY | E2P_CMD_EWEN; ret = smsc75xx_write_reg(dev, E2P_CMD, val); if (ret < 0) { netdev_warn(dev->net, "Error writing E2P_CMD\n"); return ret; } ret = smsc75xx_wait_eeprom(dev); if (ret < 0) return ret; for (i = 0; i < length; i++) { /* Fill data register */ val = data[i]; ret = smsc75xx_write_reg(dev, E2P_DATA, val); if (ret < 0) { netdev_warn(dev->net, "Error writing E2P_DATA\n"); return ret; } /* Send "write" command */ val = E2P_CMD_BUSY | E2P_CMD_WRITE | (offset & E2P_CMD_ADDR); ret = smsc75xx_write_reg(dev, E2P_CMD, val); if (ret < 0) { netdev_warn(dev->net, "Error writing E2P_CMD\n"); return ret; } ret = smsc75xx_wait_eeprom(dev); if (ret < 0) return ret; offset++; } return 0; } static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev) { int 
i, ret; for (i = 0; i < 100; i++) { u32 dp_sel; ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); if (ret < 0) { netdev_warn(dev->net, "Error reading DP_SEL\n"); return ret; } if (dp_sel & DP_SEL_DPRDY) return 0; udelay(40); } netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out\n"); return -EIO; } static int smsc75xx_dataport_write(struct usbnet *dev, u32 ram_select, u32 addr, u32 length, u32 *buf) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 dp_sel; int i, ret; mutex_lock(&pdata->dataport_mutex); ret = smsc75xx_dataport_wait_not_busy(dev); if (ret < 0) { netdev_warn(dev->net, "smsc75xx_dataport_write busy on entry\n"); goto done; } ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); if (ret < 0) { netdev_warn(dev->net, "Error reading DP_SEL\n"); goto done; } dp_sel &= ~DP_SEL_RSEL; dp_sel |= ram_select; ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel); if (ret < 0) { netdev_warn(dev->net, "Error writing DP_SEL\n"); goto done; } for (i = 0; i < length; i++) { ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i); if (ret < 0) { netdev_warn(dev->net, "Error writing DP_ADDR\n"); goto done; } ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]); if (ret < 0) { netdev_warn(dev->net, "Error writing DP_DATA\n"); goto done; } ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE); if (ret < 0) { netdev_warn(dev->net, "Error writing DP_CMD\n"); goto done; } ret = smsc75xx_dataport_wait_not_busy(dev); if (ret < 0) { netdev_warn(dev->net, "smsc75xx_dataport_write timeout\n"); goto done; } } done: mutex_unlock(&pdata->dataport_mutex); return ret; } /* returns hash bit number for given MAC address */ static u32 smsc75xx_hash(char addr[ETH_ALEN]) { return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff; } static void smsc75xx_deferred_multicast_write(struct work_struct *param) { struct smsc75xx_priv *pdata = container_of(param, struct smsc75xx_priv, set_multicast); struct usbnet *dev = pdata->dev; int ret; netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n", pdata->rfe_ctl); smsc75xx_dataport_write(dev, DP_SEL_VHF, DP_SEL_VHF_VLAN_LEN, DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table); ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); if (ret < 0) netdev_warn(dev->net, "Error writing RFE_CTL\n"); } static void smsc75xx_set_multicast(struct net_device *netdev) { struct usbnet *dev = netdev_priv(netdev); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); unsigned long flags; int i; spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); pdata->rfe_ctl &= ~(RFE_CTL_AU | RFE_CTL_AM | RFE_CTL_DPF | RFE_CTL_MHF); pdata->rfe_ctl |= RFE_CTL_AB; for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++) pdata->multicast_hash_table[i] = 0; if (dev->net->flags & IFF_PROMISC) { netif_dbg(dev, drv, dev->net, "promiscuous mode enabled\n"); pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_AU; } else if (dev->net->flags & IFF_ALLMULTI) { netif_dbg(dev, drv, dev->net, "receive all multicast enabled\n"); pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF; } else if (!netdev_mc_empty(dev->net)) { struct netdev_hw_addr *ha; netif_dbg(dev, drv, dev->net, "receive multicast hash filter\n"); pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF; netdev_for_each_mc_addr(ha, netdev) { u32 bitnum = smsc75xx_hash(ha->addr); pdata->multicast_hash_table[bitnum / 32] |= (1 << (bitnum % 32)); } } else { netif_dbg(dev, drv, dev->net, "receive own packets only\n"); pdata->rfe_ctl |= RFE_CTL_DPF; } spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); /* defer register writes to a sleepable context */
schedule_work(&pdata->set_multicast); } static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex, u16 lcladv, u16 rmtadv) { u32 flow = 0, fct_flow = 0; int ret; if (duplex == DUPLEX_FULL) { u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); if (cap & FLOW_CTRL_TX) { flow = (FLOW_TX_FCEN | 0xFFFF); /* set fct_flow thresholds to 20% and 80% */ fct_flow = (8 << 8) | 32; } if (cap & FLOW_CTRL_RX) flow |= FLOW_RX_FCEN; netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n", (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); } else { netif_dbg(dev, link, dev->net, "half duplex\n"); } ret = smsc75xx_write_reg(dev, FLOW, flow); if (ret < 0) { netdev_warn(dev->net, "Error writing FLOW\n"); return ret; } ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow); if (ret < 0) { netdev_warn(dev->net, "Error writing FCT_FLOW\n"); return ret; } return 0; } static int smsc75xx_link_reset(struct usbnet *dev) { struct mii_if_info *mii = &dev->mii; struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; u16 lcladv, rmtadv; int ret; /* write to clear phy interrupt status */ smsc75xx_mdio_write(dev->net, mii->phy_id, PHY_INT_SRC, PHY_INT_SRC_CLEAR_ALL); ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); if (ret < 0) { netdev_warn(dev->net, "Error writing INT_STS\n"); return ret; } mii_check_media(mii, 1, 1); mii_ethtool_gset(&dev->mii, &ecmd); lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE); rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA); netif_dbg(dev, link, dev->net, "speed: %u duplex: %d lcladv: %04x rmtadv: %04x\n", ethtool_cmd_speed(&ecmd), ecmd.duplex, lcladv, rmtadv); return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv); } static void smsc75xx_status(struct usbnet *dev, struct urb *urb) { u32 intdata; if (urb->actual_length != 4) { netdev_warn(dev->net, "unexpected urb length %d\n", urb->actual_length); return; } intdata = get_unaligned_le32(urb->transfer_buffer); netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata); if (intdata & INT_ENP_PHY_INT) usbnet_defer_kevent(dev, EVENT_LINK_RESET); else netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n", intdata); } static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net) { return MAX_EEPROM_SIZE; } static int smsc75xx_ethtool_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct usbnet *dev = netdev_priv(netdev); ee->magic = LAN75XX_EEPROM_MAGIC; return smsc75xx_read_eeprom(dev, ee->offset, ee->len, data); } static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct usbnet *dev = netdev_priv(netdev); if (ee->magic != LAN75XX_EEPROM_MAGIC) { netdev_warn(dev->net, "EEPROM: magic value mismatch: 0x%x\n", ee->magic); return -EINVAL; } return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data); } static void smsc75xx_ethtool_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) { struct usbnet *dev = netdev_priv(net); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); wolinfo->supported = SUPPORTED_WAKE; wolinfo->wolopts = pdata->wolopts; } static int smsc75xx_ethtool_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) { struct usbnet *dev = netdev_priv(net); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); int ret; if (wolinfo->wolopts & ~SUPPORTED_WAKE) return -EINVAL; pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts); 
if (ret < 0) netdev_warn(dev->net, "device_set_wakeup_enable error %d\n", ret); return ret; } static const struct ethtool_ops smsc75xx_ethtool_ops = { .get_link = usbnet_get_link, .nway_reset = usbnet_nway_reset, .get_drvinfo = usbnet_get_drvinfo, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len, .get_eeprom = smsc75xx_ethtool_get_eeprom, .set_eeprom = smsc75xx_ethtool_set_eeprom, .get_wol = smsc75xx_ethtool_get_wol, .set_wol = smsc75xx_ethtool_set_wol, .get_link_ksettings = usbnet_get_link_ksettings_mii, .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { struct usbnet *dev = netdev_priv(netdev); if (!netif_running(netdev)) return -EINVAL; return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); } static void smsc75xx_init_mac_address(struct usbnet *dev) { u8 addr[ETH_ALEN]; /* maybe the boot loader passed the MAC address in devicetree */ if (!platform_get_ethdev_address(&dev->udev->dev, dev->net)) { if (is_valid_ether_addr(dev->net->dev_addr)) { /* device tree values are valid so use them */ netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n"); return; } } /* try reading mac address from EEPROM */ if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, addr) == 0) { eth_hw_addr_set(dev->net, addr); if (is_valid_ether_addr(dev->net->dev_addr)) { /* eeprom values are valid so use them */ netif_dbg(dev, ifup, dev->net, "MAC address read from EEPROM\n"); return; } } /* no useful static MAC address found. generate a random one */ eth_hw_addr_random(dev->net); netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n"); } static int smsc75xx_set_mac_address(struct usbnet *dev) { u32 addr_lo = dev->net->dev_addr[0] | dev->net->dev_addr[1] << 8 | dev->net->dev_addr[2] << 16 | dev->net->dev_addr[3] << 24; u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8; int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi); if (ret < 0) { netdev_warn(dev->net, "Failed to write RX_ADDRH: %d\n", ret); return ret; } ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo); if (ret < 0) { netdev_warn(dev->net, "Failed to write RX_ADDRL: %d\n", ret); return ret; } addr_hi |= ADDR_FILTX_FB_VALID; ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi); if (ret < 0) { netdev_warn(dev->net, "Failed to write ADDR_FILTX: %d\n", ret); return ret; } ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo); if (ret < 0) netdev_warn(dev->net, "Failed to write ADDR_FILTX+4: %d\n", ret); return ret; } static int smsc75xx_phy_initialize(struct usbnet *dev) { int bmcr, ret, timeout = 0; /* Initialize MII structure */ dev->mii.dev = dev->net; dev->mii.mdio_read = smsc75xx_mdio_read; dev->mii.mdio_write = smsc75xx_mdio_write; dev->mii.phy_id_mask = 0x1f; dev->mii.reg_num_mask = 0x1f; dev->mii.supports_gmii = 1; dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID; /* reset phy and wait for reset to complete */ smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET); do { msleep(10); bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); if (bmcr < 0) { netdev_warn(dev->net, "Error reading MII_BMCR\n"); return bmcr; } timeout++; } while ((bmcr & BMCR_RESET) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout on PHY Reset\n"); return -EIO; } /* phy workaround for gig link */ smsc75xx_phy_gig_workaround(dev); smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_CSMA | 
ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000, ADVERTISE_1000FULL); /* read and write to clear phy interrupt status */ ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); if (ret < 0) { netdev_warn(dev->net, "Error reading PHY_INT_SRC\n"); return ret; } smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff); smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, PHY_INT_MASK_DEFAULT); mii_nway_restart(&dev->mii); netif_dbg(dev, ifup, dev->net, "phy initialised successfully\n"); return 0; } static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) { int ret = 0; u32 buf; bool rxenabled; ret = smsc75xx_read_reg(dev, MAC_RX, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret); return ret; } rxenabled = ((buf & MAC_RX_RXEN) != 0); if (rxenabled) { buf &= ~MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); return ret; } } /* add 4 to size for FCS */ buf &= ~MAC_RX_MAX_SIZE; buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE); ret = smsc75xx_write_reg(dev, MAC_RX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); return ret; } if (rxenabled) { buf |= MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); return ret; } } return 0; } static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) { struct usbnet *dev = netdev_priv(netdev); int ret; ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); if (ret < 0) { netdev_warn(dev->net, "Failed to set mac rx frame length\n"); return ret; } return usbnet_change_mtu(netdev, new_mtu); } /* Enable or disable Rx checksum offload engine */ static int smsc75xx_set_features(struct net_device *netdev, netdev_features_t features) { struct usbnet *dev = netdev_priv(netdev); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); unsigned long flags; int ret; spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); if (features & NETIF_F_RXCSUM) pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM; else pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM); spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); /* it's racing here! 
*/ ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); if (ret < 0) { netdev_warn(dev->net, "Error writing RFE_CTL\n"); return ret; } return 0; } static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) { int timeout = 0; do { u32 buf; int ret; ret = __smsc75xx_read_reg(dev, PMT_CTL, &buf, in_pm); if (ret < 0) { netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); return ret; } if (buf & PMT_CTL_DEV_RDY) return 0; msleep(10); timeout++; } while (timeout < 100); netdev_warn(dev->net, "timeout waiting for device ready\n"); return -EIO; } static int smsc75xx_phy_gig_workaround(struct usbnet *dev) { struct mii_if_info *mii = &dev->mii; int ret = 0, timeout = 0; u32 buf, link_up = 0; /* Set the phy in Gig loopback */ smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040); /* Wait for the link up */ do { link_up = smsc75xx_link_ok_nopm(dev); usleep_range(10000, 20000); timeout++; } while ((!link_up) && (timeout < 1000)); if (timeout >= 1000) { netdev_warn(dev->net, "Timeout waiting for PHY link up\n"); return -EIO; } /* phy reset */ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); return ret; } buf |= PMT_CTL_PHY_RST; ret = smsc75xx_write_reg(dev, PMT_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret); return ret; } timeout = 0; do { usleep_range(10000, 20000); ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); return ret; } timeout++; } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout waiting for PHY Reset\n"); return -EIO; } return 0; } static int smsc75xx_reset(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 buf; int ret = 0, timeout; netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset\n"); ret = smsc75xx_wait_ready(dev, 0); if (ret < 0) { netdev_warn(dev->net, "device not ready in smsc75xx_reset\n"); return ret; } ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } buf |= HW_CFG_LRST; ret = smsc75xx_write_reg(dev, HW_CFG, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); return ret; } timeout = 0; do { msleep(10); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } timeout++; } while ((buf & HW_CFG_LRST) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout on completion of Lite Reset\n"); return -EIO; } netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY\n"); ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); return ret; } buf |= PMT_CTL_PHY_RST; ret = smsc75xx_write_reg(dev, PMT_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret); return ret; } timeout = 0; do { msleep(10); ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); return ret; } timeout++; } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout waiting for PHY Reset\n"); return -EIO; } netif_dbg(dev, ifup, dev->net, "PHY reset complete\n"); ret = smsc75xx_set_mac_address(dev); if (ret < 0) { netdev_warn(dev->net, "Failed to set mac address\n"); return ret; } netif_dbg(dev, ifup, dev->net, 
"MAC Address: %pM\n", dev->net->dev_addr); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x\n", buf); buf |= HW_CFG_BIR; ret = smsc75xx_write_reg(dev, HW_CFG, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after writing HW_CFG_BIR: 0x%08x\n", buf); if (!turbo_mode) { buf = 0; dev->rx_urb_size = MAX_SINGLE_PACKET_SIZE; } else if (dev->udev->speed == USB_SPEED_HIGH) { buf = DEFAULT_HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE; dev->rx_urb_size = DEFAULT_HS_BURST_CAP_SIZE; } else { buf = DEFAULT_FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE; dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE; } netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size); ret = smsc75xx_write_reg(dev, BURST_CAP, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, BURST_CAP, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "Read Value from BURST_CAP after writing: 0x%08x\n", buf); ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); if (ret < 0) { netdev_warn(dev->net, "Failed to write BULK_IN_DLY: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "Read Value from BULK_IN_DLY after writing: 0x%08x\n", buf); if (turbo_mode) { ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf); buf |= (HW_CFG_MEF | HW_CFG_BCE); ret = smsc75xx_write_reg(dev, HW_CFG, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf); } /* set FIFO sizes */ buf = (MAX_RX_FIFO_SIZE - 512) / 512; ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_RX_FIFO_END: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x\n", buf); buf = (MAX_TX_FIFO_SIZE - 512) / 512; ret = smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_TX_FIFO_END: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x\n", buf); ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); if (ret < 0) { netdev_warn(dev->net, "Failed to write INT_STS: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, ID_REV, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", buf); ret = smsc75xx_read_reg(dev, E2P_CMD, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read E2P_CMD: %d\n", ret); return ret; } /* only set default GPIO/LED settings if no EEPROM is detected */ if (!(buf & E2P_CMD_LOADED)) { ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf); if (ret < 0) { 
netdev_warn(dev->net, "Failed to read LED_GPIO_CFG: %d\n", ret); return ret; } buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL); buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL; ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write LED_GPIO_CFG: %d\n", ret); return ret; } } ret = smsc75xx_write_reg(dev, FLOW, 0); if (ret < 0) { netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret); return ret; } ret = smsc75xx_write_reg(dev, FCT_FLOW, 0); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_FLOW: %d\n", ret); return ret; } /* Don't need rfe_ctl_lock during initialisation */ ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); if (ret < 0) { netdev_warn(dev->net, "Failed to read RFE_CTL: %d\n", ret); return ret; } pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF; ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); if (ret < 0) { netdev_warn(dev->net, "Failed to write RFE_CTL: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); if (ret < 0) { netdev_warn(dev->net, "Failed to read RFE_CTL: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x\n", pdata->rfe_ctl); /* Enable or disable checksum offload engines */ smsc75xx_set_features(dev->net, dev->net->features); smsc75xx_set_multicast(dev->net); ret = smsc75xx_phy_initialize(dev); if (ret < 0) { netdev_warn(dev->net, "Failed to initialize PHY: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret); return ret; } /* enable PHY interrupts */ buf |= INT_ENP_PHY_INT; ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret); return ret; } /* allow mac to detect speed and duplex from phy */ ret = smsc75xx_read_reg(dev, MAC_CR, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret); return ret; } buf |= (MAC_CR_ADD | MAC_CR_ASD); ret = smsc75xx_write_reg(dev, MAC_CR, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, MAC_TX, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_TX: %d\n", ret); return ret; } buf |= MAC_TX_TXEN; ret = smsc75xx_write_reg(dev, MAC_TX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_TX: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x\n", buf); ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read FCT_TX_CTL: %d\n", ret); return ret; } buf |= FCT_TX_CTL_EN; ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_TX_CTL: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); if (ret < 0) { netdev_warn(dev->net, "Failed to set max rx frame length\n"); return ret; } ret = smsc75xx_read_reg(dev, MAC_RX, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret); return ret; } buf |= MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x\n", buf); ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read FCT_RX_CTL: %d\n", ret); return ret; } buf |= 
FCT_RX_CTL_EN; ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_RX_CTL: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x\n", buf); netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0\n"); return 0; } static const struct net_device_ops smsc75xx_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_get_stats64 = dev_get_tstats64, .ndo_change_mtu = smsc75xx_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = smsc75xx_ioctl, .ndo_set_rx_mode = smsc75xx_set_multicast, .ndo_set_features = smsc75xx_set_features, }; static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) { struct smsc75xx_priv *pdata = NULL; int ret; printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n"); ret = usbnet_get_endpoints(dev, intf); if (ret < 0) { netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret); return ret; } dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv), GFP_KERNEL); pdata = (struct smsc75xx_priv *)(dev->data[0]); if (!pdata) return -ENOMEM; pdata->dev = dev; spin_lock_init(&pdata->rfe_ctl_lock); mutex_init(&pdata->dataport_mutex); INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write); if (DEFAULT_TX_CSUM_ENABLE) dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; if (DEFAULT_RX_CSUM_ENABLE) dev->net->features |= NETIF_F_RXCSUM; dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; ret = smsc75xx_wait_ready(dev, 0); if (ret < 0) { netdev_warn(dev->net, "device not ready in smsc75xx_bind\n"); goto free_pdata; } smsc75xx_init_mac_address(dev); /* Init all registers */ ret = smsc75xx_reset(dev); if (ret < 0) { netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret); goto cancel_work; } dev->net->netdev_ops = &smsc75xx_netdev_ops; dev->net->ethtool_ops = &smsc75xx_ethtool_ops; dev->net->flags |= IFF_MULTICAST; dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE; return 0; cancel_work: cancel_work_sync(&pdata->set_multicast); free_pdata: kfree(pdata); dev->data[0] = 0; return ret; } static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); if (pdata) { cancel_work_sync(&pdata->set_multicast); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); dev->data[0] = 0; } } static u16 smsc_crc(const u8 *buffer, size_t len) { return bitrev16(crc16(0xFFFF, buffer, len)); } static int smsc75xx_write_wuff(struct usbnet *dev, int filter, u32 wuf_cfg, u32 wuf_mask1) { int cfg_base = WUF_CFGX + filter * 4; int mask_base = WUF_MASKX + filter * 16; int ret; ret = smsc75xx_write_reg(dev, cfg_base, wuf_cfg); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_CFGX\n"); return ret; } ret = smsc75xx_write_reg(dev, mask_base, wuf_mask1); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_MASKX\n"); return ret; } ret = smsc75xx_write_reg(dev, mask_base + 4, 0); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_MASKX\n"); return ret; } ret = smsc75xx_write_reg(dev, mask_base + 8, 0); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_MASKX\n"); return ret; } ret = smsc75xx_write_reg(dev, mask_base + 12, 0); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_MASKX\n"); return ret; } 
return 0; } static int smsc75xx_enter_suspend0(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val; int ret; ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_PHY_RST)); val |= PMT_CTL_SUS_MODE_0 | PMT_CTL_WOL_EN | PMT_CTL_WUPS; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } pdata->suspend_flags |= SUSPEND_SUSPEND0; return 0; } static int smsc75xx_enter_suspend1(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val; int ret; ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST); val |= PMT_CTL_SUS_MODE_1; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } /* clear wol status, enable energy detection */ val &= ~PMT_CTL_WUPS; val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN); ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } pdata->suspend_flags |= SUSPEND_SUSPEND1; return 0; } static int smsc75xx_enter_suspend2(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val; int ret; ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST); val |= PMT_CTL_SUS_MODE_2; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } pdata->suspend_flags |= SUSPEND_SUSPEND2; return 0; } static int smsc75xx_enter_suspend3(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val; int ret; ret = smsc75xx_read_reg_nopm(dev, FCT_RX_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading FCT_RX_CTL\n"); return ret; } if (val & FCT_RX_CTL_RXUSED) { netdev_dbg(dev->net, "rx fifo not empty in autosuspend\n"); return -EBUSY; } ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST); val |= PMT_CTL_SUS_MODE_3 | PMT_CTL_RES_CLR_WKP_EN; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } /* clear wol status */ val &= ~PMT_CTL_WUPS; val |= PMT_CTL_WUPS_WOL; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } pdata->suspend_flags |= SUSPEND_SUSPEND3; return 0; } static int smsc75xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask) { struct mii_if_info *mii = &dev->mii; int ret; netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n"); /* read to clear */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC); if (ret < 0) { netdev_warn(dev->net, "Error reading PHY_INT_SRC\n"); return ret; } /* enable interrupt source */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK); if (ret < 0) { netdev_warn(dev->net, "Error reading PHY_INT_MASK\n"); return ret; } ret |= mask; smsc75xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_INT_MASK, ret); return 0; } 
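/* Editor's note, inferred from the four suspend helpers above rather than
 * from vendor documentation: the LAN7500 trades power for wakeup capability
 * across its suspend states. SUSPEND0 keeps wakeup-frame/magic-packet
 * matching armed (PMT_CTL_WOL_EN), SUSPEND1 uses PHY energy detect to notice
 * a cable being plugged in, SUSPEND2 is the deepest state with no wake
 * sources enabled, and SUSPEND3 is the runtime-autosuspend state, which
 * bails out with -EBUSY while the RX FIFO still holds data so queued frames
 * are not lost across the transition.
 */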
static int smsc75xx_link_ok_nopm(struct usbnet *dev) { struct mii_if_info *mii = &dev->mii; int ret; /* first, a dummy read, needed to latch some MII phys */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR); if (ret < 0) { netdev_warn(dev->net, "Error reading MII_BMSR\n"); return ret; } ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR); if (ret < 0) { netdev_warn(dev->net, "Error reading MII_BMSR\n"); return ret; } return !!(ret & BMSR_LSTATUS); } static int smsc75xx_autosuspend(struct usbnet *dev, u32 link_up) { int ret; if (!netif_running(dev->net)) { /* interface is ifconfig down so fully power down hw */ netdev_dbg(dev->net, "autosuspend entering SUSPEND2\n"); return smsc75xx_enter_suspend2(dev); } if (!link_up) { /* link is down so enter EDPD mode */ netdev_dbg(dev->net, "autosuspend entering SUSPEND1\n"); /* enable PHY wakeup events for if cable is attached */ ret = smsc75xx_enable_phy_wakeup_interrupts(dev, PHY_INT_MASK_ANEG_COMP); if (ret < 0) { netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); return ret; } netdev_info(dev->net, "entering SUSPEND1 mode\n"); return smsc75xx_enter_suspend1(dev); } /* enable PHY wakeup events so we remote wakeup if cable is pulled */ ret = smsc75xx_enable_phy_wakeup_interrupts(dev, PHY_INT_MASK_LINK_DOWN); if (ret < 0) { netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); return ret; } netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n"); return smsc75xx_enter_suspend3(dev); } static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val, link_up; int ret; ret = usbnet_suspend(intf, message); if (ret < 0) { netdev_warn(dev->net, "usbnet_suspend error\n"); return ret; } if (pdata->suspend_flags) { netdev_warn(dev->net, "error during last resume\n"); pdata->suspend_flags = 0; } /* determine if link is up using only _nopm functions */ link_up = smsc75xx_link_ok_nopm(dev); if (message.event == PM_EVENT_AUTO_SUSPEND) { ret = smsc75xx_autosuspend(dev, link_up); goto done; } /* if we get this far we're not autosuspending */ /* if no wol options set, or if link is down and we're not waking on * PHY activity, enter lowest power SUSPEND2 mode */ if (!(pdata->wolopts & SUPPORTED_WAKE) || !(link_up || (pdata->wolopts & WAKE_PHY))) { netdev_info(dev->net, "entering SUSPEND2 mode\n"); /* disable energy detect (link up) & wake up events */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val &= ~(WUCSR_MPEN | WUCSR_WUEN); ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); goto done; } val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN); ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); goto done; } ret = smsc75xx_enter_suspend2(dev); goto done; } if (pdata->wolopts & WAKE_PHY) { ret = smsc75xx_enable_phy_wakeup_interrupts(dev, (PHY_INT_MASK_ANEG_COMP | PHY_INT_MASK_LINK_DOWN)); if (ret < 0) { netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); goto done; } /* if link is down then configure EDPD and enter SUSPEND1, * otherwise enter SUSPEND0 below */ if (!link_up) { struct mii_if_info *mii = &dev->mii; netdev_info(dev->net, "entering SUSPEND1 mode\n"); /* enable 
energy detect power-down mode */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS); if (ret < 0) { netdev_warn(dev->net, "Error reading PHY_MODE_CTRL_STS\n"); goto done; } ret |= MODE_CTRL_STS_EDPWRDOWN; smsc75xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS, ret); /* enter SUSPEND1 mode */ ret = smsc75xx_enter_suspend1(dev); goto done; } } if (pdata->wolopts & (WAKE_MCAST | WAKE_ARP)) { int i, filter = 0; /* disable all filters */ for (i = 0; i < WUF_NUM; i++) { ret = smsc75xx_write_reg_nopm(dev, WUF_CFGX + i * 4, 0); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_CFGX\n"); goto done; } } if (pdata->wolopts & WAKE_MCAST) { const u8 mcast[] = {0x01, 0x00, 0x5E}; netdev_info(dev->net, "enabling multicast detection\n"); val = WUF_CFGX_EN | WUF_CFGX_ATYPE_MULTICAST | smsc_crc(mcast, 3); ret = smsc75xx_write_wuff(dev, filter++, val, 0x0007); if (ret < 0) { netdev_warn(dev->net, "Error writing wakeup filter\n"); goto done; } } if (pdata->wolopts & WAKE_ARP) { const u8 arp[] = {0x08, 0x06}; netdev_info(dev->net, "enabling ARP detection\n"); val = WUF_CFGX_EN | WUF_CFGX_ATYPE_ALL | (0x0C << 16) | smsc_crc(arp, 2); ret = smsc75xx_write_wuff(dev, filter++, val, 0x0003); if (ret < 0) { netdev_warn(dev->net, "Error writing wakeup filter\n"); goto done; } } /* clear any pending pattern match packet status */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val |= WUCSR_WUFR; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } netdev_info(dev->net, "enabling packet match detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val |= WUCSR_WUEN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } else { netdev_info(dev->net, "disabling packet match detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val &= ~WUCSR_WUEN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } /* disable magic, bcast & unicast wakeup sources */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val &= ~(WUCSR_MPEN | WUCSR_BCST_EN | WUCSR_PFDA_EN); ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } if (pdata->wolopts & WAKE_PHY) { netdev_info(dev->net, "enabling PHY wakeup\n"); ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); goto done; } /* clear wol status, enable energy detection */ val &= ~PMT_CTL_WUPS; val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN); ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); goto done; } } if (pdata->wolopts & WAKE_MAGIC) { netdev_info(dev->net, "enabling magic packet wakeup\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } /* clear any pending magic packet status */ val |= WUCSR_MPR | WUCSR_MPEN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } if (pdata->wolopts & WAKE_BCAST) { 
netdev_info(dev->net, "enabling broadcast detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val |= WUCSR_BCAST_FR | WUCSR_BCST_EN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } if (pdata->wolopts & WAKE_UCAST) { netdev_info(dev->net, "enabling unicast detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val |= WUCSR_WUFR | WUCSR_PFDA_EN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } /* enable receiver to enable frame reception */ ret = smsc75xx_read_reg_nopm(dev, MAC_RX, &val); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret); goto done; } val |= MAC_RX_RXEN; ret = smsc75xx_write_reg_nopm(dev, MAC_RX, val); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); goto done; } /* some wol options are enabled, so enter SUSPEND0 */ netdev_info(dev->net, "entering SUSPEND0 mode\n"); ret = smsc75xx_enter_suspend0(dev); done: /* * TODO: resume() might need to handle the suspend failure * in system sleep */ if (ret && PMSG_IS_AUTO(message)) usbnet_resume(intf); return ret; } static int smsc75xx_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u8 suspend_flags = pdata->suspend_flags; int ret; u32 val; netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags); /* do this first to ensure it's cleared even in error case */ pdata->suspend_flags = 0; if (suspend_flags & SUSPEND_ALLMODES) { /* Disable wakeup sources */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); return ret; } val &= ~(WUCSR_WUEN | WUCSR_MPEN | WUCSR_PFDA_EN | WUCSR_BCST_EN); ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); return ret; } /* clear wake-up status */ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= ~PMT_CTL_WOL_EN; val |= PMT_CTL_WUPS; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } } if (suspend_flags & SUSPEND_SUSPEND2) { netdev_info(dev->net, "resuming from SUSPEND2\n"); ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val |= PMT_CTL_PHY_PWRUP; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } } ret = smsc75xx_wait_ready(dev, 1); if (ret < 0) { netdev_warn(dev->net, "device not ready in smsc75xx_resume\n"); return ret; } return usbnet_resume(intf); } static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb, u32 rx_cmd_a, u32 rx_cmd_b) { if (!(dev->net->features & NETIF_F_RXCSUM) || unlikely(rx_cmd_a & RX_CMD_A_LCSM)) { skb->ip_summed = CHECKSUM_NONE; } else { skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT)); skb->ip_summed = CHECKSUM_COMPLETE; } } static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { /* This check is no longer done by usbnet */ if (skb->len < dev->net->hard_header_len) return 0; while (skb->len > 0) { u32 rx_cmd_a, rx_cmd_b, 
align_count, size; struct sk_buff *ax_skb; unsigned char *packet; rx_cmd_a = get_unaligned_le32(skb->data); skb_pull(skb, 4); rx_cmd_b = get_unaligned_le32(skb->data); skb_pull(skb, 4 + RXW_PADDING); packet = skb->data; /* get the packet length */ size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING; align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; if (unlikely(size > skb->len)) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x\n", rx_cmd_a); return 0; } if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { netif_dbg(dev, rx_err, dev->net, "Error rx_cmd_a=0x%08x\n", rx_cmd_a); dev->net->stats.rx_errors++; dev->net->stats.rx_dropped++; if (rx_cmd_a & RX_CMD_A_FCS) dev->net->stats.rx_crc_errors++; else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) dev->net->stats.rx_frame_errors++; } else { /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x\n", rx_cmd_a); return 0; } /* last frame in this batch */ if (skb->len == size) { smsc75xx_rx_csum_offload(dev, skb, rx_cmd_a, rx_cmd_b); skb_trim(skb, skb->len - 4); /* remove fcs */ return 1; } /* Use "size - 4" to remove fcs */ ax_skb = netdev_alloc_skb_ip_align(dev->net, size - 4); if (unlikely(!ax_skb)) { netdev_warn(dev->net, "Error allocating skb\n"); return 0; } skb_put(ax_skb, size - 4); memcpy(ax_skb->data, packet, size - 4); smsc75xx_rx_csum_offload(dev, ax_skb, rx_cmd_a, rx_cmd_b); usbnet_skb_return(dev, ax_skb); } skb_pull(skb, size); /* padding bytes before the next frame starts */ if (skb->len) skb_pull(skb, align_count); } return 1; } static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { u32 tx_cmd_a, tx_cmd_b; void *ptr; if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) { dev_kfree_skb_any(skb); return NULL; } tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS; if (skb->ip_summed == CHECKSUM_PARTIAL) tx_cmd_a |= TX_CMD_A_IPE | TX_CMD_A_TPE; if (skb_is_gso(skb)) { u16 mss = max(skb_shinfo(skb)->gso_size, TX_MSS_MIN); tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT) & TX_CMD_B_MSS; tx_cmd_a |= TX_CMD_A_LSO; } else { tx_cmd_b = 0; } ptr = skb_push(skb, 8); put_unaligned_le32(tx_cmd_a, ptr); put_unaligned_le32(tx_cmd_b, ptr + 4); return skb; } static int smsc75xx_manage_power(struct usbnet *dev, int on) { dev->intf->needs_remote_wakeup = on; return 0; } static const struct driver_info smsc75xx_info = { .description = "smsc75xx USB 2.0 Gigabit Ethernet", .bind = smsc75xx_bind, .unbind = smsc75xx_unbind, .link_reset = smsc75xx_link_reset, .reset = smsc75xx_reset, .rx_fixup = smsc75xx_rx_fixup, .tx_fixup = smsc75xx_tx_fixup, .status = smsc75xx_status, .manage_power = smsc75xx_manage_power, .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR, }; static const struct usb_device_id products[] = { { /* SMSC7500 USB Gigabit Ethernet Device */ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7500), .driver_info = (unsigned long) &smsc75xx_info, }, { /* SMSC7500 USB Gigabit Ethernet Device */ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7505), .driver_info = (unsigned long) &smsc75xx_info, }, { }, /* END */ }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver smsc75xx_driver = { .name = SMSC_CHIPNAME, .id_table = products, .probe = usbnet_probe, .suspend = smsc75xx_suspend, .resume = smsc75xx_resume, .reset_resume = smsc75xx_resume, .disconnect = usbnet_disconnect, .disable_hub_initiated_lpm = 1, .supports_autosuspend = 1, }; module_usb_driver(smsc75xx_driver); 
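/* Editor's sketch of the TX framing done by smsc75xx_tx_fixup() above: each
 * frame gains an 8-byte header of two little-endian command words, pushed in
 * front of the payload with skb_push(skb, 8). For a 60-byte frame with
 * partial checksum offload and no GSO the words would be (illustrative
 * values only):
 *   tx_cmd_a = (60 & TX_CMD_A_LEN) | TX_CMD_A_FCS | TX_CMD_A_IPE | TX_CMD_A_TPE;
 *   tx_cmd_b = 0;
 */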
MODULE_AUTHOR("Nancy Lin"); MODULE_AUTHOR("Steve Glendinning <steve.glendinning@shawell.net>"); MODULE_DESCRIPTION("SMSC75XX USB 2.0 Gigabit Ethernet Devices"); MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */ /* * * Definitions for mount interface. This describes the in-kernel linked list of mounted filesystems. * * Author: Marco van Wieringen <mvw@planets.elm.net> * */ #ifndef _LINUX_MOUNT_H #define _LINUX_MOUNT_H #include <linux/types.h> #include <asm/barrier.h> struct super_block; struct dentry; struct user_namespace; struct mnt_idmap; struct file_system_type; struct fs_context; struct file; struct path; #define MNT_NOSUID 0x01 #define MNT_NODEV 0x02 #define MNT_NOEXEC 0x04 #define MNT_NOATIME 0x08 #define MNT_NODIRATIME 0x10 #define MNT_RELATIME 0x20 #define MNT_READONLY 0x40 /* does the user want this to be r/o? */ #define MNT_NOSYMFOLLOW 0x80 #define MNT_SHRINKABLE 0x100 #define MNT_WRITE_HOLD 0x200 #define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */ #define MNT_UNBINDABLE 0x2000 /* if the vfsmount is an unbindable mount */ /* * MNT_SHARED_MASK is the set of flags that should be cleared when a * mount becomes shared. Currently, this is only the flag that says a * mount cannot be bind mounted, since this is how we create a mount * that shares events with another mount. If you add a new MNT_* * flag, consider how it interacts with shared mounts. */ #define MNT_SHARED_MASK (MNT_UNBINDABLE) #define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \ | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \ | MNT_READONLY | MNT_NOSYMFOLLOW) #define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME) #define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED) #define MNT_INTERNAL 0x4000 #define MNT_LOCK_ATIME 0x040000 #define MNT_LOCK_NOEXEC 0x080000 #define MNT_LOCK_NOSUID 0x100000 #define MNT_LOCK_NODEV 0x200000 #define MNT_LOCK_READONLY 0x400000 #define MNT_LOCKED 0x800000 #define MNT_DOOMED 0x1000000 #define MNT_SYNC_UMOUNT 0x2000000 #define MNT_MARKED 0x4000000 #define MNT_UMOUNT 0x8000000 struct vfsmount { struct dentry *mnt_root; /* root of the mounted tree */ struct super_block *mnt_sb; /* pointer to superblock */ int mnt_flags; struct mnt_idmap *mnt_idmap; } __randomize_layout; static inline struct mnt_idmap *mnt_idmap(const struct vfsmount *mnt) { /* Pairs with smp_store_release() in do_idmap_mount(). 
*/ return READ_ONCE(mnt->mnt_idmap); } extern int mnt_want_write(struct vfsmount *mnt); extern int mnt_want_write_file(struct file *file); extern void mnt_drop_write(struct vfsmount *mnt); extern void mnt_drop_write_file(struct file *file); extern void mntput(struct vfsmount *mnt); extern struct vfsmount *mntget(struct vfsmount *mnt); extern void mnt_make_shortterm(struct vfsmount *mnt); extern struct vfsmount *mnt_clone_internal(const struct path *path); extern bool __mnt_is_readonly(struct vfsmount *mnt); extern bool mnt_may_suid(struct vfsmount *mnt); extern struct vfsmount *clone_private_mount(const struct path *path); int mnt_get_write_access(struct vfsmount *mnt); void mnt_put_write_access(struct vfsmount *mnt); extern struct vfsmount *fc_mount(struct fs_context *fc); extern struct vfsmount *vfs_create_mount(struct fs_context *fc); extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data); extern struct vfsmount *vfs_submount(const struct dentry *mountpoint, struct file_system_type *type, const char *name, void *data); extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list); extern void mark_mounts_for_expiry(struct list_head *mounts); extern bool path_is_mountpoint(const struct path *path); extern bool our_mnt(struct vfsmount *mnt); extern struct vfsmount *kern_mount(struct file_system_type *); extern void kern_unmount(struct vfsmount *mnt); extern int may_umount_tree(struct vfsmount *); extern int may_umount(struct vfsmount *); int do_mount(const char *, const char __user *, const char *, unsigned long, void *); extern struct vfsmount *collect_mounts(const struct path *); extern void drop_collected_mounts(struct vfsmount *); extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, struct vfsmount *); extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num); extern int cifs_root_data(char **dev, char **opts); #endif /* _LINUX_MOUNT_H */
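Because the three atime flags above interact with an order of precedence that is easy to misread, here is a small userspace-style sketch (assumed semantics, matching how the VFS treats these bits: noatime suppresses all atime updates, nodiratime only directory ones, relatime makes updates conditional; flag values copied from the header above):

#include <stdio.h>

#define MNT_NOATIME	0x08
#define MNT_NODIRATIME	0x10
#define MNT_RELATIME	0x20

static const char *atime_mode(int mnt_flags, int is_dir)
{
	if (mnt_flags & MNT_NOATIME)
		return "never updated";
	if (is_dir && (mnt_flags & MNT_NODIRATIME))
		return "never updated (directory)";
	if (mnt_flags & MNT_RELATIME)
		return "updated only if older than mtime/ctime";
	return "updated on every access";
}

int main(void)
{
	printf("file on relatime mount: atime %s\n",
	       atime_mode(MNT_RELATIME, 0));
	printf("dir on nodiratime mount: atime %s\n",
	       atime_mode(MNT_NODIRATIME, 1));
	return 0;
}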
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _SCSI_DISK_H #define _SCSI_DISK_H /* * More than enough for everybody ;) The huge number of majors * is a leftover from 16bit dev_t days, we don't really need that * much numberspace. */ #define SD_MAJORS 16 /* * Time out in seconds for disks and Magneto-opticals (which are slower). */ #define SD_TIMEOUT (30 * HZ) #define SD_MOD_TIMEOUT (75 * HZ) /* * Flush timeout is a multiplier over the standard device timeout which is * user modifiable via sysfs but initially set to SD_TIMEOUT */ #define SD_FLUSH_TIMEOUT_MULTIPLIER 2 #define SD_WRITE_SAME_TIMEOUT (120 * HZ) /* * Number of allowed retries */ #define SD_MAX_RETRIES 5 #define SD_PASSTHROUGH_RETRIES 1 #define SD_MAX_MEDIUM_TIMEOUTS 2 /* * Size of the initial data buffer for mode and read capacity data */ #define SD_BUF_SIZE 512 /* * Number of sectors at the end of the device to avoid multi-sector * accesses to in the case of last_sector_bug */ #define SD_LAST_BUGGY_SECTORS 8 enum { SD_EXT_CDB_SIZE = 32, /* Extended CDB size */ SD_MEMPOOL_SIZE = 2, /* CDB pool size */ }; enum { SD_DEF_XFER_BLOCKS = 0xffff, SD_MAX_XFER_BLOCKS = 0xffffffff, SD_MAX_WS10_BLOCKS = 0xffff, SD_MAX_WS16_BLOCKS = 0x7fffff, }; enum { SD_LBP_FULL = 0, /* Full logical block provisioning */ SD_LBP_UNMAP, /* Use UNMAP command */ SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */ SD_LBP_WS10, /* Use WRITE SAME(10) with UNMAP bit */ SD_LBP_ZERO, /* Use WRITE SAME(10) with zero payload */ SD_LBP_DISABLE, /* Discard disabled due to failed cmd */ }; enum { SD_ZERO_WRITE = 0, /* Use WRITE(10/16) command */ SD_ZERO_WS, /* Use WRITE SAME(10/16) command */ SD_ZERO_WS16_UNMAP, /* Use WRITE SAME(16) with UNMAP */ SD_ZERO_WS10_UNMAP, /* Use WRITE SAME(10) with UNMAP */ }; /** * struct zoned_disk_info - Specific properties of a ZBC SCSI device. * @nr_zones: number of zones. * @zone_blocks: number of logical blocks per zone. * * This data structure holds the ZBC SCSI device properties that are retrieved * twice: a first time before the gendisk capacity is known and a second time * after the gendisk capacity is known. */ struct zoned_disk_info { u32 nr_zones; u32 zone_blocks; }; struct scsi_disk { struct scsi_device *device; /* * disk_dev is used to show attributes in /sys/class/scsi_disk/, * but otherwise not really needed. Do not use for refcounting. */ struct device disk_dev; struct gendisk *disk; struct opal_dev *opal_dev; #ifdef CONFIG_BLK_DEV_ZONED /* Updated during revalidation before the gendisk capacity is known. 
*/ struct zoned_disk_info early_zone_info; /* Updated during revalidation after the gendisk capacity is known. */ struct zoned_disk_info zone_info; u32 zones_optimal_open; u32 zones_optimal_nonseq; u32 zones_max_open; /* * Either zero or a power of two. If not zero it means that the offset * between zone starting LBAs is constant. */ u32 zone_starting_lba_gran; #endif atomic_t openers; sector_t capacity; /* size in logical blocks */ int max_retries; u32 min_xfer_blocks; u32 max_xfer_blocks; u32 opt_xfer_blocks; u32 max_ws_blocks; u32 max_unmap_blocks; u32 unmap_granularity; u32 unmap_alignment; u32 max_atomic; u32 atomic_alignment; u32 atomic_granularity; u32 max_atomic_with_boundary; u32 max_atomic_boundary; u32 index; unsigned int physical_block_size; unsigned int max_medium_access_timeouts; unsigned int medium_access_timed_out; /* number of permanent streams */ u16 permanent_stream_count; u8 media_present; u8 write_prot; u8 protection_type;/* Data Integrity Field */ u8 provisioning_mode; u8 zeroing_mode; u8 nr_actuators; /* Number of actuators */ bool suspended; /* Disk is suspended (stopped) */ unsigned ATO : 1; /* state of disk ATO bit */ unsigned cache_override : 1; /* temp override of WCE,RCD */ unsigned WCE : 1; /* state of disk WCE bit */ unsigned RCD : 1; /* state of disk RCD bit, unused */ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ unsigned first_scan : 1; unsigned lbpme : 1; unsigned lbprz : 1; unsigned lbpu : 1; unsigned lbpws : 1; unsigned lbpws10 : 1; unsigned lbpvpd : 1; unsigned ws10 : 1; unsigned ws16 : 1; unsigned rc_basis: 2; unsigned zoned: 2; unsigned urswrz : 1; unsigned security : 1; unsigned ignore_medium_access_errors : 1; unsigned rscs : 1; /* reduced stream control support */ unsigned use_atomic_write_boundary : 1; }; #define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev) static inline struct scsi_disk *scsi_disk(struct gendisk *disk) { return disk->private_data; } #define sd_printk(prefix, sdsk, fmt, a...) \ (sdsk)->disk ? \ sdev_prefix_printk(prefix, (sdsk)->device, \ (sdsk)->disk->disk_name, fmt, ##a) : \ sdev_printk(prefix, (sdsk)->device, fmt, ##a) #define sd_first_printk(prefix, sdsk, fmt, a...) 
\ do { \ if ((sdsk)->first_scan) \ sd_printk(prefix, sdsk, fmt, ##a); \ } while (0) static inline int scsi_medium_access_command(struct scsi_cmnd *scmd) { switch (scmd->cmnd[0]) { case READ_6: case READ_10: case READ_12: case READ_16: case SYNCHRONIZE_CACHE: case VERIFY: case VERIFY_12: case VERIFY_16: case WRITE_6: case WRITE_10: case WRITE_12: case WRITE_16: case WRITE_SAME: case WRITE_SAME_16: case UNMAP: return 1; case VARIABLE_LENGTH_CMD: switch (scmd->cmnd[9]) { case READ_32: case VERIFY_32: case WRITE_32: case WRITE_SAME_32: return 1; } } return 0; } static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks) { return blocks << (ilog2(sdev->sector_size) - 9); } static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks) { return blocks * sdev->sector_size; } static inline sector_t bytes_to_logical(struct scsi_device *sdev, unsigned int bytes) { return bytes >> ilog2(sdev->sector_size); } static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sector) { return sector >> (ilog2(sdev->sector_size) - 9); } void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim); #ifdef CONFIG_BLK_DEV_ZONED int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, u8 buf[SD_BUF_SIZE]); int sd_zbc_revalidate_zones(struct scsi_disk *sdkp); blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, unsigned char op, bool all); unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, struct scsi_sense_hdr *sshdr); int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); #else /* CONFIG_BLK_DEV_ZONED */ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, struct queue_limits *lim, u8 buf[SD_BUF_SIZE]) { return 0; } static inline int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) { return 0; } static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, unsigned char op, bool all) { return BLK_STS_TARGET; } static inline unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, struct scsi_sense_hdr *sshdr) { return good_bytes; } #define sd_zbc_report_zones NULL #endif /* CONFIG_BLK_DEV_ZONED */ void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr); void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result); #endif /* _SCSI_DISK_H */
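The conversion helpers above all turn on the distinction between fixed 512-byte Linux sectors and the device's logical block size; the shift trick works because sector_size is a power of two. A standalone worked example (plain C, with a local stand-in for the kernel's ilog2(), assuming a hypothetical 4096-byte logical block):

#include <stdio.h>
#include <stdint.h>

/* integer log2, standing in for the kernel's ilog2() */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint32_t sector_size = 4096;	/* logical block size in bytes */
	uint64_t blocks = 100;
	unsigned int shift = ilog2_u32(sector_size) - 9;

	/* logical_to_sectors(): 100 4KiB blocks -> 800 512-byte sectors */
	printf("%llu blocks = %llu sectors\n",
	       (unsigned long long)blocks,
	       (unsigned long long)(blocks << shift));
	/* bytes_to_logical(): 1 MiB -> 256 4KiB blocks */
	printf("1 MiB = %u blocks\n", 1048576u >> ilog2_u32(sector_size));
	return 0;
}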
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_TRAPS_H #define _ASM_X86_TRAPS_H #include <linux/context_tracking_state.h> #include <linux/kprobes.h> #include <asm/debugreg.h> #include <asm/idtentry.h> #include <asm/siginfo.h> /* TRAP_TRACE, ... */ #include <asm/trap_pf.h> #ifdef CONFIG_X86_64 asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs); asmlinkage __visible notrace struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs); asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *eregs); #endif extern int ibt_selftest(void); extern int ibt_selftest_noendbr(void); #ifdef CONFIG_X86_F00F_BUG /* For handling the F00F bug */ void handle_invalid_op(struct pt_regs *regs); #endif static inline int get_si_code(unsigned long condition) { if (condition & DR_STEP) return TRAP_TRACE; else if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) return TRAP_HWBKPT; else return TRAP_BRKPT; } void math_emulate(struct math_emu_info *); bool fault_in_kernel_space(unsigned long address); #ifdef CONFIG_VMAP_STACK void __noreturn handle_stack_overflow(struct pt_regs *regs, unsigned long fault_address, struct stack_info *info); #endif static inline void cond_local_irq_enable(struct pt_regs *regs) { if (regs->flags & X86_EFLAGS_IF) local_irq_enable(); } static inline void cond_local_irq_disable(struct pt_regs *regs) { if (regs->flags & X86_EFLAGS_IF) local_irq_disable(); } #endif /* _ASM_X86_TRAPS_H */
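To make the get_si_code() classification concrete, here is a standalone sketch (not kernel code; the DR6 bit values and si_code numbers are mirrored from asm/debugreg.h and the siginfo ABI). Note that DR_STEP wins even when a breakpoint bit is also set, matching the if/else ordering above:

#include <stdio.h>

#define DR_TRAP0	0x1
#define DR_TRAP1	0x2
#define DR_TRAP2	0x4
#define DR_TRAP3	0x8
#define DR_STEP		0x4000

#define TRAP_BRKPT	1
#define TRAP_TRACE	2
#define TRAP_HWBKPT	4

static int get_si_code(unsigned long condition)
{
	if (condition & DR_STEP)
		return TRAP_TRACE;	/* single-step trap */
	else if (condition & (DR_TRAP0 | DR_TRAP1 | DR_TRAP2 | DR_TRAP3))
		return TRAP_HWBKPT;	/* hardware breakpoint/watchpoint */
	else
		return TRAP_BRKPT;	/* software breakpoint */
}

int main(void)
{
	printf("DR_STEP     -> si_code %d\n", get_si_code(DR_STEP));
	printf("DR_TRAP1    -> si_code %d\n", get_si_code(DR_TRAP1));
	printf("no bits set -> si_code %d\n", get_si_code(0));
	return 0;
}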
/* SPDX-License-Identifier: GPL-2.0-only */ /* Driver for Realtek RTS5139 USB card reader * * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. 
* * Author: * Roger Tseng <rogerable@realtek.com> */ #ifndef __RTSX_USB_H #define __RTSX_USB_H #include <linux/usb.h> #define DRV_NAME_RTSX_USB "rtsx_usb" #define DRV_NAME_RTSX_USB_SDMMC "rtsx_usb_sdmmc" #define DRV_NAME_RTSX_USB_MS "rtsx_usb_ms" /* related module names */ #define RTSX_USB_SD_CARD 0 #define RTSX_USB_MS_CARD 1 /* endpoint numbers */ #define EP_BULK_OUT 1 #define EP_BULK_IN 2 #define EP_INTR_IN 3 /* USB vendor requests */ #define RTSX_USB_REQ_REG_OP 0x00 #define RTSX_USB_REQ_POLL 0x02 /* miscellaneous parameters */ #define MIN_DIV_N 60 #define MAX_DIV_N 120 #define MAX_PHASE 15 #define RX_TUNING_CNT 3 #define QFN24 0 #define LQFP48 1 #define CHECK_PKG(ucr, pkg) ((ucr)->package == (pkg)) /* data structures */ struct rtsx_ucr { u16 vendor_id; u16 product_id; int package; u8 ic_version; bool is_rts5179; unsigned int cur_clk; u8 *cmd_buf; unsigned int cmd_idx; u8 *rsp_buf; struct usb_device *pusb_dev; struct usb_interface *pusb_intf; struct usb_sg_request current_sg; struct timer_list sg_timer; struct mutex dev_mutex; }; /* buffer size */ #define IOBUF_SIZE 1024 /* prototypes of exported functions */ extern int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status); extern int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data); extern int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, u8 data); extern int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, u8 data); extern int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data); extern void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type, u16 reg_addr, u8 mask, u8 data); extern int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout); extern int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout); extern int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe, void *buf, unsigned int len, int use_sg, unsigned int *act_len, int timeout); extern int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len); extern int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len); extern int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock, u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); extern int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card); /* card status */ #define SD_CD 0x01 #define MS_CD 0x02 #define XD_CD 0x04 #define CD_MASK (SD_CD | MS_CD | XD_CD) #define SD_WP 0x08 /* reader command field offset & parameters */ #define READ_REG_CMD 0 #define WRITE_REG_CMD 1 #define CHECK_REG_CMD 2 #define PACKET_TYPE 4 #define CNT_H 5 #define CNT_L 6 #define STAGE_FLAG 7 #define CMD_OFFSET 8 #define SEQ_WRITE_DATA_OFFSET 12 #define BATCH_CMD 0 #define SEQ_READ 1 #define SEQ_WRITE 2 #define STAGE_R 0x01 #define STAGE_DI 0x02 #define STAGE_DO 0x04 #define STAGE_MS_STATUS 0x08 #define STAGE_XD_STATUS 0x10 #define MODE_C 0x00 #define MODE_CR (STAGE_R) #define MODE_CDIR (STAGE_R | STAGE_DI) #define MODE_CDOR (STAGE_R | STAGE_DO) #define EP0_OP_SHIFT 14 #define EP0_READ_REG_CMD 2 #define EP0_WRITE_REG_CMD 3 #define rtsx_usb_cmd_hdr_tag(ucr) \ do { \ ucr->cmd_buf[0] = 'R'; \ ucr->cmd_buf[1] = 'T'; \ ucr->cmd_buf[2] = 'C'; \ ucr->cmd_buf[3] = 'R'; \ } while (0) static inline void rtsx_usb_init_cmd(struct rtsx_ucr *ucr) { rtsx_usb_cmd_hdr_tag(ucr); ucr->cmd_idx = 0; ucr->cmd_buf[PACKET_TYPE] = BATCH_CMD; } /* internal register address */ #define FPDCTL 0xFC00 #define SSC_DIV_N_0 0xFC07 #define SSC_CTL1 0xFC09 #define SSC_CTL2 0xFC0A #define CFG_MODE 0xFC0E #define CFG_MODE_1 0xFC0F 
#define RCCTL 0xFC14 #define SOF_WDOG 0xFC28 #define SYS_DUMMY0 0xFC30 #define MS_BLKEND 0xFD30 #define MS_READ_START 0xFD31 #define MS_READ_COUNT 0xFD32 #define MS_WRITE_START 0xFD33 #define MS_WRITE_COUNT 0xFD34 #define MS_COMMAND 0xFD35 #define MS_OLD_BLOCK_0 0xFD36 #define MS_OLD_BLOCK_1 0xFD37 #define MS_NEW_BLOCK_0 0xFD38 #define MS_NEW_BLOCK_1 0xFD39 #define MS_LOG_BLOCK_0 0xFD3A #define MS_LOG_BLOCK_1 0xFD3B #define MS_BUS_WIDTH 0xFD3C #define MS_PAGE_START 0xFD3D #define MS_PAGE_LENGTH 0xFD3E #define MS_CFG 0xFD40 #define MS_TPC 0xFD41 #define MS_TRANS_CFG 0xFD42 #define MS_TRANSFER 0xFD43 #define MS_INT_REG 0xFD44 #define MS_BYTE_CNT 0xFD45 #define MS_SECTOR_CNT_L 0xFD46 #define MS_SECTOR_CNT_H 0xFD47 #define MS_DBUS_H 0xFD48 #define CARD_DMA1_CTL 0xFD5C #define CARD_PULL_CTL1 0xFD60 #define CARD_PULL_CTL2 0xFD61 #define CARD_PULL_CTL3 0xFD62 #define CARD_PULL_CTL4 0xFD63 #define CARD_PULL_CTL5 0xFD64 #define CARD_PULL_CTL6 0xFD65 #define CARD_EXIST 0xFD6F #define CARD_INT_PEND 0xFD71 #define LDO_POWER_CFG 0xFD7B #define SD_CFG1 0xFDA0 #define SD_CFG2 0xFDA1 #define SD_CFG3 0xFDA2 #define SD_STAT1 0xFDA3 #define SD_STAT2 0xFDA4 #define SD_BUS_STAT 0xFDA5 #define SD_PAD_CTL 0xFDA6 #define SD_SAMPLE_POINT_CTL 0xFDA7 #define SD_PUSH_POINT_CTL 0xFDA8 #define SD_CMD0 0xFDA9 #define SD_CMD1 0xFDAA #define SD_CMD2 0xFDAB #define SD_CMD3 0xFDAC #define SD_CMD4 0xFDAD #define SD_CMD5 0xFDAE #define SD_BYTE_CNT_L 0xFDAF #define SD_BYTE_CNT_H 0xFDB0 #define SD_BLOCK_CNT_L 0xFDB1 #define SD_BLOCK_CNT_H 0xFDB2 #define SD_TRANSFER 0xFDB3 #define SD_CMD_STATE 0xFDB5 #define SD_DATA_STATE 0xFDB6 #define SD_VPCLK0_CTL 0xFC2A #define SD_VPCLK1_CTL 0xFC2B #define SD_DCMPS0_CTL 0xFC2C #define SD_DCMPS1_CTL 0xFC2D #define CARD_DMA1_CTL 0xFD5C #define HW_VERSION 0xFC01 #define SSC_CLK_FPGA_SEL 0xFC02 #define CLK_DIV 0xFC03 #define SFSM_ED 0xFC04 #define CD_DEGLITCH_WIDTH 0xFC20 #define CD_DEGLITCH_EN 0xFC21 #define AUTO_DELINK_EN 0xFC23 #define FPGA_PULL_CTL 0xFC1D #define CARD_CLK_SOURCE 0xFC2E #define CARD_SHARE_MODE 0xFD51 #define CARD_DRIVE_SEL 0xFD52 #define CARD_STOP 0xFD53 #define CARD_OE 0xFD54 #define CARD_AUTO_BLINK 0xFD55 #define CARD_GPIO 0xFD56 #define SD30_DRIVE_SEL 0xFD57 #define CARD_DATA_SOURCE 0xFD5D #define CARD_SELECT 0xFD5E #define CARD_CLK_EN 0xFD79 #define CARD_PWR_CTL 0xFD7A #define OCPCTL 0xFD80 #define OCPPARA1 0xFD81 #define OCPPARA2 0xFD82 #define OCPSTAT 0xFD83 #define HS_USB_STAT 0xFE01 #define HS_VCONTROL 0xFE26 #define HS_VSTAIN 0xFE27 #define HS_VLOADM 0xFE28 #define HS_VSTAOUT 0xFE29 #define MC_IRQ 0xFF00 #define MC_IRQEN 0xFF01 #define MC_FIFO_CTL 0xFF02 #define MC_FIFO_BC0 0xFF03 #define MC_FIFO_BC1 0xFF04 #define MC_FIFO_STAT 0xFF05 #define MC_FIFO_MODE 0xFF06 #define MC_FIFO_RD_PTR0 0xFF07 #define MC_FIFO_RD_PTR1 0xFF08 #define MC_DMA_CTL 0xFF10 #define MC_DMA_TC0 0xFF11 #define MC_DMA_TC1 0xFF12 #define MC_DMA_TC2 0xFF13 #define MC_DMA_TC3 0xFF14 #define MC_DMA_RST 0xFF15 #define RBUF_SIZE_MASK 0xFBFF #define RBUF_BASE 0xF000 #define PPBUF_BASE1 0xF800 #define PPBUF_BASE2 0xFA00 /* internal register value macros */ #define POWER_OFF 0x03 #define PARTIAL_POWER_ON 0x02 #define POWER_ON 0x00 #define POWER_MASK 0x03 #define LDO3318_PWR_MASK 0x0C #define LDO_ON 0x00 #define LDO_SUSPEND 0x08 #define LDO_OFF 0x0C #define DV3318_AUTO_PWR_OFF 0x10 #define FORCE_LDO_POWERB 0x60 /* LDO_POWER_CFG */ #define TUNE_SD18_MASK 0x1C #define TUNE_SD18_1V7 0x00 #define TUNE_SD18_1V8 (0x01 << 2) #define TUNE_SD18_1V9 (0x02 << 2) #define TUNE_SD18_2V0 (0x03 << 2) #define TUNE_SD18_2V7 
(0x04 << 2) #define TUNE_SD18_2V8 (0x05 << 2) #define TUNE_SD18_2V9 (0x06 << 2) #define TUNE_SD18_3V3 (0x07 << 2) /* CLK_DIV */ #define CLK_CHANGE 0x80 #define CLK_DIV_1 0x00 #define CLK_DIV_2 0x01 #define CLK_DIV_4 0x02 #define CLK_DIV_8 0x03 #define SSC_POWER_MASK 0x01 #define SSC_POWER_DOWN 0x01 #define SSC_POWER_ON 0x00 #define FPGA_VER 0x80 #define HW_VER_MASK 0x0F #define EXTEND_DMA1_ASYNC_SIGNAL 0x02 /* CFG_MODE*/ #define XTAL_FREE 0x80 #define CLK_MODE_MASK 0x03 #define CLK_MODE_12M_XTAL 0x00 #define CLK_MODE_NON_XTAL 0x01 #define CLK_MODE_24M_OSC 0x02 #define CLK_MODE_48M_OSC 0x03 /* CFG_MODE_1*/ #define RTS5179 0x02 #define NYET_EN 0x01 #define NYET_MSAK 0x01 #define SD30_DRIVE_MASK 0x07 #define SD20_DRIVE_MASK 0x03 #define DISABLE_SD_CD 0x08 #define DISABLE_MS_CD 0x10 #define DISABLE_XD_CD 0x20 #define SD_CD_DEGLITCH_EN 0x01 #define MS_CD_DEGLITCH_EN 0x02 #define XD_CD_DEGLITCH_EN 0x04 #define CARD_SHARE_LQFP48 0x04 #define CARD_SHARE_QFN24 0x00 #define CARD_SHARE_LQFP_SEL 0x04 #define CARD_SHARE_XD 0x00 #define CARD_SHARE_SD 0x01 #define CARD_SHARE_MS 0x02 #define CARD_SHARE_MASK 0x03 /* SD30_DRIVE_SEL */ #define DRIVER_TYPE_A 0x05 #define DRIVER_TYPE_B 0x03 #define DRIVER_TYPE_C 0x02 #define DRIVER_TYPE_D 0x01 /* SD_BUS_STAT */ #define SD_CLK_TOGGLE_EN 0x80 #define SD_CLK_FORCE_STOP 0x40 #define SD_DAT3_STATUS 0x10 #define SD_DAT2_STATUS 0x08 #define SD_DAT1_STATUS 0x04 #define SD_DAT0_STATUS 0x02 #define SD_CMD_STATUS 0x01 /* SD_PAD_CTL */ #define SD_IO_USING_1V8 0x80 #define SD_IO_USING_3V3 0x7F #define TYPE_A_DRIVING 0x00 #define TYPE_B_DRIVING 0x01 #define TYPE_C_DRIVING 0x02 #define TYPE_D_DRIVING 0x03 /* CARD_CLK_EN */ #define SD_CLK_EN 0x04 #define MS_CLK_EN 0x08 /* CARD_SELECT */ #define SD_MOD_SEL 2 #define MS_MOD_SEL 3 /* CARD_SHARE_MODE */ #define CARD_SHARE_LQFP48 0x04 #define CARD_SHARE_QFN24 0x00 #define CARD_SHARE_LQFP_SEL 0x04 #define CARD_SHARE_XD 0x00 #define CARD_SHARE_SD 0x01 #define CARD_SHARE_MS 0x02 #define CARD_SHARE_MASK 0x03 /* SSC_CTL1 */ #define SSC_RSTB 0x80 #define SSC_8X_EN 0x40 #define SSC_FIX_FRAC 0x20 #define SSC_SEL_1M 0x00 #define SSC_SEL_2M 0x08 #define SSC_SEL_4M 0x10 #define SSC_SEL_8M 0x18 /* SSC_CTL2 */ #define SSC_DEPTH_MASK 0x03 #define SSC_DEPTH_DISALBE 0x00 #define SSC_DEPTH_2M 0x01 #define SSC_DEPTH_1M 0x02 #define SSC_DEPTH_512K 0x03 /* SD_VPCLK0_CTL */ #define PHASE_CHANGE 0x80 #define PHASE_NOT_RESET 0x40 /* SD_TRANSFER */ #define SD_TRANSFER_START 0x80 #define SD_TRANSFER_END 0x40 #define SD_STAT_IDLE 0x20 #define SD_TRANSFER_ERR 0x10 #define SD_TM_NORMAL_WRITE 0x00 #define SD_TM_AUTO_WRITE_3 0x01 #define SD_TM_AUTO_WRITE_4 0x02 #define SD_TM_AUTO_READ_3 0x05 #define SD_TM_AUTO_READ_4 0x06 #define SD_TM_CMD_RSP 0x08 #define SD_TM_AUTO_WRITE_1 0x09 #define SD_TM_AUTO_WRITE_2 0x0A #define SD_TM_NORMAL_READ 0x0C #define SD_TM_AUTO_READ_1 0x0D #define SD_TM_AUTO_READ_2 0x0E #define SD_TM_AUTO_TUNING 0x0F /* SD_CFG1 */ #define SD_CLK_DIVIDE_0 0x00 #define SD_CLK_DIVIDE_256 0xC0 #define SD_CLK_DIVIDE_128 0x80 #define SD_CLK_DIVIDE_MASK 0xC0 #define SD_BUS_WIDTH_1BIT 0x00 #define SD_BUS_WIDTH_4BIT 0x01 #define SD_BUS_WIDTH_8BIT 0x02 #define SD_ASYNC_FIFO_RST 0x10 #define SD_20_MODE 0x00 #define SD_DDR_MODE 0x04 #define SD_30_MODE 0x08 /* SD_CFG2 */ #define SD_CALCULATE_CRC7 0x00 #define SD_NO_CALCULATE_CRC7 0x80 #define SD_CHECK_CRC16 0x00 #define SD_NO_CHECK_CRC16 0x40 #define SD_WAIT_CRC_TO_EN 0x20 #define SD_WAIT_BUSY_END 0x08 #define SD_NO_WAIT_BUSY_END 0x00 #define SD_CHECK_CRC7 0x00 #define SD_NO_CHECK_CRC7 0x04 #define 
SD_RSP_LEN_0 0x00 #define SD_RSP_LEN_6 0x01 #define SD_RSP_LEN_17 0x02 #define SD_RSP_TYPE_R0 0x04 #define SD_RSP_TYPE_R1 0x01 #define SD_RSP_TYPE_R1b 0x09 #define SD_RSP_TYPE_R2 0x02 #define SD_RSP_TYPE_R3 0x05 #define SD_RSP_TYPE_R4 0x05 #define SD_RSP_TYPE_R5 0x01 #define SD_RSP_TYPE_R6 0x01 #define SD_RSP_TYPE_R7 0x01 /* SD_STAT1 */ #define SD_CRC7_ERR 0x80 #define SD_CRC16_ERR 0x40 #define SD_CRC_WRITE_ERR 0x20 #define SD_CRC_WRITE_ERR_MASK 0x1C #define GET_CRC_TIME_OUT 0x02 #define SD_TUNING_COMPARE_ERR 0x01 /* SD_DATA_STATE */ #define SD_DATA_IDLE 0x80 /* CARD_DATA_SOURCE */ #define PINGPONG_BUFFER 0x01 #define RING_BUFFER 0x00 /* CARD_OE */ #define SD_OUTPUT_EN 0x04 #define MS_OUTPUT_EN 0x08 /* CARD_STOP */ #define SD_STOP 0x04 #define MS_STOP 0x08 #define SD_CLR_ERR 0x40 #define MS_CLR_ERR 0x80 /* CARD_CLK_SOURCE */ #define CRC_FIX_CLK (0x00 << 0) #define CRC_VAR_CLK0 (0x01 << 0) #define CRC_VAR_CLK1 (0x02 << 0) #define SD30_FIX_CLK (0x00 << 2) #define SD30_VAR_CLK0 (0x01 << 2) #define SD30_VAR_CLK1 (0x02 << 2) #define SAMPLE_FIX_CLK (0x00 << 4) #define SAMPLE_VAR_CLK0 (0x01 << 4) #define SAMPLE_VAR_CLK1 (0x02 << 4) /* SD_SAMPLE_POINT_CTL */ #define DDR_FIX_RX_DAT 0x00 #define DDR_VAR_RX_DAT 0x80 #define DDR_FIX_RX_DAT_EDGE 0x00 #define DDR_FIX_RX_DAT_14_DELAY 0x40 #define DDR_FIX_RX_CMD 0x00 #define DDR_VAR_RX_CMD 0x20 #define DDR_FIX_RX_CMD_POS_EDGE 0x00 #define DDR_FIX_RX_CMD_14_DELAY 0x10 #define SD20_RX_POS_EDGE 0x00 #define SD20_RX_14_DELAY 0x08 #define SD20_RX_SEL_MASK 0x08 /* SD_PUSH_POINT_CTL */ #define DDR_FIX_TX_CMD_DAT 0x00 #define DDR_VAR_TX_CMD_DAT 0x80 #define DDR_FIX_TX_DAT_14_TSU 0x00 #define DDR_FIX_TX_DAT_12_TSU 0x40 #define DDR_FIX_TX_CMD_NEG_EDGE 0x00 #define DDR_FIX_TX_CMD_14_AHEAD 0x20 #define SD20_TX_NEG_EDGE 0x00 #define SD20_TX_14_AHEAD 0x10 #define SD20_TX_SEL_MASK 0x10 #define DDR_VAR_SDCLK_POL_SWAP 0x01 /* MS_CFG */ #define SAMPLE_TIME_RISING 0x00 #define SAMPLE_TIME_FALLING 0x80 #define PUSH_TIME_DEFAULT 0x00 #define PUSH_TIME_ODD 0x40 #define NO_EXTEND_TOGGLE 0x00 #define EXTEND_TOGGLE_CHK 0x20 #define MS_BUS_WIDTH_1 0x00 #define MS_BUS_WIDTH_4 0x10 #define MS_BUS_WIDTH_8 0x18 #define MS_2K_SECTOR_MODE 0x04 #define MS_512_SECTOR_MODE 0x00 #define MS_TOGGLE_TIMEOUT_EN 0x00 #define MS_TOGGLE_TIMEOUT_DISEN 0x01 #define MS_NO_CHECK_INT 0x02 /* MS_TRANS_CFG */ #define WAIT_INT 0x80 #define NO_WAIT_INT 0x00 #define NO_AUTO_READ_INT_REG 0x00 #define AUTO_READ_INT_REG 0x40 #define MS_CRC16_ERR 0x20 #define MS_RDY_TIMEOUT 0x10 #define MS_INT_CMDNK 0x08 #define MS_INT_BREQ 0x04 #define MS_INT_ERR 0x02 #define MS_INT_CED 0x01 /* MS_TRANSFER */ #define MS_TRANSFER_START 0x80 #define MS_TRANSFER_END 0x40 #define MS_TRANSFER_ERR 0x20 #define MS_BS_STATE 0x10 #define MS_TM_READ_BYTES 0x00 #define MS_TM_NORMAL_READ 0x01 #define MS_TM_WRITE_BYTES 0x04 #define MS_TM_NORMAL_WRITE 0x05 #define MS_TM_AUTO_READ 0x08 #define MS_TM_AUTO_WRITE 0x0C #define MS_TM_SET_CMD 0x06 #define MS_TM_COPY_PAGE 0x07 #define MS_TM_MULTI_READ 0x02 #define MS_TM_MULTI_WRITE 0x03 /* MC_FIFO_CTL */ #define FIFO_FLUSH 0x01 /* MC_DMA_RST */ #define DMA_RESET 0x01 /* MC_DMA_CTL */ #define DMA_TC_EQ_0 0x80 #define DMA_DIR_TO_CARD 0x00 #define DMA_DIR_FROM_CARD 0x02 #define DMA_EN 0x01 #define DMA_128 (0 << 2) #define DMA_256 (1 << 2) #define DMA_512 (2 << 2) #define DMA_1024 (3 << 2) #define DMA_PACK_SIZE_MASK 0x0C /* CARD_INT_PEND */ #define XD_INT 0x10 #define MS_INT 0x08 #define SD_INT 0x04 /* LED operations*/ static inline int rtsx_usb_turn_on_led(struct rtsx_ucr *ucr) { return 
rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x02); } static inline int rtsx_usb_turn_off_led(struct rtsx_ucr *ucr) { return rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x03); } /* HW error clearing */ static inline void rtsx_usb_clear_fsm_err(struct rtsx_ucr *ucr) { rtsx_usb_ep0_write_register(ucr, SFSM_ED, 0xf8, 0xf8); } static inline void rtsx_usb_clear_dma_err(struct rtsx_ucr *ucr) { rtsx_usb_ep0_write_register(ucr, MC_FIFO_CTL, FIFO_FLUSH, FIFO_FLUSH); rtsx_usb_ep0_write_register(ucr, MC_DMA_RST, DMA_RESET, DMA_RESET); } #endif /* __RTS51139_H */
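The exported interface above is a small batched-command protocol: tag the command buffer, queue register operations, send the batch, then collect the response. The following is a minimal sketch of one batched register read, not part of the original header; the function name and the 100 ms timeouts are assumptions for illustration only.

/*
 * Illustrative sketch: read SD_CFG1 through the batched command path.
 * Assumes a successfully probed struct rtsx_ucr.
 */
static int example_read_sd_cfg1(struct rtsx_ucr *ucr, u8 *val)
{
	int ret;

	rtsx_usb_init_cmd(ucr);		/* write the 'RTCR' tag, reset cmd_idx */
	rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CFG1, 0, 0);

	ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100);	/* command + response stages */
	if (ret)
		return ret;

	ret = rtsx_usb_get_rsp(ucr, 1, 100);	/* fetch one response byte */
	if (ret)
		return ret;

	*val = ucr->rsp_buf[0];
	return 0;
}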
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/include/linux/mmc/host.h
 *
 *  Host driver specific definitions.
 */
#ifndef LINUX_MMC_HOST_H
#define LINUX_MMC_HOST_H

#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
#include <linux/debugfs.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/pm.h>
#include <linux/dma-direction.h>
#include <linux/blk-crypto-profile.h>
#include <linux/mmc/sd_uhs2.h>

struct mmc_ios {
	unsigned int clock;		/* clock rate */
	unsigned short vdd;
	unsigned int power_delay_ms;	/* waiting for stable power */

	/* vdd stores the bit number of the selected voltage range from below. */
	unsigned char bus_mode;		/* command output mode */

#define MMC_BUSMODE_OPENDRAIN	1
#define MMC_BUSMODE_PUSHPULL	2

	unsigned char chip_select;	/* SPI chip select */

#define MMC_CS_DONTCARE		0
#define MMC_CS_HIGH		1
#define MMC_CS_LOW		2

	unsigned char power_mode;	/* power supply mode */

#define MMC_POWER_OFF		0
#define MMC_POWER_UP		1
#define MMC_POWER_ON		2
#define MMC_POWER_UNDEFINED	3

	unsigned char bus_width;	/* data bus width */

#define MMC_BUS_WIDTH_1		0
#define MMC_BUS_WIDTH_4		2
#define MMC_BUS_WIDTH_8		3

	unsigned char timing;		/* timing specification used */

#define MMC_TIMING_LEGACY	0
#define MMC_TIMING_MMC_HS	1
#define MMC_TIMING_SD_HS	2
#define MMC_TIMING_UHS_SDR12	3
#define MMC_TIMING_UHS_SDR25	4
#define MMC_TIMING_UHS_SDR50	5
#define MMC_TIMING_UHS_SDR104	6
#define MMC_TIMING_UHS_DDR50	7
#define MMC_TIMING_MMC_DDR52	8
#define MMC_TIMING_MMC_HS200	9
#define MMC_TIMING_MMC_HS400	10
#define MMC_TIMING_SD_EXP	11
#define MMC_TIMING_SD_EXP_1_2V	12
#define MMC_TIMING_UHS2_SPEED_A		13
#define MMC_TIMING_UHS2_SPEED_A_HD	14
#define MMC_TIMING_UHS2_SPEED_B		15
#define MMC_TIMING_UHS2_SPEED_B_HD	16

	unsigned char signal_voltage;	/* signalling voltage (1.8V or 3.3V) */

#define MMC_SIGNAL_VOLTAGE_330	0
#define MMC_SIGNAL_VOLTAGE_180	1
#define MMC_SIGNAL_VOLTAGE_120	2

	unsigned char vqmmc2_voltage;
#define MMC_VQMMC2_VOLTAGE_180	0

	unsigned char drv_type;		/* driver type (A, B, C, D) */

#define MMC_SET_DRIVER_TYPE_B	0
#define MMC_SET_DRIVER_TYPE_A	1
#define MMC_SET_DRIVER_TYPE_C	2
#define MMC_SET_DRIVER_TYPE_D	3

	bool enhanced_strobe;		/* hs400es selection */
};

struct mmc_clk_phase {
	bool valid;
	u16 in_deg;
	u16 out_deg;
};

#define MMC_NUM_CLK_PHASES (MMC_TIMING_MMC_HS400 + 1)
struct mmc_clk_phase_map {
	struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES];
};

struct sd_uhs2_caps {
	u32 dap;
	u32 gap;
	u32 group_desc;
	u32 maxblk_len;
	u32 n_fcu;
	u8 n_lanes;
	u8 addr64;
	u8 card_type;
	u8 phy_rev;
	u8 speed_range;
	u8 n_lss_sync;
	u8 n_lss_dir;
	u8 link_rev;
	u8 host_type;
	u8 n_data_gap;

	u32 maxblk_len_set;
	u32 n_fcu_set;
	u8 n_lanes_set;
	u8 n_lss_sync_set;
	u8 n_lss_dir_set;
	u8 n_data_gap_set;
	u8 max_retry_set;
};

enum sd_uhs2_operation {
	UHS2_PHY_INIT = 0,
	UHS2_SET_CONFIG,
	UHS2_ENABLE_INT,
	UHS2_DISABLE_INT,
	UHS2_ENABLE_CLK,
	UHS2_DISABLE_CLK,
	UHS2_CHECK_DORMANT,
	UHS2_SET_IOS,
};

struct mmc_host;

enum mmc_err_stat {
	MMC_ERR_CMD_TIMEOUT,
	MMC_ERR_CMD_CRC,
	MMC_ERR_DAT_TIMEOUT,
	MMC_ERR_DAT_CRC,
	MMC_ERR_AUTO_CMD,
	MMC_ERR_ADMA,
	MMC_ERR_TUNING,
	MMC_ERR_CMDQ_RED,
	MMC_ERR_CMDQ_GCE,
	MMC_ERR_CMDQ_ICCE,
	MMC_ERR_REQ_TIMEOUT,
	MMC_ERR_CMDQ_REQ_TIMEOUT,
	MMC_ERR_ICE_CFG,
	MMC_ERR_CTRL_TIMEOUT,
	MMC_ERR_UNEXPECTED_IRQ,
	MMC_ERR_MAX,
};

struct mmc_host_ops {
	/*
	 * It is optional for the host to implement pre_req and post_req in
	 * order to support double buffering of requests (prepare one
	 * request while another request is active).
	 * pre_req() must always be followed by a post_req().
	 * To undo a call made to pre_req(), call post_req() with
	 * a nonzero err condition.
	 */
	void	(*post_req)(struct mmc_host *host, struct mmc_request *req,
			    int err);
	void	(*pre_req)(struct mmc_host *host, struct mmc_request *req);
	void	(*request)(struct mmc_host *host, struct mmc_request *req);
	/* Submit one request to host in atomic context. */
	int	(*request_atomic)(struct mmc_host *host,
				  struct mmc_request *req);

	/*
	 * Avoid calling the next three functions too often or in a "fast
	 * path", since the underlying controller might implement them in an
	 * expensive and/or slow way. Also note that these functions might
	 * sleep, so don't call them in atomic context!
	 */

	/*
	 * Notes to the set_ios callback:
	 * ios->clock might be 0. For some controllers, setting 0Hz
	 * as any other frequency works. However, some controllers
	 * explicitly need to disable the clock. Otherwise e.g. voltage
	 * switching might fail because the SDCLK is not really quiet.
	 */
	void	(*set_ios)(struct mmc_host *host, struct mmc_ios *ios);

	/*
	 * Return values for the get_ro callback should be:
	 *   0 for a read/write card
	 *   1 for a read-only card
	 *   -ENOSYS when not supported (equal to NULL callback)
	 *   or a negative errno value when something bad happened
	 */
	int	(*get_ro)(struct mmc_host *host);

	/*
	 * Return values for the get_cd callback should be:
	 *   0 for an absent card
	 *   1 for a present card
	 *   -ENOSYS when not supported (equal to NULL callback)
	 *   or a negative errno value when something bad happened
	 */
	int	(*get_cd)(struct mmc_host *host);

	void	(*enable_sdio_irq)(struct mmc_host *host, int enable);
	/* Mandatory callback when using MMC_CAP2_SDIO_IRQ_NOTHREAD. */
	void	(*ack_sdio_irq)(struct mmc_host *host);

	/* optional callback for HC quirks */
	void	(*init_card)(struct mmc_host *host, struct mmc_card *card);

	int	(*start_signal_voltage_switch)(struct mmc_host *host,
					       struct mmc_ios *ios);

	/* Check if the card is pulling dat[0] low */
	int	(*card_busy)(struct mmc_host *host);

	/* The tuning command opcode value is different for SD and eMMC cards */
	int	(*execute_tuning)(struct mmc_host *host, u32 opcode);

	/* Prepare HS400 target operating frequency depending on the host driver */
	int	(*prepare_hs400_tuning)(struct mmc_host *host,
					struct mmc_ios *ios);

	/* Execute HS400 tuning depending on the host driver */
	int	(*execute_hs400_tuning)(struct mmc_host *host,
					struct mmc_card *card);

	/* Optional callback to prepare for SD high-speed tuning */
	int	(*prepare_sd_hs_tuning)(struct mmc_host *host,
					struct mmc_card *card);

	/* Optional callback to execute SD high-speed tuning */
	int	(*execute_sd_hs_tuning)(struct mmc_host *host,
					struct mmc_card *card);

	/* Prepare switch to DDR during the HS400 init sequence */
	int	(*hs400_prepare_ddr)(struct mmc_host *host);

	/* Prepare for switching from HS400 to HS200 */
	void	(*hs400_downgrade)(struct mmc_host *host);

	/* Complete selection of HS400 */
	void	(*hs400_complete)(struct mmc_host *host);

	/* Prepare enhanced strobe depending on the host driver */
	void	(*hs400_enhanced_strobe)(struct mmc_host *host,
					 struct mmc_ios *ios);
	int	(*select_drive_strength)(struct mmc_card *card,
					 unsigned int max_dtr, int host_drv,
					 int card_drv, int *drv_type);
	/* Reset the eMMC card via RST_n */
	void	(*card_hw_reset)(struct mmc_host *host);
	void	(*card_event)(struct mmc_host *host);

	/*
	 * Optional callback to support controllers with HW issues for
	 * multiple I/O. Returns the number of supported blocks for the
	 * request.
	 */
	int	(*multi_io_quirk)(struct mmc_card *card,
				  unsigned int direction, int blk_size);

	/* Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */
	int	(*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios);

	/*
	 * The uhs2_control callback is used to execute SD UHS-II specific
	 * operations. It's mandatory to implement for hosts that support the
	 * SD UHS-II interface (MMC_CAP2_SD_UHS2). Expected return values are
	 * a negative errno in case of a failure or zero for success.
*/ int (*uhs2_control)(struct mmc_host *host, enum sd_uhs2_operation op); }; struct mmc_cqe_ops { /* Allocate resources, and make the CQE operational */ int (*cqe_enable)(struct mmc_host *host, struct mmc_card *card); /* Free resources, and make the CQE non-operational */ void (*cqe_disable)(struct mmc_host *host); /* * Issue a read, write or DCMD request to the CQE. Also deal with the * effect of ->cqe_off(). */ int (*cqe_request)(struct mmc_host *host, struct mmc_request *mrq); /* Free resources (e.g. DMA mapping) associated with the request */ void (*cqe_post_req)(struct mmc_host *host, struct mmc_request *mrq); /* * Prepare the CQE and host controller to accept non-CQ commands. There * is no corresponding ->cqe_on(), instead ->cqe_request() is required * to deal with that. */ void (*cqe_off)(struct mmc_host *host); /* * Wait for all CQE tasks to complete. Return an error if recovery * becomes necessary. */ int (*cqe_wait_for_idle)(struct mmc_host *host); /* * Notify CQE that a request has timed out. Return false if the request * completed or true if a timeout happened in which case indicate if * recovery is needed. */ bool (*cqe_timeout)(struct mmc_host *host, struct mmc_request *mrq, bool *recovery_needed); /* * Stop all CQE activity and prepare the CQE and host controller to * accept recovery commands. */ void (*cqe_recovery_start)(struct mmc_host *host); /* * Clear the queue and call mmc_cqe_request_done() on all requests. * Requests that errored will have the error set on the mmc_request * (data->error or cmd->error for DCMD). Requests that did not error * will have zero data bytes transferred. */ void (*cqe_recovery_finish)(struct mmc_host *host); }; /** * struct mmc_slot - MMC slot functions * * @cd_irq: MMC/SD-card slot hotplug detection IRQ or -EINVAL * @handler_priv: MMC/SD-card slot context * * Some MMC/SD host controllers implement slot-functions like card and * write-protect detection natively. However, a large number of controllers * leave these functions to the CPU. This struct provides a hook to attach * such slot-function drivers. 
*/ struct mmc_slot { int cd_irq; bool cd_wake_enabled; void *handler_priv; }; struct regulator; struct mmc_pwrseq; struct mmc_supply { struct regulator *vmmc; /* Card power supply */ struct regulator *vqmmc; /* Optional Vccq supply */ struct regulator *vqmmc2; /* Optional supply for phy */ }; struct mmc_ctx { struct task_struct *task; }; struct mmc_host { struct device *parent; struct device class_dev; int index; const struct mmc_host_ops *ops; struct mmc_pwrseq *pwrseq; unsigned int f_min; unsigned int f_max; unsigned int f_init; u32 ocr_avail; u32 ocr_avail_sdio; /* SDIO-specific OCR */ u32 ocr_avail_sd; /* SD-specific OCR */ u32 ocr_avail_mmc; /* MMC-specific OCR */ struct wakeup_source *ws; /* Enable consume of uevents */ u32 max_current_330; u32 max_current_300; u32 max_current_180; #define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */ #define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */ #define MMC_VDD_21_22 0x00000200 /* VDD voltage 2.1 ~ 2.2 */ #define MMC_VDD_22_23 0x00000400 /* VDD voltage 2.2 ~ 2.3 */ #define MMC_VDD_23_24 0x00000800 /* VDD voltage 2.3 ~ 2.4 */ #define MMC_VDD_24_25 0x00001000 /* VDD voltage 2.4 ~ 2.5 */ #define MMC_VDD_25_26 0x00002000 /* VDD voltage 2.5 ~ 2.6 */ #define MMC_VDD_26_27 0x00004000 /* VDD voltage 2.6 ~ 2.7 */ #define MMC_VDD_27_28 0x00008000 /* VDD voltage 2.7 ~ 2.8 */ #define MMC_VDD_28_29 0x00010000 /* VDD voltage 2.8 ~ 2.9 */ #define MMC_VDD_29_30 0x00020000 /* VDD voltage 2.9 ~ 3.0 */ #define MMC_VDD_30_31 0x00040000 /* VDD voltage 3.0 ~ 3.1 */ #define MMC_VDD_31_32 0x00080000 /* VDD voltage 3.1 ~ 3.2 */ #define MMC_VDD_32_33 0x00100000 /* VDD voltage 3.2 ~ 3.3 */ #define MMC_VDD_33_34 0x00200000 /* VDD voltage 3.3 ~ 3.4 */ #define MMC_VDD_34_35 0x00400000 /* VDD voltage 3.4 ~ 3.5 */ #define MMC_VDD_35_36 0x00800000 /* VDD voltage 3.5 ~ 3.6 */ u32 caps; /* Host capabilities */ #define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */ #define MMC_CAP_MMC_HIGHSPEED (1 << 1) /* Can do MMC high-speed timing */ #define MMC_CAP_SD_HIGHSPEED (1 << 2) /* Can do SD high-speed timing */ #define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */ #define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */ #define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ #define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ #define MMC_CAP_AGGRESSIVE_PM (1 << 7) /* Suspend (e)MMC/SD at idle */ #define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. 
eMMC */ #define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ #define MMC_CAP_3_3V_DDR (1 << 11) /* Host supports eMMC DDR 3.3V */ #define MMC_CAP_1_8V_DDR (1 << 12) /* Host supports eMMC DDR 1.8V */ #define MMC_CAP_1_2V_DDR (1 << 13) /* Host supports eMMC DDR 1.2V */ #define MMC_CAP_DDR (MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | \ MMC_CAP_1_2V_DDR) #define MMC_CAP_POWER_OFF_CARD (1 << 14) /* Can power off after boot */ #define MMC_CAP_BUS_WIDTH_TEST (1 << 15) /* CMD14/CMD19 bus width ok */ #define MMC_CAP_UHS_SDR12 (1 << 16) /* Host supports UHS SDR12 mode */ #define MMC_CAP_UHS_SDR25 (1 << 17) /* Host supports UHS SDR25 mode */ #define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */ #define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */ #define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */ #define MMC_CAP_UHS (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | \ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \ MMC_CAP_UHS_DDR50) #define MMC_CAP_SYNC_RUNTIME_PM (1 << 21) /* Synced runtime PM suspends. */ #define MMC_CAP_NEED_RSP_BUSY (1 << 22) /* Commands with R1B can't use R1. */ #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ #define MMC_CAP_DONE_COMPLETE (1 << 27) /* RW reqs can be completed within mmc_request_done() */ #define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */ #define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */ #define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */ #define MMC_CAP_HW_RESET (1 << 31) /* Reset the eMMC card via RST_n */ u32 caps2; /* More host capabilities */ #define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ #define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */ #define MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND (1 << 3) /* Can do full power cycle in suspend */ #define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ #define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ MMC_CAP2_HS200_1_2V_SDR) #define MMC_CAP2_SD_EXP (1 << 7) /* SD express via PCIe */ #define MMC_CAP2_SD_EXP_1_2V (1 << 8) /* SD express 1.2V */ #define MMC_CAP2_SD_UHS2 (1 << 9) /* SD UHS-II support */ #define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */ #define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */ #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */ #define MMC_CAP2_HS400_1_8V (1 << 15) /* Can support HS400 1.8V */ #define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */ #define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \ MMC_CAP2_HS400_1_2V) #define MMC_CAP2_HSX00_1_8V (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V) #define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V) #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) #define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */ #define MMC_CAP2_NO_SDIO (1 << 19) /* Do not send SDIO commands during initialization */ #define MMC_CAP2_HS400_ES (1 << 20) /* Host supports enhanced strobe */ #define MMC_CAP2_NO_SD (1 << 21) /* Do not send SD commands during initialization */ #define MMC_CAP2_NO_MMC (1 << 22) /* Do not send (e)MMC commands during initialization */ #define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */ #define 
MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */ #define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */ #define MMC_CAP2_MERGE_CAPABLE (1 << 26) /* Host can merge a segment over the segment size */ #ifdef CONFIG_MMC_CRYPTO #define MMC_CAP2_CRYPTO (1 << 27) /* Host supports inline encryption */ #else #define MMC_CAP2_CRYPTO 0 #endif #define MMC_CAP2_ALT_GPT_TEGRA (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */ bool uhs2_sd_tran; /* UHS-II flag for SD_TRAN state */ bool uhs2_app_cmd; /* UHS-II flag for APP command */ struct sd_uhs2_caps uhs2_caps; /* Host UHS-II capabilities */ int fixed_drv_type; /* fixed driver type for non-removable media */ mmc_pm_flag_t pm_caps; /* supported pm features */ /* host specific block data */ unsigned int max_seg_size; /* lim->max_segment_size */ unsigned short max_segs; /* lim->max_segments */ unsigned short unused; unsigned int max_req_size; /* maximum number of bytes in one req */ unsigned int max_blk_size; /* maximum size of one mmc block */ unsigned int max_blk_count; /* maximum number of blocks in one req */ unsigned int max_busy_timeout; /* max busy timeout in ms */ /* private data */ spinlock_t lock; /* lock for claim and bus ops */ struct mmc_ios ios; /* current io bus settings */ /* group bitfields together to minimize padding */ unsigned int use_spi_crc:1; unsigned int claimed:1; /* host exclusively claimed */ unsigned int doing_init_tune:1; /* initial tuning in progress */ unsigned int can_retune:1; /* re-tuning can be used */ unsigned int doing_retune:1; /* re-tuning in progress */ unsigned int retune_now:1; /* do re-tuning at next req */ unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */ unsigned int can_dma_map_merge:1; /* merging can be used */ unsigned int vqmmc_enabled:1; /* vqmmc regulator is enabled */ int rescan_disable; /* disable card detection */ int rescan_entered; /* used with nonremovable devices */ int need_retune; /* re-tuning is needed */ int hold_retune; /* hold off re-tuning */ unsigned int retune_period; /* re-tuning period in secs */ struct timer_list retune_timer; /* for periodic re-tuning */ bool trigger_card_event; /* card_event necessary */ struct mmc_card *card; /* device attached to this host */ wait_queue_head_t wq; struct mmc_ctx *claimer; /* context that has host claimed */ int claim_cnt; /* "claim" nesting count */ struct mmc_ctx default_ctx; /* default context */ struct delayed_work detect; int detect_change; /* card detect flag */ struct mmc_slot slot; const struct mmc_bus_ops *bus_ops; /* current bus driver */ unsigned int sdio_irqs; struct task_struct *sdio_irq_thread; struct work_struct sdio_irq_work; bool sdio_irq_pending; atomic_t sdio_irq_thread_abort; mmc_pm_flag_t pm_flags; /* requested pm features */ struct led_trigger *led; /* activity led */ #ifdef CONFIG_REGULATOR bool regulator_enabled; /* regulator state */ #endif struct mmc_supply supply; struct dentry *debugfs_root; /* Ongoing data transfer that allows commands during transfer */ struct mmc_request *ongoing_mrq; #ifdef CONFIG_FAIL_MMC_REQUEST struct fault_attr fail_mmc_request; #endif unsigned int actual_clock; /* Actual HC clock rate */ unsigned int slotno; /* used for sdio acpi binding */ int dsr_req; /* DSR value is valid */ u32 dsr; /* optional driver stage (DSR) value */ /* Command Queue Engine (CQE) support */ const struct mmc_cqe_ops *cqe_ops; void *cqe_private; int cqe_qdepth; bool 
cqe_enabled; bool cqe_on; /* Inline encryption support */ #ifdef CONFIG_MMC_CRYPTO struct blk_crypto_profile crypto_profile; #endif /* Host Software Queue support */ bool hsq_enabled; int hsq_depth; u32 err_stats[MMC_ERR_MAX]; unsigned long private[] ____cacheline_aligned; }; struct device_node; struct mmc_host *mmc_alloc_host(int extra, struct device *); struct mmc_host *devm_mmc_alloc_host(struct device *dev, int extra); int mmc_add_host(struct mmc_host *); void mmc_remove_host(struct mmc_host *); void mmc_free_host(struct mmc_host *); void mmc_of_parse_clk_phase(struct device *dev, struct mmc_clk_phase_map *map); int mmc_of_parse(struct mmc_host *host); int mmc_of_parse_voltage(struct mmc_host *host, u32 *mask); static inline void *mmc_priv(struct mmc_host *host) { return (void *)host->private; } static inline struct mmc_host *mmc_from_priv(void *priv) { return container_of(priv, struct mmc_host, private); } #ifdef CONFIG_MMC_CRYPTO static inline struct mmc_host * mmc_from_crypto_profile(struct blk_crypto_profile *profile) { return container_of(profile, struct mmc_host, crypto_profile); } #endif #define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI) #define mmc_dev(x) ((x)->parent) #define mmc_classdev(x) (&(x)->class_dev) #define mmc_hostname(x) (dev_name(&(x)->class_dev)) void mmc_detect_change(struct mmc_host *, unsigned long delay); void mmc_request_done(struct mmc_host *, struct mmc_request *); void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq); void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq); /* * May be called from host driver's system/runtime suspend/resume callbacks, * to know if SDIO IRQs has been claimed. */ static inline bool sdio_irq_claimed(struct mmc_host *host) { return host->sdio_irqs > 0; } static inline void mmc_signal_sdio_irq(struct mmc_host *host) { host->ops->enable_sdio_irq(host, 0); host->sdio_irq_pending = true; if (host->sdio_irq_thread) wake_up_process(host->sdio_irq_thread); } void sdio_signal_irq(struct mmc_host *host); #ifdef CONFIG_REGULATOR int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit); int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios); int mmc_regulator_set_vqmmc2(struct mmc_host *mmc, struct mmc_ios *ios); #else static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit) { return 0; } static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios) { return -EINVAL; } static inline int mmc_regulator_set_vqmmc2(struct mmc_host *mmc, struct mmc_ios *ios) { return -EINVAL; } #endif int mmc_regulator_get_supply(struct mmc_host *mmc); int mmc_regulator_enable_vqmmc(struct mmc_host *mmc); void mmc_regulator_disable_vqmmc(struct mmc_host *mmc); static inline int mmc_card_is_removable(struct mmc_host *host) { return !(host->caps & MMC_CAP_NONREMOVABLE); } static inline int mmc_card_keep_power(struct mmc_host *host) { return host->pm_flags & MMC_PM_KEEP_POWER; } static inline int mmc_card_wake_sdio_irq(struct mmc_host *host) { return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ; } /* TODO: Move to private header */ static inline int mmc_card_hs(struct mmc_card *card) { return card->host->ios.timing == MMC_TIMING_SD_HS || card->host->ios.timing == MMC_TIMING_MMC_HS; } /* TODO: Move to private header */ static inline int mmc_card_uhs(struct mmc_card *card) { return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 && card->host->ios.timing <= MMC_TIMING_UHS_DDR50; } static inline 
bool mmc_card_uhs2(struct mmc_host *host) { return host->ios.timing == MMC_TIMING_UHS2_SPEED_A || host->ios.timing == MMC_TIMING_UHS2_SPEED_A_HD || host->ios.timing == MMC_TIMING_UHS2_SPEED_B || host->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD; } void mmc_retune_timer_stop(struct mmc_host *host); static inline void mmc_retune_needed(struct mmc_host *host) { if (host->can_retune) host->need_retune = 1; } static inline bool mmc_can_retune(struct mmc_host *host) { return host->can_retune == 1; } static inline bool mmc_doing_retune(struct mmc_host *host) { return host->doing_retune == 1; } static inline bool mmc_doing_tune(struct mmc_host *host) { return host->doing_retune == 1 || host->doing_init_tune == 1; } static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data) { return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE; } static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host, enum mmc_err_stat stat) { host->err_stats[stat] += 1; } static inline int mmc_card_uhs2_hd_mode(struct mmc_host *host) { return host->ios.timing == MMC_TIMING_UHS2_SPEED_A_HD || host->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD; } int mmc_sd_switch(struct mmc_card *card, bool mode, int group, u8 value, u8 *resp); int mmc_send_status(struct mmc_card *card, u32 *status); int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode); int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); #endif /* LINUX_MMC_HOST_H */
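A host controller driver consumes this header by allocating a struct mmc_host, filling in its ops and limits, and registering it. The sketch below shows the minimal shape of such a driver; it is not part of host.h, all my_* names are hypothetical, and the actual hardware programming is elided.

/*
 * Illustrative sketch of a minimal host driver built on the API above.
 */
static void my_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	/* program controller clock, power mode and bus width from *ios */
}

static void my_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	/* issue mrq->cmd (and mrq->data, if any) to the hardware, then: */
	mmc_request_done(mmc, mrq);	/* complete the request */
}

static const struct mmc_host_ops my_ops = {
	.request	= my_request,
	.set_ios	= my_set_ios,
};

static int my_probe(struct device *dev)
{
	struct mmc_host *mmc = mmc_alloc_host(0, dev);

	if (!mmc)
		return -ENOMEM;

	mmc->ops = &my_ops;
	mmc->f_min = 400000;		/* card scan starts this low */
	mmc->f_max = 50000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED;

	return mmc_add_host(mmc);	/* triggers card detection */
}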
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Roccat Arvo driver for Linux
 *
 * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
 */

/*
 * Roccat Arvo is a gamer keyboard with 5 macro keys that can be configured in
 * 5 profiles.
*/ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/hid-roccat.h> #include "hid-ids.h" #include "hid-roccat-common.h" #include "hid-roccat-arvo.h" static ssize_t arvo_sysfs_show_mode_key(struct device *dev, struct device_attribute *attr, char *buf) { struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev->parent->parent)); struct arvo_mode_key temp_buf; int retval; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_MODE_KEY, &temp_buf, sizeof(struct arvo_mode_key)); mutex_unlock(&arvo->arvo_lock); if (retval) return retval; return sysfs_emit(buf, "%d\n", temp_buf.state); } static ssize_t arvo_sysfs_set_mode_key(struct device *dev, struct device_attribute *attr, char const *buf, size_t size) { struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev->parent->parent)); struct arvo_mode_key temp_buf; unsigned long state; int retval; retval = kstrtoul(buf, 10, &state); if (retval) return retval; temp_buf.command = ARVO_COMMAND_MODE_KEY; temp_buf.state = state; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_send(usb_dev, ARVO_COMMAND_MODE_KEY, &temp_buf, sizeof(struct arvo_mode_key)); mutex_unlock(&arvo->arvo_lock); if (retval) return retval; return size; } static DEVICE_ATTR(mode_key, 0660, arvo_sysfs_show_mode_key, arvo_sysfs_set_mode_key); static ssize_t arvo_sysfs_show_key_mask(struct device *dev, struct device_attribute *attr, char *buf) { struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev->parent->parent)); struct arvo_key_mask temp_buf; int retval; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_KEY_MASK, &temp_buf, sizeof(struct arvo_key_mask)); mutex_unlock(&arvo->arvo_lock); if (retval) return retval; return sysfs_emit(buf, "%d\n", temp_buf.key_mask); } static ssize_t arvo_sysfs_set_key_mask(struct device *dev, struct device_attribute *attr, char const *buf, size_t size) { struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev->parent->parent)); struct arvo_key_mask temp_buf; unsigned long key_mask; int retval; retval = kstrtoul(buf, 10, &key_mask); if (retval) return retval; temp_buf.command = ARVO_COMMAND_KEY_MASK; temp_buf.key_mask = key_mask; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_send(usb_dev, ARVO_COMMAND_KEY_MASK, &temp_buf, sizeof(struct arvo_key_mask)); mutex_unlock(&arvo->arvo_lock); if (retval) return retval; return size; } static DEVICE_ATTR(key_mask, 0660, arvo_sysfs_show_key_mask, arvo_sysfs_set_key_mask); /* retval is 1-5 on success, < 0 on error */ static int arvo_get_actual_profile(struct usb_device *usb_dev) { struct arvo_actual_profile temp_buf; int retval; retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE, &temp_buf, sizeof(struct arvo_actual_profile)); if (retval) return retval; return temp_buf.actual_profile; } static ssize_t arvo_sysfs_show_actual_profile(struct device *dev, struct device_attribute *attr, char *buf) { struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); return sysfs_emit(buf, "%d\n", arvo->actual_profile); } static ssize_t 
arvo_sysfs_set_actual_profile(struct device *dev, struct device_attribute *attr, char const *buf, size_t size) { struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev->parent->parent)); struct arvo_actual_profile temp_buf; unsigned long profile; int retval; retval = kstrtoul(buf, 10, &profile); if (retval) return retval; if (profile < 1 || profile > 5) return -EINVAL; temp_buf.command = ARVO_COMMAND_ACTUAL_PROFILE; temp_buf.actual_profile = profile; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_send(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE, &temp_buf, sizeof(struct arvo_actual_profile)); if (!retval) { arvo->actual_profile = profile; retval = size; } mutex_unlock(&arvo->arvo_lock); return retval; } static DEVICE_ATTR(actual_profile, 0660, arvo_sysfs_show_actual_profile, arvo_sysfs_set_actual_profile); static ssize_t arvo_sysfs_write(struct file *fp, struct kobject *kobj, void const *buf, loff_t off, size_t count, size_t real_size, uint command) { struct device *dev = kobj_to_dev(kobj)->parent->parent; struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval; if (off != 0 || count != real_size) return -EINVAL; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_send(usb_dev, command, buf, real_size); mutex_unlock(&arvo->arvo_lock); return (retval ? retval : real_size); } static ssize_t arvo_sysfs_read(struct file *fp, struct kobject *kobj, void *buf, loff_t off, size_t count, size_t real_size, uint command) { struct device *dev = kobj_to_dev(kobj)->parent->parent; struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval; if (off >= real_size) return 0; if (off != 0 || count != real_size) return -EINVAL; mutex_lock(&arvo->arvo_lock); retval = roccat_common2_receive(usb_dev, command, buf, real_size); mutex_unlock(&arvo->arvo_lock); return (retval ? 
retval : real_size); } static ssize_t arvo_sysfs_write_button(struct file *fp, struct kobject *kobj, const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return arvo_sysfs_write(fp, kobj, buf, off, count, sizeof(struct arvo_button), ARVO_COMMAND_BUTTON); } static const BIN_ATTR(button, 0220, NULL, arvo_sysfs_write_button, sizeof(struct arvo_button)); static ssize_t arvo_sysfs_read_info(struct file *fp, struct kobject *kobj, const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return arvo_sysfs_read(fp, kobj, buf, off, count, sizeof(struct arvo_info), ARVO_COMMAND_INFO); } static const BIN_ATTR(info, 0440, arvo_sysfs_read_info, NULL, sizeof(struct arvo_info)); static struct attribute *arvo_attrs[] = { &dev_attr_mode_key.attr, &dev_attr_key_mask.attr, &dev_attr_actual_profile.attr, NULL, }; static const struct bin_attribute *const arvo_bin_attributes[] = { &bin_attr_button, &bin_attr_info, NULL, }; static const struct attribute_group arvo_group = { .attrs = arvo_attrs, .bin_attrs_new = arvo_bin_attributes, }; static const struct attribute_group *arvo_groups[] = { &arvo_group, NULL, }; static const struct class arvo_class = { .name = "arvo", .dev_groups = arvo_groups, }; static int arvo_init_arvo_device_struct(struct usb_device *usb_dev, struct arvo_device *arvo) { int retval; mutex_init(&arvo->arvo_lock); retval = arvo_get_actual_profile(usb_dev); if (retval < 0) return retval; arvo->actual_profile = retval; return 0; } static int arvo_init_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct usb_device *usb_dev = interface_to_usbdev(intf); struct arvo_device *arvo; int retval; if (intf->cur_altsetting->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_KEYBOARD) { hid_set_drvdata(hdev, NULL); return 0; } arvo = kzalloc(sizeof(*arvo), GFP_KERNEL); if (!arvo) { hid_err(hdev, "can't alloc device descriptor\n"); return -ENOMEM; } hid_set_drvdata(hdev, arvo); retval = arvo_init_arvo_device_struct(usb_dev, arvo); if (retval) { hid_err(hdev, "couldn't init struct arvo_device\n"); goto exit_free; } retval = roccat_connect(&arvo_class, hdev, sizeof(struct arvo_roccat_report)); if (retval < 0) { hid_err(hdev, "couldn't init char dev\n"); } else { arvo->chrdev_minor = retval; arvo->roccat_claimed = 1; } return 0; exit_free: kfree(arvo); return retval; } static void arvo_remove_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct arvo_device *arvo; if (intf->cur_altsetting->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_KEYBOARD) return; arvo = hid_get_drvdata(hdev); if (arvo->roccat_claimed) roccat_disconnect(arvo->chrdev_minor); kfree(arvo); } static int arvo_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; if (!hid_is_usb(hdev)) return -EINVAL; retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); goto exit; } retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (retval) { hid_err(hdev, "hw start failed\n"); goto exit; } retval = arvo_init_specials(hdev); if (retval) { hid_err(hdev, "couldn't install keyboard\n"); goto exit_stop; } return 0; exit_stop: hid_hw_stop(hdev); exit: return retval; } static void arvo_remove(struct hid_device *hdev) { arvo_remove_specials(hdev); hid_hw_stop(hdev); } static void arvo_report_to_chrdev(struct arvo_device const *arvo, u8 const *data) { struct arvo_special_report const *special_report; struct arvo_roccat_report roccat_report; special_report = (struct 
arvo_special_report const *)data; roccat_report.profile = arvo->actual_profile; roccat_report.button = special_report->event & ARVO_SPECIAL_REPORT_EVENT_MASK_BUTTON; if ((special_report->event & ARVO_SPECIAL_REPORT_EVENT_MASK_ACTION) == ARVO_SPECIAL_REPORT_EVENT_ACTION_PRESS) roccat_report.action = ARVO_ROCCAT_REPORT_ACTION_PRESS; else roccat_report.action = ARVO_ROCCAT_REPORT_ACTION_RELEASE; roccat_report_event(arvo->chrdev_minor, (uint8_t const *)&roccat_report); } static int arvo_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct arvo_device *arvo = hid_get_drvdata(hdev); if (size != 3) return 0; if (arvo && arvo->roccat_claimed) arvo_report_to_chrdev(arvo, data); return 0; } static const struct hid_device_id arvo_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, { } }; MODULE_DEVICE_TABLE(hid, arvo_devices); static struct hid_driver arvo_driver = { .name = "arvo", .id_table = arvo_devices, .probe = arvo_probe, .remove = arvo_remove, .raw_event = arvo_raw_event }; static int __init arvo_init(void) { int retval; retval = class_register(&arvo_class); if (retval) return retval; retval = hid_register_driver(&arvo_driver); if (retval) class_unregister(&arvo_class); return retval; } static void __exit arvo_exit(void) { hid_unregister_driver(&arvo_driver); class_unregister(&arvo_class); } module_init(arvo_init); module_exit(arvo_exit); MODULE_AUTHOR("Stefan Achatz"); MODULE_DESCRIPTION("USB Roccat Arvo driver"); MODULE_LICENSE("GPL v2");
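The three DEVICE_ATTR entries above surface as writable sysfs files once the class device is registered. As a hedged illustration, a userspace program might switch the active profile as below; the exact sysfs path depends on kernel version and bus topology, so the one shown is only an assumed example.

/*
 * Illustrative userspace sketch: select profile 3 via sysfs.
 * The path is an assumption; locate the real node under /sys on your system.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/arvo/arvo0/actual_profile", "w");

	if (!f)
		return 1;
	fprintf(f, "3\n");	/* the driver accepts 1..5, see arvo_sysfs_set_actual_profile() */
	return fclose(f) ? 1 : 0;
}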
587 575 12 154 2 43 29 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 /* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _LINUX_FILE_REF_H #define _LINUX_FILE_REF_H #include <linux/atomic.h> #include <linux/preempt.h> #include <linux/types.h> /* * file_ref is a reference count implementation specifically for use by * files. It takes inspiration from rcuref but differs in key aspects * such as support for SLAB_TYPESAFE_BY_RCU type caches. * * FILE_REF_ONEREF FILE_REF_MAXREF * 0x0000000000000000UL 0x7FFFFFFFFFFFFFFFUL * <-------------------valid -------------------> * * FILE_REF_SATURATED * 0x8000000000000000UL 0xA000000000000000UL 0xBFFFFFFFFFFFFFFFUL * <-----------------------saturation zone----------------------> * * FILE_REF_RELEASED FILE_REF_DEAD * 0xC000000000000000UL 0xE000000000000000UL * <-------------------dead zone-------------------> * * FILE_REF_NOREF * 0xFFFFFFFFFFFFFFFFUL */ #ifdef CONFIG_64BIT #define FILE_REF_ONEREF 0x0000000000000000UL #define FILE_REF_MAXREF 0x7FFFFFFFFFFFFFFFUL #define FILE_REF_SATURATED 0xA000000000000000UL #define FILE_REF_RELEASED 0xC000000000000000UL #define FILE_REF_DEAD 0xE000000000000000UL #define FILE_REF_NOREF 0xFFFFFFFFFFFFFFFFUL #else #define FILE_REF_ONEREF 0x00000000U #define FILE_REF_MAXREF 0x7FFFFFFFU #define FILE_REF_SATURATED 0xA0000000U #define FILE_REF_RELEASED 0xC0000000U #define FILE_REF_DEAD 0xE0000000U #define FILE_REF_NOREF 0xFFFFFFFFU #endif typedef struct { #ifdef CONFIG_64BIT atomic64_t refcnt; #else atomic_t refcnt; #endif } file_ref_t; /** * file_ref_init - Initialize a file reference count * @ref: Pointer to the reference count * @cnt: The initial reference count typically '1' */ static inline void file_ref_init(file_ref_t *ref, unsigned long cnt) { atomic_long_set(&ref->refcnt, cnt - 1); } bool __file_ref_put(file_ref_t *ref, unsigned long cnt); /** * file_ref_get - Acquire one reference on a file * @ref: Pointer to the reference count * * Similar to atomic_inc_not_zero() but saturates at FILE_REF_MAXREF. * * Provides full memory ordering. * * Return: False if the attempt to acquire a reference failed. This happens * when the last reference has been put already. True if a reference * was successfully acquired */ static __always_inline __must_check bool file_ref_get(file_ref_t *ref) { /* * Unconditionally increase the reference count with full * ordering. The saturation and dead zones provide enough * tolerance for this. * * If this indicates negative the file in question the fail can * be freed and immediately reused due to SLAB_TYPSAFE_BY_RCU. * Hence, unconditionally altering the file reference count to * e.g., reset the file reference count back to the middle of * the deadzone risk end up marking someone else's file as dead * behind their back. 
* * It would be possible to do a careful: * * cnt = atomic_long_inc_return(); * if (likely(cnt >= 0)) * return true; * * and then something like: * * if (cnt >= FILE_REF_RELEASE) * atomic_long_try_cmpxchg(&ref->refcnt, &cnt, FILE_REF_DEAD), * * to set the value back to the middle of the deadzone. But it's * practically impossible to go from FILE_REF_DEAD to * FILE_REF_ONEREF. It would need 2305843009213693952/2^61 * file_ref_get()s to resurrect such a dead file. */ return !atomic_long_add_negative(1, &ref->refcnt); } /** * file_ref_inc - Acquire one reference on a file * @ref: Pointer to the reference count * * Acquire an additional reference on a file. Warns if the caller didn't * already hold a reference. */ static __always_inline void file_ref_inc(file_ref_t *ref) { long prior = atomic_long_fetch_inc_relaxed(&ref->refcnt); WARN_ONCE(prior < 0, "file_ref_inc() on a released file reference"); } /** * file_ref_put -- Release a file reference * @ref: Pointer to the reference count * * Provides release memory ordering, such that prior loads and stores * are done before, and provides an acquire ordering on success such * that free() must come after. * * Return: True if this was the last reference with no future references * possible. This signals the caller that it can safely release * the object which is protected by the reference counter. * False if there are still active references or the put() raced * with a concurrent get()/put() pair. Caller is not allowed to * release the protected object. */ static __always_inline __must_check bool file_ref_put(file_ref_t *ref) { long cnt; /* * While files are SLAB_TYPESAFE_BY_RCU and thus file_ref_put() * calls don't risk UAFs when a file is recyclyed, it is still * vulnerable to UAFs caused by freeing the whole slab page once * it becomes unused. Prevent file_ref_put() from being * preempted protects against this. */ guard(preempt)(); /* * Unconditionally decrease the reference count. The saturation * and dead zones provide enough tolerance for this. If this * fails then we need to handle the last reference drop and * cases inside the saturation and dead zones. */ cnt = atomic_long_dec_return(&ref->refcnt); if (cnt >= 0) return false; return __file_ref_put(ref, cnt); } /** * file_ref_put_close - drop a reference expecting it would transition to FILE_REF_NOREF * @ref: Pointer to the reference count * * Semantically it is equivalent to calling file_ref_put(), but it trades lower * performance in face of other CPUs also modifying the refcount for higher * performance when this happens to be the last reference. * * For the last reference file_ref_put() issues 2 atomics. One to drop the * reference and another to transition it to FILE_REF_DEAD. This routine does * the work in one step, but in order to do it has to pre-read the variable which * decreases scalability. * * Use with close() et al, stick to file_ref_put() by default. */ static __always_inline __must_check bool file_ref_put_close(file_ref_t *ref) { long old; old = atomic_long_read(&ref->refcnt); if (likely(old == FILE_REF_ONEREF)) { if (likely(atomic_long_try_cmpxchg(&ref->refcnt, &old, FILE_REF_DEAD))) return true; } return file_ref_put(ref); } /** * file_ref_read - Read the number of file references * @ref: Pointer to the reference count * * Return: The number of held references (0 ... N) */ static inline unsigned long file_ref_read(file_ref_t *ref) { unsigned long c = atomic_long_read(&ref->refcnt); /* Return 0 if within the DEAD zone. */ return c >= FILE_REF_RELEASED ? 
/*
 * __file_ref_read_raw - Return the value stored in ref->refcnt
 * @ref: Pointer to the reference count
 *
 * Return: The raw value found in the counter
 *
 * A hack for file_needs_f_pos_lock(); you probably want to use
 * file_ref_read() instead.
 */
static inline unsigned long __file_ref_read_raw(file_ref_t *ref)
{
	return atomic_long_read(&ref->refcnt);
}

#endif
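As a usage sketch (not part of the header): the intended get/put pattern around an object wrapping a file_ref_t. struct my_obj, my_obj_tryget(), my_obj_put() and my_obj_free() are invented names for illustration; the real consumer is the VFS file table, where objects live in a SLAB_TYPESAFE_BY_RCU cache.

/* Hypothetical consumer of file_ref_t, assuming <linux/file_ref.h> is
 * included. The counter starts at FILE_REF_ONEREF after
 * file_ref_init(&obj->ref, 1).
 */
struct my_obj {
	file_ref_t ref;
	/* ... payload ... */
};

static void my_obj_free(struct my_obj *obj);

static struct my_obj *my_obj_tryget(struct my_obj *obj)
{
	/* Fails once the last reference has been put. */
	if (!file_ref_get(&obj->ref))
		return NULL;
	return obj;
}

static void my_obj_put(struct my_obj *obj)
{
	/* True only for the final put; no later get() can succeed. */
	if (file_ref_put(&obj->ref))
		my_obj_free(obj);
}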
// SPDX-License-Identifier: GPL-2.0-only
/*
 * MLO link handling
 *
 * Copyright (C) 2022-2024 Intel Corporation
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "key.h"
#include "debugfs_netdev.h"

void ieee80211_link_setup(struct ieee80211_link_data *link)
{
	if (link->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_mgd_setup_link(link);
}

void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, int link_id,
			 struct ieee80211_link_data *link,
			 struct ieee80211_bss_conf *link_conf)
{
	bool deflink = link_id < 0;

	if (link_id < 0)
		link_id = 0;

	rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf);
	rcu_assign_pointer(sdata->link[link_id], link);

	link->sdata = sdata;
	link->link_id = link_id;
	link->conf = link_conf;
	link_conf->link_id = link_id;
	link_conf->vif = &sdata->vif;
	link->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
	link->user_power_level = sdata->local->user_power_level;
	link_conf->txpower = INT_MIN;

	wiphy_work_init(&link->csa.finalize_work,
			ieee80211_csa_finalize_work);
	wiphy_work_init(&link->color_change_finalize_work,
			ieee80211_color_change_finalize_work);
	wiphy_delayed_work_init(&link->color_collision_detect_work,
				ieee80211_color_collision_detection_work);
	INIT_LIST_HEAD(&link->assigned_chanctx_list);
	INIT_LIST_HEAD(&link->reserved_chanctx_list);
wiphy_delayed_work_init(&link->dfs_cac_timer_work, ieee80211_dfs_cac_timer_work); if (!deflink) { switch (sdata->vif.type) { case NL80211_IFTYPE_AP: ether_addr_copy(link_conf->addr, sdata->wdev.links[link_id].addr); link_conf->bssid = link_conf->addr; WARN_ON(!(sdata->wdev.valid_links & BIT(link_id))); break; case NL80211_IFTYPE_STATION: /* station sets the bssid in ieee80211_mgd_setup_link */ break; default: WARN_ON(1); } ieee80211_link_debugfs_add(link); } } void ieee80211_link_stop(struct ieee80211_link_data *link) { if (link->sdata->vif.type == NL80211_IFTYPE_STATION) ieee80211_mgd_stop_link(link); wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy, &link->color_collision_detect_work); wiphy_work_cancel(link->sdata->local->hw.wiphy, &link->color_change_finalize_work); wiphy_work_cancel(link->sdata->local->hw.wiphy, &link->csa.finalize_work); if (link->sdata->wdev.links[link->link_id].cac_started) { wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy, &link->dfs_cac_timer_work); cfg80211_cac_event(link->sdata->dev, &link->conf->chanreq.oper, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL, link->link_id); } ieee80211_link_release_channel(link); } struct link_container { struct ieee80211_link_data data; struct ieee80211_bss_conf conf; }; static void ieee80211_tear_down_links(struct ieee80211_sub_if_data *sdata, struct link_container **links, u16 mask) { struct ieee80211_link_data *link; LIST_HEAD(keys); unsigned int link_id; for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { if (!(mask & BIT(link_id))) continue; link = &links[link_id]->data; if (link_id == 0 && !link) link = &sdata->deflink; if (WARN_ON(!link)) continue; ieee80211_remove_link_keys(link, &keys); ieee80211_link_debugfs_remove(link); ieee80211_link_stop(link); } synchronize_rcu(); ieee80211_free_key_list(sdata->local, &keys); } static void ieee80211_free_links(struct ieee80211_sub_if_data *sdata, struct link_container **links) { unsigned int link_id; for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) kfree(links[link_id]); } static int ieee80211_check_dup_link_addrs(struct ieee80211_sub_if_data *sdata) { unsigned int i, j; for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { struct ieee80211_link_data *link1; link1 = sdata_dereference(sdata->link[i], sdata); if (!link1) continue; for (j = i + 1; j < IEEE80211_MLD_MAX_NUM_LINKS; j++) { struct ieee80211_link_data *link2; link2 = sdata_dereference(sdata->link[j], sdata); if (!link2) continue; if (ether_addr_equal(link1->conf->addr, link2->conf->addr)) return -EALREADY; } } return 0; } static void ieee80211_set_vif_links_bitmaps(struct ieee80211_sub_if_data *sdata, u16 valid_links, u16 dormant_links) { sdata->vif.valid_links = valid_links; sdata->vif.dormant_links = dormant_links; if (!valid_links || WARN((~valid_links & dormant_links) || !(valid_links & ~dormant_links), "Invalid links: valid=0x%x, dormant=0x%x", valid_links, dormant_links)) { sdata->vif.active_links = 0; sdata->vif.dormant_links = 0; return; } switch (sdata->vif.type) { case NL80211_IFTYPE_AP: /* in an AP all links are always active */ sdata->vif.active_links = valid_links; /* AP links are not expected to be disabled */ WARN_ON(dormant_links); break; case NL80211_IFTYPE_STATION: if (sdata->vif.active_links) break; sdata->vif.active_links = valid_links & ~dormant_links; WARN_ON(hweight16(sdata->vif.active_links) > 1); break; default: WARN_ON(1); } } static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata, struct link_container **to_free, u16 new_links, u16 
dormant_links) { u16 old_links = sdata->vif.valid_links; u16 old_active = sdata->vif.active_links; unsigned long add = new_links & ~old_links; unsigned long rem = old_links & ~new_links; unsigned int link_id; int ret; struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *link; struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]; struct ieee80211_link_data *old_data[IEEE80211_MLD_MAX_NUM_LINKS]; bool use_deflink = old_links == 0; /* set for error case */ lockdep_assert_wiphy(sdata->local->hw.wiphy); memset(to_free, 0, sizeof(links)); if (old_links == new_links && dormant_links == sdata->vif.dormant_links) return 0; /* if there were no old links, need to clear the pointers to deflink */ if (!old_links) rem |= BIT(0); /* allocate new link structures first */ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) { ret = -ENOMEM; goto free; } links[link_id] = link; } /* keep track of the old pointers for the driver */ BUILD_BUG_ON(sizeof(old) != sizeof(sdata->vif.link_conf)); memcpy(old, sdata->vif.link_conf, sizeof(old)); /* and for us in error cases */ BUILD_BUG_ON(sizeof(old_data) != sizeof(sdata->link)); memcpy(old_data, sdata->link, sizeof(old_data)); /* grab old links to free later */ for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) { if (rcu_access_pointer(sdata->link[link_id]) != &sdata->deflink) { /* * we must have allocated the data through this path so * we know we can free both at the same time */ to_free[link_id] = container_of(rcu_access_pointer(sdata->link[link_id]), typeof(*links[link_id]), data); } RCU_INIT_POINTER(sdata->link[link_id], NULL); RCU_INIT_POINTER(sdata->vif.link_conf[link_id], NULL); } if (!old_links) ieee80211_debugfs_recreate_netdev(sdata, true); /* link them into data structures */ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { WARN_ON(!use_deflink && rcu_access_pointer(sdata->link[link_id]) == &sdata->deflink); link = links[link_id]; ieee80211_link_init(sdata, link_id, &link->data, &link->conf); ieee80211_link_setup(&link->data); } if (new_links == 0) ieee80211_link_init(sdata, -1, &sdata->deflink, &sdata->vif.bss_conf); ret = ieee80211_check_dup_link_addrs(sdata); if (!ret) { /* for keys we will not be able to undo this */ ieee80211_tear_down_links(sdata, to_free, rem); ieee80211_set_vif_links_bitmaps(sdata, new_links, dormant_links); /* tell the driver */ ret = drv_change_vif_links(sdata->local, sdata, old_links & old_active, new_links & sdata->vif.active_links, old); if (!new_links) ieee80211_debugfs_recreate_netdev(sdata, false); } if (ret) { /* restore config */ memcpy(sdata->link, old_data, sizeof(old_data)); memcpy(sdata->vif.link_conf, old, sizeof(old)); ieee80211_set_vif_links_bitmaps(sdata, old_links, dormant_links); /* and free (only) the newly allocated links */ memset(to_free, 0, sizeof(links)); goto free; } /* use deflink/bss_conf again if and only if there are no more links */ use_deflink = new_links == 0; goto deinit; free: /* if we failed during allocation, only free all */ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { kfree(links[link_id]); links[link_id] = NULL; } deinit: if (use_deflink) ieee80211_link_init(sdata, -1, &sdata->deflink, &sdata->vif.bss_conf); return ret; } int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata, u16 new_links, u16 dormant_links) { struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS]; int ret; ret = ieee80211_vif_update_links(sdata, links, new_links, dormant_links); 
ieee80211_free_links(sdata, links); return ret; } static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata, u16 active_links) { struct ieee80211_bss_conf *link_confs[IEEE80211_MLD_MAX_NUM_LINKS]; struct ieee80211_local *local = sdata->local; u16 old_active = sdata->vif.active_links; unsigned long rem = old_active & ~active_links; unsigned long add = active_links & ~old_active; struct sta_info *sta; unsigned int link_id; int ret, i; if (!ieee80211_sdata_running(sdata)) return -ENETDOWN; if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EINVAL; if (active_links & ~ieee80211_vif_usable_links(&sdata->vif)) return -EINVAL; /* nothing to do */ if (old_active == active_links) return 0; for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) link_confs[i] = sdata_dereference(sdata->vif.link_conf[i], sdata); if (add) { sdata->vif.active_links |= active_links; ret = drv_change_vif_links(local, sdata, old_active, sdata->vif.active_links, link_confs); if (ret) { sdata->vif.active_links = old_active; return ret; } } for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) { struct ieee80211_link_data *link; link = sdata_dereference(sdata->link[link_id], sdata); ieee80211_teardown_tdls_peers(link); __ieee80211_link_release_channel(link, true); /* * If CSA is (still) active while the link is deactivated, * just schedule the channel switch work for the time we * had previously calculated, and we'll take the process * from there. */ if (link->conf->csa_active) wiphy_delayed_work_queue(local->hw.wiphy, &link->u.mgd.csa.switch_work, link->u.mgd.csa.time - jiffies); } for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { struct ieee80211_link_data *link; link = sdata_dereference(sdata->link[link_id], sdata); /* * This call really should not fail. Unfortunately, it appears * that this may happen occasionally with some drivers. Should * it happen, we are stuck in a bad place as going backwards is * not really feasible. * * So lets just tell link_use_channel that it must not fail to * assign the channel context (from mac80211's perspective) and * assume the driver is going to trigger a recovery flow if it * had a failure. * That really is not great nor guaranteed to work. But at least * the internal mac80211 state remains consistent and there is * a chance that we can recover. */ ret = _ieee80211_link_use_channel(link, &link->conf->chanreq, IEEE80211_CHANCTX_SHARED, true); WARN_ON_ONCE(ret); /* * inform about the link info changed parameters after all * stations are also added */ } list_for_each_entry(sta, &local->sta_list, list) { if (sdata != sta->sdata) continue; /* this is very temporary, but do it anyway */ __ieee80211_sta_recalc_aggregates(sta, old_active | active_links); ret = drv_change_sta_links(local, sdata, &sta->sta, old_active, old_active | active_links); WARN_ON_ONCE(ret); } ret = ieee80211_key_switch_links(sdata, rem, add); WARN_ON_ONCE(ret); list_for_each_entry(sta, &local->sta_list, list) { if (sdata != sta->sdata) continue; __ieee80211_sta_recalc_aggregates(sta, active_links); ret = drv_change_sta_links(local, sdata, &sta->sta, old_active | active_links, active_links); WARN_ON_ONCE(ret); /* * Do it again, just in case - the driver might very * well have called ieee80211_sta_recalc_aggregates() * from there when filling in the new links, which * would set it wrong since the vif's active links are * not switched yet... 
*/ __ieee80211_sta_recalc_aggregates(sta, active_links); } for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { struct ieee80211_link_data *link; link = sdata_dereference(sdata->link[link_id], sdata); ieee80211_mgd_set_link_qos_params(link); ieee80211_link_info_change_notify(sdata, link, BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_ERP_SLOT | BSS_CHANGED_HT | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BSSID | BSS_CHANGED_CQM | BSS_CHANGED_QOS | BSS_CHANGED_TXPOWER | BSS_CHANGED_BANDWIDTH | BSS_CHANGED_TWT | BSS_CHANGED_HE_OBSS_PD | BSS_CHANGED_HE_BSS_COLOR); } old_active = sdata->vif.active_links; sdata->vif.active_links = active_links; if (rem) { ret = drv_change_vif_links(local, sdata, old_active, active_links, link_confs); WARN_ON_ONCE(ret); } return 0; } int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; u16 old_active; int ret; lockdep_assert_wiphy(local->hw.wiphy); if (WARN_ON(!active_links)) return -EINVAL; old_active = sdata->vif.active_links; if (old_active == active_links) return 0; if (!drv_can_activate_links(local, sdata, active_links)) return -EINVAL; if (old_active & active_links) { /* * if there's at least one link that stays active across * the change then switch to it (to those) first, and * then enable the additional links */ ret = _ieee80211_set_active_links(sdata, old_active & active_links); if (!ret) ret = _ieee80211_set_active_links(sdata, active_links); } else { /* otherwise switch directly */ ret = _ieee80211_set_active_links(sdata, active_links); } return ret; } EXPORT_SYMBOL_GPL(ieee80211_set_active_links); void ieee80211_set_active_links_async(struct ieee80211_vif *vif, u16 active_links) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); if (WARN_ON(!active_links)) return; if (!ieee80211_sdata_running(sdata)) return; if (sdata->vif.type != NL80211_IFTYPE_STATION) return; if (active_links & ~ieee80211_vif_usable_links(&sdata->vif)) return; /* nothing to do */ if (sdata->vif.active_links == active_links) return; sdata->desired_active_links = active_links; wiphy_work_queue(sdata->local->hw.wiphy, &sdata->activate_links_work); } EXPORT_SYMBOL_GPL(ieee80211_set_active_links_async);
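A usage sketch of the exported API above (hypothetical driver code, not from this file): a driver switching the active link set on a station vif while holding the wiphy mutex. my_switch_links() is an invented helper; from atomic context, ieee80211_set_active_links_async() defers the same work instead.

/* Illustrative only: pick a new active-link set and apply it.
 * ieee80211_set_active_links() requires the wiphy mutex to be held.
 */
static int my_switch_links(struct ieee80211_vif *vif, u16 wanted_links)
{
	/* Only valid, non-dormant links may become active. */
	u16 usable = ieee80211_vif_usable_links(vif);

	if (wanted_links & ~usable)
		return -EINVAL;

	return ieee80211_set_active_links(vif, wanted_links);
}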
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2000-2002 Vojtech Pavlik <vojtech@ucw.cz>
 * Copyright (c) 2001-2002, 2007 Johann Deneux <johann.deneux@gmail.com>
 *
 * USB/RS232 I-Force joysticks and wheels.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/circ_buf.h>
#include <linux/mutex.h>

/* This module provides arbitrary resource management routines.
 * I use it to manage the device's memory.
 * Despite the name of this module, I am *not* going to access the ioports.
 */
#include <linux/ioport.h>

#define IFORCE_MAX_LENGTH	16
#define IFORCE_EFFECTS_MAX	32

/* Each force feedback effect is made of one core effect, which can be
 * associated with at most two effect modifiers
 */
#define FF_MOD1_IS_USED		0
#define FF_MOD2_IS_USED		1
#define FF_CORE_IS_USED		2
#define FF_CORE_IS_PLAYED	3	/* Effect is currently being played */
#define FF_CORE_SHOULD_PLAY	4	/* User wants the effect to be played */
#define FF_CORE_UPDATE		5	/* Effect is being updated */
#define FF_MODCORE_CNT		6

struct iforce_core_effect {
	/* Information about where modifiers are stored in the device's memory */
	struct resource mod1_chunk;
	struct resource mod2_chunk;
	unsigned long flags[BITS_TO_LONGS(FF_MODCORE_CNT)];
};

#define FF_CMD_EFFECT		0x010e
#define FF_CMD_ENVELOPE		0x0208
#define FF_CMD_MAGNITUDE	0x0303
#define FF_CMD_PERIOD		0x0407
#define FF_CMD_CONDITION	0x050a

#define FF_CMD_AUTOCENTER	0x4002
#define FF_CMD_PLAY		0x4103
#define FF_CMD_ENABLE		0x4201
#define FF_CMD_GAIN		0x4301

#define FF_CMD_QUERY		0xff01

/* Buffer for async write */
#define XMIT_SIZE		256
#define XMIT_INC(var, n)	(var) += n; (var) &= XMIT_SIZE - 1
/* iforce::xmit_flags */
#define IFORCE_XMIT_RUNNING	0
#define IFORCE_XMIT_AGAIN	1

struct iforce_device {
	u16 idvendor;
	u16 idproduct;
	char *name;
	signed short *btn;
	signed short *abs;
	signed short *ff;
};

struct iforce;

struct iforce_xport_ops {
	void (*xmit)(struct iforce *iforce);
	int (*get_id)(struct iforce *iforce, u8 id,
		      u8 *response_data, size_t *response_len);
	int (*start_io)(struct iforce *iforce);
	void (*stop_io)(struct iforce *iforce);
};

struct iforce {
	struct input_dev *dev;		/* Input device interface */
	struct iforce_device *type;
	const struct iforce_xport_ops *xport_ops;

	spinlock_t xmit_lock;
	/* Buffer used for asynchronous sending of bytes to the device */
	struct circ_buf xmit;
	unsigned char xmit_data[XMIT_SIZE];
	unsigned long xmit_flags[1];

	/* Force Feedback */
	wait_queue_head_t wait;
	struct resource device_memory;
	struct iforce_core_effect core_effects[IFORCE_EFFECTS_MAX];
	struct mutex mem_mutex;
};

/* Get hi and low bytes of a 16-bit int */
#define HI(a)	((unsigned char)((a) >> 8))
#define LO(a)	((unsigned char)((a) & 0xff))

/* For many parameters, it seems that 0x80 is a special value that should
 * be avoided. Instead, we replace this value by 0x7f */
#define HIFIX80(a) ((unsigned char)(((a) < 0 ? (a) + 255 : (a)) >> 8))
/* Encode a time value */
#define TIME_SCALE(a)	(a)

static inline int iforce_get_id_packet(struct iforce *iforce, u8 id,
				       u8 *response_data, size_t *response_len)
{
	return iforce->xport_ops->get_id(iforce, id,
					 response_data, response_len);
}

static inline void iforce_clear_xmit_and_wake(struct iforce *iforce)
{
	clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
	wake_up_all(&iforce->wait);
}

/* Public functions */
/* iforce-main.c */
int iforce_init_device(struct device *parent, u16 bustype,
		       struct iforce *iforce);

/* iforce-packets.c */
int iforce_control_playback(struct iforce*, u16 id, unsigned int);
void iforce_process_packet(struct iforce *iforce,
			   u8 packet_id, u8 *data, size_t len);
int iforce_send_packet(struct iforce *iforce, u16 cmd, unsigned char* data);
void iforce_dump_packet(struct iforce *iforce, char *msg, u16 cmd,
			unsigned char *data);

/* iforce-ff.c */
int iforce_upload_periodic(struct iforce *, struct ff_effect *, struct ff_effect *);
int iforce_upload_constant(struct iforce *, struct ff_effect *, struct ff_effect *);
int iforce_upload_condition(struct iforce *, struct ff_effect *, struct ff_effect *);

/* Public variables */
extern struct serio_driver iforce_serio_drv;
extern struct usb_driver iforce_usb_driver;
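For illustration (not part of the header): XMIT_INC implements wraparound on the transmit ring, which is correct only because XMIT_SIZE is a power of two, and HI()/LO() split a 16-bit command word into its two bytes. A minimal sketch:

/* Illustrative sketch of the macros above; demo_xmit_macros() is an
 * invented name and the values are worked out in the comments.
 */
static void demo_xmit_macros(void)
{
	int head = 254;
	unsigned char hi, lo;

	XMIT_INC(head, 4);	/* 254 + 4 = 258; 258 & 255 = 2 (wrapped) */

	hi = HI(FF_CMD_EFFECT);	/* 0x010e >> 8   = 0x01 */
	lo = LO(FF_CMD_EFFECT);	/* 0x010e & 0xff = 0x0e */
	(void)head; (void)hi; (void)lo;
}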
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INETDEVICE_H
#define _LINUX_INETDEVICE_H

#ifdef __KERNEL__

#include <linux/bitmap.h>
#include <linux/if.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/sysctl.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>

struct ipv4_devconf {
	void *sysctl;
	int data[IPV4_DEVCONF_MAX];
	DECLARE_BITMAP(state, IPV4_DEVCONF_MAX);
};

#define MC_HASH_SZ_LOG 9

struct in_device {
	struct net_device	*dev;
	netdevice_tracker	dev_tracker;

	refcount_t		refcnt;
	int			dead;
	struct in_ifaddr	__rcu *ifa_list;	/* IP ifaddr chain */

	struct ip_mc_list	__rcu *mc_list;		/* IP multicast filter chain */
	struct ip_mc_list	__rcu * __rcu *mc_hash;

	int			mc_count;		/* Number of installed mcasts */
	spinlock_t		mc_tomb_lock;
	struct ip_mc_list	*mc_tomb;
	unsigned long		mr_v1_seen;
	unsigned long		mr_v2_seen;
	unsigned long		mr_maxdelay;
	unsigned long		mr_qi;		/* Query Interval */
	unsigned long		mr_qri;		/* Query Response Interval */
	unsigned char		mr_qrv;		/* Query Robustness Variable */
	unsigned char		mr_gq_running;
	u32			mr_ifc_count;
	struct timer_list	mr_gq_timer;	/* general query timer */
	struct timer_list	mr_ifc_timer;	/* interface change timer */

	struct neigh_parms	*arp_parms;
	struct ipv4_devconf	cnf;
	struct rcu_head		rcu_head;
};

#define IPV4_DEVCONF(cnf, attr)    ((cnf).data[IPV4_DEVCONF_ ## attr - 1])
#define IPV4_DEVCONF_RO(cnf, attr) READ_ONCE(IPV4_DEVCONF(cnf, attr))
#define IPV4_DEVCONF_ALL(net, attr) \
	IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr)
#define IPV4_DEVCONF_ALL_RO(net, attr) READ_ONCE(IPV4_DEVCONF_ALL(net, attr))

static inline int ipv4_devconf_get(const struct in_device *in_dev, int index)
{
	index--;
	return READ_ONCE(in_dev->cnf.data[index]);
}

static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
				    int val)
{
	index--;
	set_bit(index, in_dev->cnf.state);
	WRITE_ONCE(in_dev->cnf.data[index], val);
}

static inline void ipv4_devconf_setall(struct in_device *in_dev)
{
	bitmap_fill(in_dev->cnf.state, IPV4_DEVCONF_MAX);
}

#define IN_DEV_CONF_GET(in_dev, attr) \
	ipv4_devconf_get((in_dev), IPV4_DEVCONF_ ## attr)
#define IN_DEV_CONF_SET(in_dev, attr, val) \
	ipv4_devconf_set((in_dev), IPV4_DEVCONF_ ## attr, (val))

#define IN_DEV_ANDCONF(in_dev, attr) \
	(IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr) && \
	 IN_DEV_CONF_GET((in_dev), attr))

#define
IN_DEV_NET_ORCONF(in_dev, net, attr) \ (IPV4_DEVCONF_ALL_RO(net, attr) || \ IN_DEV_CONF_GET((in_dev), attr)) #define IN_DEV_ORCONF(in_dev, attr) \ IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr) #define IN_DEV_MAXCONF(in_dev, attr) \ (max(IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr), \ IN_DEV_CONF_GET((in_dev), attr))) #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) #define IN_DEV_BFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), BC_FORWARDING) #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) #define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK) #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ ACCEPT_SOURCE_ROUTE) #define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL) #define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY) #define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS) #define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP) #define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP_PVLAN) #define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA) #define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS) #define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \ SECURE_REDIRECTS) #define IN_DEV_IDTAG(in_dev) IN_DEV_CONF_GET(in_dev, TAG) #define IN_DEV_MEDIUM_ID(in_dev) IN_DEV_CONF_GET(in_dev, MEDIUM_ID) #define IN_DEV_PROMOTE_SECONDARIES(in_dev) \ IN_DEV_ORCONF((in_dev), \ PROMOTE_SECONDARIES) #define IN_DEV_ROUTE_LOCALNET(in_dev) IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET) #define IN_DEV_NET_ROUTE_LOCALNET(in_dev, net) \ IN_DEV_NET_ORCONF(in_dev, net, ROUTE_LOCALNET) #define IN_DEV_RX_REDIRECTS(in_dev) \ ((IN_DEV_FORWARD(in_dev) && \ IN_DEV_ANDCONF((in_dev), ACCEPT_REDIRECTS)) \ || (!IN_DEV_FORWARD(in_dev) && \ IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS))) #define IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) \ IN_DEV_ORCONF((in_dev), IGNORE_ROUTES_WITH_LINKDOWN) #define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) #define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ACCEPT) #define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) #define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE) #define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY) #define IN_DEV_ARP_EVICT_NOCARRIER(in_dev) IN_DEV_ANDCONF((in_dev), \ ARP_EVICT_NOCARRIER) struct in_ifaddr { struct hlist_node addr_lst; struct in_ifaddr __rcu *ifa_next; struct in_device *ifa_dev; struct rcu_head rcu_head; __be32 ifa_local; __be32 ifa_address; __be32 ifa_mask; __u32 ifa_rt_priority; __be32 ifa_broadcast; unsigned char ifa_scope; unsigned char ifa_prefixlen; unsigned char ifa_proto; __u32 ifa_flags; char ifa_label[IFNAMSIZ]; /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. 
*/ __u32 ifa_valid_lft; __u32 ifa_preferred_lft; unsigned long ifa_cstamp; /* created timestamp */ unsigned long ifa_tstamp; /* updated timestamp */ }; struct in_validator_info { __be32 ivi_addr; struct in_device *ivi_dev; struct netlink_ext_ack *extack; }; int register_inetaddr_notifier(struct notifier_block *nb); int unregister_inetaddr_notifier(struct notifier_block *nb); int register_inetaddr_validator_notifier(struct notifier_block *nb); int unregister_inetaddr_validator_notifier(struct notifier_block *nb); void inet_netconf_notify_devconf(struct net *net, int event, int type, int ifindex, struct ipv4_devconf *devconf); struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref); static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) { return __ip_dev_find(net, addr, true); } int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); #ifdef CONFIG_INET int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size); #else static inline int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size) { return 0; } #endif void devinet_init(void); struct in_device *inetdev_by_index(struct net *, int); __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, __be32 local, int scope); struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask); struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr); static inline bool inet_ifa_match(__be32 addr, const struct in_ifaddr *ifa) { return !((addr^ifa->ifa_address)&ifa->ifa_mask); } /* * Check if a mask is acceptable. */ static __inline__ bool bad_mask(__be32 mask, __be32 addr) { __u32 hmask; if (addr & (mask = ~mask)) return true; hmask = ntohl(mask); if (hmask & (hmask+1)) return true; return false; } #define in_dev_for_each_ifa_rtnl(ifa, in_dev) \ for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \ ifa = rtnl_dereference(ifa->ifa_next)) #define in_dev_for_each_ifa_rtnl_net(net, ifa, in_dev) \ for (ifa = rtnl_net_dereference(net, (in_dev)->ifa_list); ifa; \ ifa = rtnl_net_dereference(net, ifa->ifa_next)) #define in_dev_for_each_ifa_rcu(ifa, in_dev) \ for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \ ifa = rcu_dereference(ifa->ifa_next)) static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) { return rcu_dereference(dev->ip_ptr); } static inline struct in_device *in_dev_get(const struct net_device *dev) { struct in_device *in_dev; rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); if (in_dev) refcount_inc(&in_dev->refcnt); rcu_read_unlock(); return in_dev; } static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev) { return rtnl_dereference(dev->ip_ptr); } static inline struct in_device *__in_dev_get_rtnl_net(const struct net_device *dev) { return rtnl_net_dereference(dev_net(dev), dev->ip_ptr); } /* called with rcu_read_lock or rtnl held */ static inline bool ip_ignore_linkdown(const struct net_device *dev) { struct in_device *in_dev; bool rc = false; in_dev = rcu_dereference_rtnl(dev->ip_ptr); if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev)) rc = true; return rc; } static inline struct neigh_parms *__in_dev_arp_parms_get_rcu(const struct net_device *dev) { struct in_device *in_dev = __in_dev_get_rcu(dev); return in_dev ? 
in_dev->arp_parms : NULL; } void in_dev_finish_destroy(struct in_device *idev); static inline void in_dev_put(struct in_device *idev) { if (refcount_dec_and_test(&idev->refcnt)) in_dev_finish_destroy(idev); } #define __in_dev_put(idev) refcount_dec(&(idev)->refcnt) #define in_dev_hold(idev) refcount_inc(&(idev)->refcnt) #endif /* __KERNEL__ */ static __inline__ __be32 inet_make_mask(int logmask) { if (logmask) return htonl(~((1U<<(32-logmask))-1)); return 0; } static __inline__ int inet_mask_len(__be32 mask) { __u32 hmask = ntohl(mask); if (!hmask) return 0; return 32 - ffz(~hmask); } #endif /* _LINUX_INETDEVICE_H */
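A small usage sketch (illustrative only, demo_mask_helpers() is an invented name) of the netmask helpers defined above: inet_make_mask() and inet_mask_len() are inverses for valid prefix lengths, and bad_mask() rejects non-contiguous masks.

/* Illustrative sketch of the mask helpers; values worked out in the
 * comments.
 */
static void demo_mask_helpers(void)
{
	__be32 mask = inet_make_mask(24);	/* htonl(0xffffff00) */
	int len = inet_mask_len(mask);		/* 24 */

	/* 255.255.0.255 is not a contiguous mask, so bad_mask() is true. */
	bool bad = bad_mask(htonl(0xffff00ff), 0);

	(void)len; (void)bad;
}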
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Mars-Semi MR97311A library
 * Copyright (C) 2005 <bradlch@hotmail.com>
 *
 * V4L2 by Jean-Francois Moine <http://moinejf.free.fr>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define MODULE_NAME "mars"

#include "gspca.h"
#include "jpeg.h"

MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA/Mars USB Camera Driver");
MODULE_LICENSE("GPL");

#define QUALITY 50

/* specific webcam descriptor */
struct sd {
	struct gspca_dev gspca_dev;	/* !!
must be the first item */ struct v4l2_ctrl *brightness; struct v4l2_ctrl *saturation; struct v4l2_ctrl *sharpness; struct v4l2_ctrl *gamma; struct { /* illuminator control cluster */ struct v4l2_ctrl *illum_top; struct v4l2_ctrl *illum_bottom; }; u8 jpeg_hdr[JPEG_HDR_SZ]; }; /* V4L2 controls supported by the driver */ static void setbrightness(struct gspca_dev *gspca_dev, s32 val); static void setcolors(struct gspca_dev *gspca_dev, s32 val); static void setgamma(struct gspca_dev *gspca_dev, s32 val); static void setsharpness(struct gspca_dev *gspca_dev, s32 val); static const struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 2}, {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1}, }; static const __u8 mi_data[0x20] = { /* 01 02 03 04 05 06 07 08 */ 0x48, 0x22, 0x01, 0x47, 0x10, 0x00, 0x00, 0x00, /* 09 0a 0b 0c 0d 0e 0f 10 */ 0x00, 0x01, 0x30, 0x01, 0x30, 0x01, 0x30, 0x01, /* 11 12 13 14 15 16 17 18 */ 0x30, 0x00, 0x04, 0x00, 0x06, 0x01, 0xe2, 0x02, /* 19 1a 1b 1c 1d 1e 1f 20 */ 0x82, 0x00, 0x20, 0x17, 0x80, 0x08, 0x0c, 0x00 }; /* write <len> bytes from gspca_dev->usb_buf */ static void reg_w(struct gspca_dev *gspca_dev, int len) { int alen, ret; if (gspca_dev->usb_err < 0) return; ret = usb_bulk_msg(gspca_dev->dev, usb_sndbulkpipe(gspca_dev->dev, 4), gspca_dev->usb_buf, len, &alen, 500); /* timeout in milliseconds */ if (ret < 0) { pr_err("reg write [%02x] error %d\n", gspca_dev->usb_buf[0], ret); gspca_dev->usb_err = ret; } } static void mi_w(struct gspca_dev *gspca_dev, u8 addr, u8 value) { gspca_dev->usb_buf[0] = 0x1f; gspca_dev->usb_buf[1] = 0; /* control byte */ gspca_dev->usb_buf[2] = addr; gspca_dev->usb_buf[3] = value; reg_w(gspca_dev, 4); } static void setbrightness(struct gspca_dev *gspca_dev, s32 val) { gspca_dev->usb_buf[0] = 0x61; gspca_dev->usb_buf[1] = val; reg_w(gspca_dev, 2); } static void setcolors(struct gspca_dev *gspca_dev, s32 val) { gspca_dev->usb_buf[0] = 0x5f; gspca_dev->usb_buf[1] = val << 3; gspca_dev->usb_buf[2] = ((val >> 2) & 0xf8) | 0x04; reg_w(gspca_dev, 3); } static void setgamma(struct gspca_dev *gspca_dev, s32 val) { gspca_dev->usb_buf[0] = 0x06; gspca_dev->usb_buf[1] = val * 0x40; reg_w(gspca_dev, 2); } static void setsharpness(struct gspca_dev *gspca_dev, s32 val) { gspca_dev->usb_buf[0] = 0x67; gspca_dev->usb_buf[1] = val * 4 + 3; reg_w(gspca_dev, 2); } static void setilluminators(struct gspca_dev *gspca_dev, bool top, bool bottom) { /* both are off if not streaming */ gspca_dev->usb_buf[0] = 0x22; if (top) gspca_dev->usb_buf[1] = 0x76; else if (bottom) gspca_dev->usb_buf[1] = 0x7a; else gspca_dev->usb_buf[1] = 0x7e; reg_w(gspca_dev, 2); } static int mars_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (ctrl->id == V4L2_CID_ILLUMINATORS_1) { /* only one can be on at a time */ if (ctrl->is_new && ctrl->val) sd->illum_bottom->val = 0; if (sd->illum_bottom->is_new && sd->illum_bottom->val) sd->illum_top->val = 0; } if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightness(gspca_dev, ctrl->val); break; case V4L2_CID_SATURATION: setcolors(gspca_dev, ctrl->val); break; case V4L2_CID_GAMMA: setgamma(gspca_dev, ctrl->val); break; case 
V4L2_CID_ILLUMINATORS_1: setilluminators(gspca_dev, sd->illum_top->val, sd->illum_bottom->val); break; case V4L2_CID_SHARPNESS: setsharpness(gspca_dev, ctrl->val); break; default: return -EINVAL; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops mars_ctrl_ops = { .s_ctrl = mars_s_ctrl, }; /* this function is called at probe time */ static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 6); sd->brightness = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 30, 1, 15); sd->saturation = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops, V4L2_CID_SATURATION, 0, 255, 1, 200); sd->gamma = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops, V4L2_CID_GAMMA, 0, 3, 1, 1); sd->sharpness = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops, V4L2_CID_SHARPNESS, 0, 2, 1, 1); sd->illum_top = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops, V4L2_CID_ILLUMINATORS_1, 0, 1, 1, 0); sd->illum_top->flags |= V4L2_CTRL_FLAG_UPDATE; sd->illum_bottom = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops, V4L2_CID_ILLUMINATORS_2, 0, 1, 1, 0); sd->illum_bottom->flags |= V4L2_CTRL_FLAG_UPDATE; if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_cluster(2, &sd->illum_top); return 0; } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam; cam = &gspca_dev->cam; cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return 0; } static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 *data; int i; /* create the JPEG header */ jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height, gspca_dev->pixfmt.width, 0x21); /* JPEG 422 */ jpeg_set_qual(sd->jpeg_hdr, QUALITY); data = gspca_dev->usb_buf; data[0] = 0x01; /* address */ data[1] = 0x01; reg_w(gspca_dev, 2); /* Initialize the MR97113 chip register */ data[0] = 0x00; /* address */ data[1] = 0x0c | 0x01; /* reg 0 */ data[2] = 0x01; /* reg 1 */ data[3] = gspca_dev->pixfmt.width / 8; /* h_size , reg 2 */ data[4] = gspca_dev->pixfmt.height / 8; /* v_size , reg 3 */ data[5] = 0x30; /* reg 4, MI, PAS5101 : * 0x30 for 24mhz , 0x28 for 12mhz */ data[6] = 0x02; /* reg 5, H start - was 0x04 */ data[7] = v4l2_ctrl_g_ctrl(sd->gamma) * 0x40; /* reg 0x06: gamma */ data[8] = 0x01; /* reg 7, V start - was 0x03 */ /* if (h_size == 320 ) */ /* data[9]= 0x56; * reg 8, 24MHz, 2:1 scale down */ /* else */ data[9] = 0x52; /* reg 8, 24MHz, no scale down */ /*jfm: from win trace*/ data[10] = 0x18; reg_w(gspca_dev, 11); data[0] = 0x23; /* address */ data[1] = 0x09; /* reg 35, append frame header */ reg_w(gspca_dev, 2); data[0] = 0x3c; /* address */ /* if (gspca_dev->width == 1280) */ /* data[1] = 200; * reg 60, pc-cam frame size * (unit: 4KB) 800KB */ /* else */ data[1] = 50; /* 50 reg 60, pc-cam frame size * (unit: 4KB) 200KB */ reg_w(gspca_dev, 2); /* auto dark-gain */ data[0] = 0x5e; /* address */ data[1] = 0; /* reg 94, Y Gain (auto) */ /*jfm: from win trace*/ /* reg 0x5f/0x60 (LE) = saturation */ /* h (60): xxxx x100 * l (5f): xxxx x000 */ data[2] = v4l2_ctrl_g_ctrl(sd->saturation) << 3; data[3] = ((v4l2_ctrl_g_ctrl(sd->saturation) >> 2) & 0xf8) | 0x04; data[4] = v4l2_ctrl_g_ctrl(sd->brightness); /* reg 0x61 = brightness */ data[5] = 0x00; reg_w(gspca_dev, 6); data[0] = 0x67; 
/*jfm: from win trace*/ data[1] = v4l2_ctrl_g_ctrl(sd->sharpness) * 4 + 3; data[2] = 0x14; reg_w(gspca_dev, 3); data[0] = 0x69; data[1] = 0x2f; data[2] = 0x28; data[3] = 0x42; reg_w(gspca_dev, 4); data[0] = 0x63; data[1] = 0x07; reg_w(gspca_dev, 2); /*jfm: win trace - many writes here to reg 0x64*/ /* initialize the MI sensor */ for (i = 0; i < sizeof mi_data; i++) mi_w(gspca_dev, i + 1, mi_data[i]); data[0] = 0x00; data[1] = 0x4d; /* ISOC transferring enable... */ reg_w(gspca_dev, 2); setilluminators(gspca_dev, v4l2_ctrl_g_ctrl(sd->illum_top), v4l2_ctrl_g_ctrl(sd->illum_bottom)); return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (v4l2_ctrl_g_ctrl(sd->illum_top) || v4l2_ctrl_g_ctrl(sd->illum_bottom)) { setilluminators(gspca_dev, false, false); msleep(20); } gspca_dev->usb_buf[0] = 1; gspca_dev->usb_buf[1] = 0; reg_w(gspca_dev, 2); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; int p; if (len < 6) { /* gspca_dev->last_packet_type = DISCARD_PACKET; */ return; } for (p = 0; p < len - 6; p++) { if (data[0 + p] == 0xff && data[1 + p] == 0xff && data[2 + p] == 0x00 && data[3 + p] == 0xff && data[4 + p] == 0x96) { if (data[5 + p] == 0x64 || data[5 + p] == 0x65 || data[5 + p] == 0x66 || data[5 + p] == 0x67) { gspca_dbg(gspca_dev, D_PACK, "sof offset: %d len: %d\n", p, len); gspca_frame_add(gspca_dev, LAST_PACKET, data, p); /* put the JPEG header */ gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); data += p + 16; len -= p + 16; break; } } } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x093a, 0x050f)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
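For reference (illustrative only, demo_saturation_pack() is an invented name): both setcolors() and sd_start() above pack the 8-bit saturation control into the little-endian register pair 0x5f/0x60, matching the "l (5f): xxxx x000 / h (60): xxxx x100" layout noted in the comments. A condensed sketch of that encoding:

/* Illustrative sketch: compute the two saturation register bytes the
 * same way the driver does.
 */
static void demo_saturation_pack(u8 val, u8 *reg5f, u8 *reg60)
{
	*reg5f = val << 3;			/* low half,  "xxxx x000" */
	*reg60 = ((val >> 2) & 0xf8) | 0x04;	/* high half, "xxxx x100" */
}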
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Link physical devices with ACPI devices support
 *
 * Copyright (c) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (c) 2005 Intel Corp.
*/ #define pr_fmt(fmt) "ACPI: " fmt #include <linux/acpi_iort.h> #include <linux/export.h> #include <linux/init.h> #include <linux/list.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/rwsem.h> #include <linux/acpi.h> #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/platform_device.h> #include "internal.h" static LIST_HEAD(bus_type_list); static DECLARE_RWSEM(bus_type_sem); #define PHYSICAL_NODE_STRING "physical_node" #define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10) int register_acpi_bus_type(struct acpi_bus_type *type) { if (acpi_disabled) return -ENODEV; if (type && type->match && type->find_companion) { down_write(&bus_type_sem); list_add_tail(&type->list, &bus_type_list); up_write(&bus_type_sem); pr_info("bus type %s registered\n", type->name); return 0; } return -ENODEV; } EXPORT_SYMBOL_GPL(register_acpi_bus_type); int unregister_acpi_bus_type(struct acpi_bus_type *type) { if (acpi_disabled) return 0; if (type) { down_write(&bus_type_sem); list_del_init(&type->list); up_write(&bus_type_sem); pr_info("bus type %s unregistered\n", type->name); return 0; } return -ENODEV; } EXPORT_SYMBOL_GPL(unregister_acpi_bus_type); static struct acpi_bus_type *acpi_get_bus_type(struct device *dev) { struct acpi_bus_type *tmp, *ret = NULL; down_read(&bus_type_sem); list_for_each_entry(tmp, &bus_type_list, list) { if (tmp->match(dev)) { ret = tmp; break; } } up_read(&bus_type_sem); return ret; } #define FIND_CHILD_MIN_SCORE 1 #define FIND_CHILD_MID_SCORE 2 #define FIND_CHILD_MAX_SCORE 3 static int match_any(struct acpi_device *adev, void *not_used) { return 1; } static bool acpi_dev_has_children(struct acpi_device *adev) { return acpi_dev_for_each_child(adev, match_any, NULL) > 0; } static int find_child_checks(struct acpi_device *adev, bool check_children) { unsigned long long sta; acpi_status status; if (check_children && !acpi_dev_has_children(adev)) return -ENODEV; status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta); if (status == AE_NOT_FOUND) { /* * Special case: backlight device objects without _STA are * preferred to other objects with the same _ADR value, because * it is more likely that they are actually useful. */ if (adev->pnp.type.backlight) return FIND_CHILD_MID_SCORE; return FIND_CHILD_MIN_SCORE; } if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED)) return -ENODEV; /* * If the device has a _HID returning a valid ACPI/PNP device ID, it is * better to make it look less attractive here, so that the other device * with the same _ADR value (that may not have a valid device ID) can be * matched going forward. [This means a second spec violation in a row, * so whatever we do here is best effort anyway.] */ if (adev->pnp.type.platform_id) return FIND_CHILD_MIN_SCORE; return FIND_CHILD_MAX_SCORE; } struct find_child_walk_data { struct acpi_device *adev; u64 address; int score; bool check_sta; bool check_children; }; static int check_one_child(struct acpi_device *adev, void *data) { struct find_child_walk_data *wd = data; int score; if (!adev->pnp.type.bus_address || acpi_device_adr(adev) != wd->address) return 0; if (!wd->adev) { /* * This is the first matching object, so save it. If it is not * necessary to look for any other matching objects, stop the * search. */ wd->adev = adev; return !(wd->check_sta || wd->check_children); } /* * There is more than one matching device object with the same _ADR * value. That really is unexpected, so we are kind of beyond the scope * of the spec here. 
We have to choose which one to return, though. * * First, get the score for the previously found object and terminate * the walk if it is maximum. */ if (!wd->score) { score = find_child_checks(wd->adev, wd->check_children); if (score == FIND_CHILD_MAX_SCORE) return 1; wd->score = score; } /* * Second, if the object that has just been found has a better score, * replace the previously found one with it and terminate the walk if * the new score is maximum. */ score = find_child_checks(adev, wd->check_children); if (score > wd->score) { wd->adev = adev; if (score == FIND_CHILD_MAX_SCORE) return 1; wd->score = score; } /* Continue, because there may be better matches. */ return 0; } static struct acpi_device *acpi_find_child(struct acpi_device *parent, u64 address, bool check_children, bool check_sta) { struct find_child_walk_data wd = { .address = address, .check_children = check_children, .check_sta = check_sta, .adev = NULL, .score = 0, }; if (parent) acpi_dev_for_each_child(parent, check_one_child, &wd); return wd.adev; } struct acpi_device *acpi_find_child_device(struct acpi_device *parent, u64 address, bool check_children) { return acpi_find_child(parent, address, check_children, true); } EXPORT_SYMBOL_GPL(acpi_find_child_device); struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev, acpi_bus_address adr) { return acpi_find_child(adev, adr, false, false); } EXPORT_SYMBOL_GPL(acpi_find_child_by_adr); static void acpi_physnode_link_name(char *buf, unsigned int node_id) { if (node_id > 0) snprintf(buf, PHYSICAL_NODE_NAME_SIZE, PHYSICAL_NODE_STRING "%u", node_id); else strcpy(buf, PHYSICAL_NODE_STRING); } int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) { struct acpi_device_physical_node *physical_node, *pn; char physical_node_name[PHYSICAL_NODE_NAME_SIZE]; struct list_head *physnode_list; unsigned int node_id; int retval = -EINVAL; if (has_acpi_companion(dev)) { if (acpi_dev) { dev_warn(dev, "ACPI companion already set\n"); return -EINVAL; } else { acpi_dev = ACPI_COMPANION(dev); } } if (!acpi_dev) return -EINVAL; acpi_dev_get(acpi_dev); get_device(dev); physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL); if (!physical_node) { retval = -ENOMEM; goto err; } mutex_lock(&acpi_dev->physical_node_lock); /* * Keep the list sorted by node_id so that the IDs of removed nodes can * be recycled easily. */ physnode_list = &acpi_dev->physical_node_list; node_id = 0; list_for_each_entry(pn, &acpi_dev->physical_node_list, node) { /* Sanity check. 
*/ if (pn->dev == dev) { mutex_unlock(&acpi_dev->physical_node_lock); dev_warn(dev, "Already associated with ACPI node\n"); kfree(physical_node); if (ACPI_COMPANION(dev) != acpi_dev) goto err; put_device(dev); acpi_dev_put(acpi_dev); return 0; } if (pn->node_id == node_id) { physnode_list = &pn->node; node_id++; } } physical_node->node_id = node_id; physical_node->dev = dev; list_add(&physical_node->node, physnode_list); acpi_dev->physical_node_count++; if (!has_acpi_companion(dev)) ACPI_COMPANION_SET(dev, acpi_dev); acpi_physnode_link_name(physical_node_name, node_id); retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, physical_node_name); if (retval) dev_err(&acpi_dev->dev, "Failed to create link %s (%d)\n", physical_node_name, retval); retval = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj, "firmware_node"); if (retval) dev_err(dev, "Failed to create link firmware_node (%d)\n", retval); mutex_unlock(&acpi_dev->physical_node_lock); if (acpi_dev->wakeup.flags.valid) device_set_wakeup_capable(dev, true); return 0; err: ACPI_COMPANION_SET(dev, NULL); put_device(dev); acpi_dev_put(acpi_dev); return retval; } EXPORT_SYMBOL_GPL(acpi_bind_one); int acpi_unbind_one(struct device *dev) { struct acpi_device *acpi_dev = ACPI_COMPANION(dev); struct acpi_device_physical_node *entry; if (!acpi_dev) return 0; mutex_lock(&acpi_dev->physical_node_lock); list_for_each_entry(entry, &acpi_dev->physical_node_list, node) if (entry->dev == dev) { char physnode_name[PHYSICAL_NODE_NAME_SIZE]; list_del(&entry->node); acpi_dev->physical_node_count--; acpi_physnode_link_name(physnode_name, entry->node_id); sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name); sysfs_remove_link(&dev->kobj, "firmware_node"); ACPI_COMPANION_SET(dev, NULL); /* Drop references taken by acpi_bind_one(). */ put_device(dev); acpi_dev_put(acpi_dev); kfree(entry); break; } mutex_unlock(&acpi_dev->physical_node_lock); return 0; } EXPORT_SYMBOL_GPL(acpi_unbind_one); void acpi_device_notify(struct device *dev) { struct acpi_device *adev; int ret; ret = acpi_bind_one(dev, NULL); if (ret) { struct acpi_bus_type *type = acpi_get_bus_type(dev); if (!type) goto err; adev = type->find_companion(dev); if (!adev) { dev_dbg(dev, "ACPI companion not found\n"); goto err; } ret = acpi_bind_one(dev, adev); if (ret) goto err; if (type->setup) { type->setup(dev); goto done; } } else { adev = ACPI_COMPANION(dev); if (dev_is_pci(dev)) { pci_acpi_setup(dev, adev); goto done; } else if (dev_is_platform(dev)) { acpi_configure_pmsi_domain(dev); } } if (adev->handler && adev->handler->bind) adev->handler->bind(dev); done: acpi_handle_debug(ACPI_HANDLE(dev), "Bound to device %s\n", dev_name(dev)); return; err: dev_dbg(dev, "No ACPI support\n"); } void acpi_device_notify_remove(struct device *dev) { struct acpi_device *adev = ACPI_COMPANION(dev); if (!adev) return; if (dev_is_pci(dev)) pci_acpi_cleanup(dev, adev); else if (adev->handler && adev->handler->unbind) adev->handler->unbind(dev); acpi_unbind_one(dev); }
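To illustrate the sysfs naming scheme implemented by acpi_physnode_link_name() above (demo_physnode_names() is an invented name; the expected strings follow from the snprintf format):

/* Illustrative only: node 0 gets the bare link name, later nodes get a
 * numeric suffix, keeping IDs recyclable as nodes come and go.
 *
 *   node_id 0 -> "physical_node"
 *   node_id 1 -> "physical_node1"
 *   node_id 3 -> "physical_node3"
 */
static void demo_physnode_names(void)
{
	char buf[PHYSICAL_NODE_NAME_SIZE];

	acpi_physnode_link_name(buf, 0);	/* "physical_node"  */
	acpi_physnode_link_name(buf, 3);	/* "physical_node3" */
}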
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Force feedback support for Logitech Gaming Wheels
 *
 * Including G27, G25, DFP, DFGT, FFEX, Momo, Momo2 &
 * Speed Force Wireless (WiiWheel)
 *
 * Copyright (c) 2010 Simon Wood <simon@mungewell.org>
 */

#include <linux/input.h>
#include <linux/usb.h>
#include <linux/hid.h>

#include "usbhid/usbhid.h"
#include "hid-lg.h"
#include "hid-lg4ff.h"
#include "hid-ids.h"

#define LG4FF_MMODE_IS_MULTIMODE 0
#define LG4FF_MMODE_SWITCHED 1
#define LG4FF_MMODE_NOT_MULTIMODE 2

#define LG4FF_MODE_NATIVE_IDX 0
#define LG4FF_MODE_DFEX_IDX 1
#define LG4FF_MODE_DFP_IDX 2
#define LG4FF_MODE_G25_IDX 3
#define LG4FF_MODE_DFGT_IDX 4
#define LG4FF_MODE_G27_IDX 5
#define LG4FF_MODE_G29_IDX 6
#define LG4FF_MODE_MAX_IDX 7

#define LG4FF_MODE_NATIVE BIT(LG4FF_MODE_NATIVE_IDX)
#define LG4FF_MODE_DFEX BIT(LG4FF_MODE_DFEX_IDX)
#define LG4FF_MODE_DFP BIT(LG4FF_MODE_DFP_IDX)
#define LG4FF_MODE_G25 BIT(LG4FF_MODE_G25_IDX)
#define LG4FF_MODE_DFGT BIT(LG4FF_MODE_DFGT_IDX)
#define LG4FF_MODE_G27 BIT(LG4FF_MODE_G27_IDX)
#define LG4FF_MODE_G29 BIT(LG4FF_MODE_G29_IDX)

#define LG4FF_DFEX_TAG "DF-EX"
#define LG4FF_DFEX_NAME "Driving Force / Formula EX"
#define LG4FF_DFP_TAG "DFP"
#define LG4FF_DFP_NAME "Driving Force Pro"
#define LG4FF_G25_TAG "G25"
#define LG4FF_G25_NAME "G25 Racing Wheel"
#define LG4FF_G27_TAG "G27"
#define LG4FF_G27_NAME "G27 Racing Wheel"
#define LG4FF_G29_TAG "G29"
#define LG4FF_G29_NAME "G29 Racing Wheel"
#define LG4FF_DFGT_TAG "DFGT"
#define LG4FF_DFGT_NAME "Driving Force GT"

#define LG4FF_FFEX_REV_MAJ 0x21
#define LG4FF_FFEX_REV_MIN 0x00

static void lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
static void lg4ff_set_range_g25(struct hid_device *hid, u16 range);

struct lg4ff_wheel_data {
	const u32 product_id;
	u16 combine;
	u16 range;
	const u16 min_range;
	const u16 max_range;
#ifdef CONFIG_LEDS_CLASS
	u8 led_state;
	struct led_classdev *led[5];
#endif
	const u32 alternate_modes;
	const char * const real_tag;
	const char * const real_name;
	const u16 real_product_id;

	void (*set_range)(struct hid_device *hid, u16 range);
};

struct lg4ff_device_entry {
	spinlock_t report_lock; /* Protect output HID report */
	struct hid_report *report;
	struct lg4ff_wheel_data wdata;
};

static const signed short lg4ff_wheel_effects[] = {
	FF_CONSTANT,
	FF_AUTOCENTER,
	-1
};

static const signed short no_wheel_effects[] = {
	-1
};

struct lg4ff_wheel {
	const u32 product_id;
	const signed short *ff_effects;
	const u16 min_range;
	const u16 max_range;
	void (*set_range)(struct hid_device *hid, u16 range);
};

struct lg4ff_compat_mode_switch {
	const u8 cmd_count;	/* Number of commands to send */
	const u8 cmd[];
};

struct lg4ff_wheel_ident_info {
	const u32 modes;
	const u16 mask;
	const u16 result;
	const u16 real_product_id;
};

struct lg4ff_multimode_wheel {
	const u16 product_id;
	const u32 alternate_modes;
	const char *real_tag;
	const char *real_name;
};

struct lg4ff_alternate_mode {
	const u16 product_id;
	const char *tag;
	const char *name;
};

static const struct lg4ff_wheel lg4ff_devices[] = {
	{USB_DEVICE_ID_LOGITECH_WINGMAN_FG, no_wheel_effects, 40, 180, NULL},
	{USB_DEVICE_ID_LOGITECH_WINGMAN_FFG, lg4ff_wheel_effects, 40, 180, NULL},
	{USB_DEVICE_ID_LOGITECH_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
	{USB_DEVICE_ID_LOGITECH_MOMO_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
	{USB_DEVICE_ID_LOGITECH_DFP_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_dfp},
	{USB_DEVICE_ID_LOGITECH_G25_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25},
	{USB_DEVICE_ID_LOGITECH_DFGT_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25},
	{USB_DEVICE_ID_LOGITECH_G27_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25},
	{USB_DEVICE_ID_LOGITECH_G29_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25},
	{USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2, lg4ff_wheel_effects, 40, 270, NULL},
	{USB_DEVICE_ID_LOGITECH_WII_WHEEL, lg4ff_wheel_effects, 40, 270, NULL}
};

static const struct lg4ff_multimode_wheel lg4ff_multimode_wheels[] = {
	{USB_DEVICE_ID_LOGITECH_DFP_WHEEL,
	 LG4FF_MODE_NATIVE | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	 LG4FF_DFP_TAG, LG4FF_DFP_NAME},
	{USB_DEVICE_ID_LOGITECH_G25_WHEEL,
	 LG4FF_MODE_NATIVE | LG4FF_MODE_G25 | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	 LG4FF_G25_TAG, LG4FF_G25_NAME},
	{USB_DEVICE_ID_LOGITECH_DFGT_WHEEL,
	 LG4FF_MODE_NATIVE | LG4FF_MODE_DFGT | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	 LG4FF_DFGT_TAG, LG4FF_DFGT_NAME},
	{USB_DEVICE_ID_LOGITECH_G27_WHEEL,
	 LG4FF_MODE_NATIVE | LG4FF_MODE_G27 | LG4FF_MODE_G25 | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	 LG4FF_G27_TAG, LG4FF_G27_NAME},
	{USB_DEVICE_ID_LOGITECH_G29_WHEEL,
	 LG4FF_MODE_NATIVE | LG4FF_MODE_G29 | LG4FF_MODE_G27 | LG4FF_MODE_G25 |
	 LG4FF_MODE_DFGT | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	 LG4FF_G29_TAG, LG4FF_G29_NAME},
};

static const struct lg4ff_alternate_mode lg4ff_alternate_modes[] = {
	[LG4FF_MODE_NATIVE_IDX] = {0, "native", ""},
	[LG4FF_MODE_DFEX_IDX] = {USB_DEVICE_ID_LOGITECH_WHEEL, LG4FF_DFEX_TAG, LG4FF_DFEX_NAME},
	[LG4FF_MODE_DFP_IDX] = {USB_DEVICE_ID_LOGITECH_DFP_WHEEL, LG4FF_DFP_TAG, LG4FF_DFP_NAME},
	[LG4FF_MODE_G25_IDX] = {USB_DEVICE_ID_LOGITECH_G25_WHEEL, LG4FF_G25_TAG, LG4FF_G25_NAME},
	[LG4FF_MODE_DFGT_IDX] = {USB_DEVICE_ID_LOGITECH_DFGT_WHEEL, LG4FF_DFGT_TAG, LG4FF_DFGT_NAME},
	[LG4FF_MODE_G27_IDX] = {USB_DEVICE_ID_LOGITECH_G27_WHEEL, LG4FF_G27_TAG, LG4FF_G27_NAME},
	[LG4FF_MODE_G29_IDX] = {USB_DEVICE_ID_LOGITECH_G29_WHEEL, LG4FF_G29_TAG, LG4FF_G29_NAME},
};

/* Multimode wheel identifiers */
static const struct lg4ff_wheel_ident_info lg4ff_dfp_ident_info = {
	LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	0xf000,
	0x1000,
	USB_DEVICE_ID_LOGITECH_DFP_WHEEL
};

static const struct lg4ff_wheel_ident_info lg4ff_g25_ident_info = {
	LG4FF_MODE_G25 | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	0xff00,
	0x1200,
	USB_DEVICE_ID_LOGITECH_G25_WHEEL
};

static const struct lg4ff_wheel_ident_info lg4ff_g27_ident_info = {
	LG4FF_MODE_G27 | LG4FF_MODE_G25 | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	0xfff0,
	0x1230,
	USB_DEVICE_ID_LOGITECH_G27_WHEEL
};

static const struct lg4ff_wheel_ident_info lg4ff_dfgt_ident_info = {
	LG4FF_MODE_DFGT | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	0xff00,
	0x1300,
	USB_DEVICE_ID_LOGITECH_DFGT_WHEEL
};

static const struct lg4ff_wheel_ident_info lg4ff_g29_ident_info = {
	LG4FF_MODE_G29 | LG4FF_MODE_G27 | LG4FF_MODE_G25 |
	LG4FF_MODE_DFGT | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	0xfff8,
	0x1350,
	USB_DEVICE_ID_LOGITECH_G29_WHEEL
};

static const struct lg4ff_wheel_ident_info lg4ff_g29_ident_info2 = {
	LG4FF_MODE_G29 | LG4FF_MODE_G27 | LG4FF_MODE_G25 |
	LG4FF_MODE_DFGT | LG4FF_MODE_DFP | LG4FF_MODE_DFEX,
	0xff00,
	0x8900,
	USB_DEVICE_ID_LOGITECH_G29_WHEEL
};

/* Multimode wheel identification checklists */
static const struct lg4ff_wheel_ident_info *lg4ff_main_checklist[] = {
	&lg4ff_g29_ident_info,
	&lg4ff_g29_ident_info2,
	&lg4ff_dfgt_ident_info,
	&lg4ff_g27_ident_info,
	&lg4ff_g25_ident_info,
	&lg4ff_dfp_ident_info
};
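/*
 * Editor's note: a minimal, hypothetical user-space sketch (not part of the
 * driver) of how the mask/result pairs above classify a wheel from its USB
 * bcdDevice value. A G25 reports bcdDevice 0x12xx, so 0x1298 & 0xff00 ==
 * 0x1200 matches the G25 entry; a value matching no entry is treated as a
 * non-multimode wheel. The driver additionally gates each match on the
 * wheel's currently reported mode, which is omitted here for brevity.
 */
#include <stdio.h>

struct ident { unsigned short mask, result; const char *name; };

/* Same order as lg4ff_main_checklist: most specific masks first */
static const struct ident checklist[] = {
	{0xfff8, 0x1350, "G29"}, {0xff00, 0x8900, "G29"},
	{0xff00, 0x1300, "DFGT"}, {0xfff0, 0x1230, "G27"},
	{0xff00, 0x1200, "G25"}, {0xf000, 0x1000, "DFP"},
};

int main(void)
{
	unsigned short samples[] = {0x1350, 0x1298, 0x1230, 0x1300, 0x0200};

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		const char *name = "not multimode";

		for (unsigned int j = 0; j < 6; j++)
			if ((samples[i] & checklist[j].mask) == checklist[j].result) {
				name = checklist[j].name;
				break;
			}
		printf("bcdDevice 0x%04x -> %s\n", samples[i], name);
	}
	return 0;
}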
/* Compatibility mode switching commands */
/* EXT_CMD9 - Understood by G27 and DFGT */
static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext09_dfex = {
	2,
	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* Revert mode upon USB reset */
	 0xf8, 0x09, 0x00, 0x01, 0x00, 0x00, 0x00}	/* Switch mode to DF-EX with detach */
};

static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext09_dfp = {
	2,
	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* Revert mode upon USB reset */
	 0xf8, 0x09, 0x01, 0x01, 0x00, 0x00, 0x00}	/* Switch mode to DFP with detach */
};

static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext09_g25 = {
	2,
	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* Revert mode upon USB reset */
	 0xf8, 0x09, 0x02, 0x01, 0x00, 0x00, 0x00}	/* Switch mode to G25 with detach */
};

static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext09_dfgt = {
	2,
	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* Revert mode upon USB reset */
	 0xf8, 0x09, 0x03, 0x01, 0x00, 0x00, 0x00}	/* Switch mode to DFGT with detach */
};

static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext09_g27 = {
	2,
	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* Revert mode upon USB reset */
	 0xf8, 0x09, 0x04, 0x01, 0x00, 0x00, 0x00}	/* Switch mode to G27 with detach */
};

static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext09_g29 = {
	2,
	{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,	/* Revert mode upon USB reset */
	 0xf8, 0x09, 0x05, 0x01, 0x01, 0x00, 0x00}	/* Switch mode to G29 with detach */
};

/* EXT_CMD1 - Understood by DFP, G25, G27 and DFGT */
static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext01_dfp = {
	1,
	{0xf8, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00}
};

/* EXT_CMD16 - Understood by G25 and G27 */
static const struct lg4ff_compat_mode_switch lg4ff_mode_switch_ext16_g25 = {
	1,
	{0xf8, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00}
};

/* Recalculates X axis value according to the currently selected range */
static s32 lg4ff_adjust_dfp_x_axis(s32 value, u16 range)
{
	u16 max_range;
	s32 new_value;

	if (range == 900)
		return value;
	else if (range == 200)
		return value;
	else if (range < 200)
		max_range = 200;
	else
		max_range = 900;

	new_value = 8192 + mult_frac(value - 8192, max_range, range);
	if (new_value < 0)
		return 0;
	else if (new_value > 16383)
		return 16383;
	else
		return new_value;
}
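/*
 * Editor's note: a standalone sketch of the DFP X axis rescaling above. The
 * wheel always reports the full 14-bit axis (0..16383, centre 8192) scaled
 * to the coarse limit (200 or 900 degrees), so for any other range the value
 * is stretched around the centre by max_range/range and clamped. E.g. with
 * range 450, an input of 12288 maps to 8192 + 4096 * 900/450 = 16384, which
 * is clamped to 16383.
 */
#include <stdio.h>

static int adjust_dfp_x(int value, unsigned int range)
{
	unsigned int max_range = (range < 200) ? 200 : 900;
	long v;

	if (range == 200 || range == 900)
		return value;
	/* plain long arithmetic stands in for the kernel's mult_frac() */
	v = 8192 + ((long)(value - 8192) * max_range) / range;
	return v < 0 ? 0 : v > 16383 ? 16383 : (int)v;
}

int main(void)
{
	/* prints 8192 (centre unchanged), 16383 (clamped), 12288 (pass-through) */
	printf("%d %d %d\n", adjust_dfp_x(8192, 450),
	       adjust_dfp_x(12288, 450), adjust_dfp_x(12288, 900));
	return 0;
}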
int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field,
			     struct hid_usage *usage, s32 value, struct lg_drv_data *drv_data)
{
	struct lg4ff_device_entry *entry = drv_data->device_props;
	s32 new_value = 0;

	if (!entry) {
		hid_err(hid, "Device properties not found");
		return 0;
	}

	switch (entry->wdata.product_id) {
	case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
		switch (usage->code) {
		case ABS_X:
			new_value = lg4ff_adjust_dfp_x_axis(value, entry->wdata.range);
			input_event(field->hidinput->input, usage->type, usage->code, new_value);
			return 1;
		default:
			return 0;
		}
	default:
		return 0;
	}
}

int lg4ff_raw_event(struct hid_device *hdev, struct hid_report *report,
		    u8 *rd, int size, struct lg_drv_data *drv_data)
{
	int offset;
	struct lg4ff_device_entry *entry = drv_data->device_props;

	if (!entry)
		return 0;

	/* adjust HID report to present combined pedals data */
	if (entry->wdata.combine) {
		switch (entry->wdata.product_id) {
		case USB_DEVICE_ID_LOGITECH_WHEEL:
			rd[5] = rd[3];
			rd[6] = 0x7F;
			return 1;
		case USB_DEVICE_ID_LOGITECH_WINGMAN_FG:
		case USB_DEVICE_ID_LOGITECH_WINGMAN_FFG:
		case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
		case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
			rd[4] = rd[3];
			rd[5] = 0x7F;
			return 1;
		case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
			rd[5] = rd[4];
			rd[6] = 0x7F;
			return 1;
		case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
		case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
			offset = 5;
			break;
		case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
		case USB_DEVICE_ID_LOGITECH_G29_WHEEL:
			offset = 6;
			break;
		case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
			offset = 3;
			break;
		default:
			return 0;
		}

		/* Compute a combined axis when wheel does not supply it */
		rd[offset] = (0xFF + rd[offset] - rd[offset+1]) >> 1;
		rd[offset+1] = 0x7F;
		return 1;
	}

	return 0;
}

static void lg4ff_init_wheel_data(struct lg4ff_wheel_data * const wdata,
				  const struct lg4ff_wheel *wheel,
				  const struct lg4ff_multimode_wheel *mmode_wheel,
				  const u16 real_product_id)
{
	u32 alternate_modes = 0;
	const char *real_tag = NULL;
	const char *real_name = NULL;

	if (mmode_wheel) {
		alternate_modes = mmode_wheel->alternate_modes;
		real_tag = mmode_wheel->real_tag;
		real_name = mmode_wheel->real_name;
	}

	{
		struct lg4ff_wheel_data t_wdata = {
			.product_id = wheel->product_id,
			.real_product_id = real_product_id,
			.combine = 0,
			.min_range = wheel->min_range,
			.max_range = wheel->max_range,
			.set_range = wheel->set_range,
			.alternate_modes = alternate_modes,
			.real_tag = real_tag,
			.real_name = real_name
		};

		memcpy(wdata, &t_wdata, sizeof(t_wdata));
	}
}

static int lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	unsigned long flags;
	s32 *value;
	int x;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return -EINVAL;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return -EINVAL;
	}
	value = entry->report->field[0]->value;

#define CLAMP(x) do { if (x < 0) x = 0; else if (x > 0xff) x = 0xff; } while (0)

	switch (effect->type) {
	case FF_CONSTANT:
		x = effect->u.ramp.start_level + 0x80;	/* 0x80 is no force */
		CLAMP(x);

		spin_lock_irqsave(&entry->report_lock, flags);
		if (x == 0x80) {
			/* De-activate force in slot-1 */
			value[0] = 0x13;
			value[1] = 0x00;
			value[2] = 0x00;
			value[3] = 0x00;
			value[4] = 0x00;
			value[5] = 0x00;
			value[6] = 0x00;

			hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
			spin_unlock_irqrestore(&entry->report_lock, flags);
			return 0;
		}

		value[0] = 0x11;	/* Slot 1 */
		value[1] = 0x08;
		value[2] = x;
		value[3] = 0x80;
		value[4] = 0x00;
		value[5] = 0x00;
		value[6] = 0x00;

		hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
		spin_unlock_irqrestore(&entry->report_lock, flags);
		break;
	}
	return 0;
}

/* Sends default autocentering command compatible with
 * all wheels except Formula Force EX */
static void lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude)
{
	struct hid_device *hid = input_get_drvdata(dev);
	s32 *value;
	u32 expand_a, expand_b;
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	unsigned long flags;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return;
	}
	value = entry->report->field[0]->value;

	/* De-activate Auto-Center */
	spin_lock_irqsave(&entry->report_lock, flags);
	if (magnitude == 0) {
		value[0] = 0xf5;
		value[1] = 0x00;
		value[2] = 0x00;
		value[3] = 0x00;
		value[4] = 0x00;
		value[5] = 0x00;
		value[6] = 0x00;

		hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
		spin_unlock_irqrestore(&entry->report_lock, flags);
		return;
	}

	if (magnitude <= 0xaaaa) {
		expand_a = 0x0c * magnitude;
		expand_b = 0x80 * magnitude;
	} else {
		expand_a = (0x0c * 0xaaaa) + 0x06 * (magnitude - 0xaaaa);
		expand_b = (0x80 * 0xaaaa) + 0xff * (magnitude - 0xaaaa);
	}

	/* Adjust for non-MOMO wheels */
	switch (entry->wdata.product_id) {
	case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL:
	case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
		break;
	default:
		expand_a = expand_a >> 1;
		break;
	}

	value[0] = 0xfe;
	value[1] = 0x0d;
	value[2] = expand_a / 0xaaaa;
	value[3] = expand_a / 0xaaaa;
	value[4] = expand_b / 0xaaaa;
	value[5] = 0x00;
	value[6] = 0x00;

	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);

	/* Activate Auto-Center */
	value[0] = 0x14;
	value[1] = 0x00;
	value[2] = 0x00;
	value[3] = 0x00;
	value[4] = 0x00;
	value[5] = 0x00;
	value[6] = 0x00;

	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
	spin_unlock_irqrestore(&entry->report_lock, flags);
}
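/*
 * Editor's note: a hypothetical standalone sketch of the piecewise-linear
 * autocentre expansion above. The per-unit gain changes at the knee at
 * 0xaaaa, the intermediate products are divided back down by 0xaaaa, and the
 * results become bytes 2-4 of the 0xfe/0x0d report; non-MOMO wheels use half
 * the "a" gain. At full magnitude (0xffff) this yields 07 07 ff.
 */
#include <stdio.h>

static void autocenter_bytes(unsigned int magnitude, int momo, unsigned char out[3])
{
	unsigned long a, b;

	if (magnitude <= 0xaaaa) {
		a = 0x0cUL * magnitude;
		b = 0x80UL * magnitude;
	} else {
		a = 0x0cUL * 0xaaaa + 0x06UL * (magnitude - 0xaaaa);
		b = 0x80UL * 0xaaaa + 0xffUL * (magnitude - 0xaaaa);
	}
	if (!momo)			/* non-MOMO wheels halve the 'a' gain */
		a >>= 1;
	out[0] = a / 0xaaaa;		/* value[2] in the report */
	out[1] = a / 0xaaaa;		/* value[3] */
	out[2] = b / 0xaaaa;		/* value[4] */
}

int main(void)
{
	unsigned int m[] = {0x0000, 0x8000, 0xaaaa, 0xffff};
	unsigned char o[3];

	for (int i = 0; i < 4; i++) {
		autocenter_bytes(m[i], 0, o);
		printf("magnitude 0x%04x -> %02x %02x %02x\n", m[i], o[0], o[1], o[2]);
	}
	return 0;
}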
/* Sends autocentering command compatible with Formula Force EX */
static void lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	unsigned long flags;
	s32 *value;
	magnitude = magnitude * 90 / 65535;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return;
	}
	value = entry->report->field[0]->value;

	spin_lock_irqsave(&entry->report_lock, flags);
	value[0] = 0xfe;
	value[1] = 0x03;
	value[2] = magnitude >> 14;
	value[3] = magnitude >> 14;
	value[4] = magnitude;
	value[5] = 0x00;
	value[6] = 0x00;

	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
	spin_unlock_irqrestore(&entry->report_lock, flags);
}

/* Sends command to set range compatible with G25/G27/Driving Force GT */
static void lg4ff_set_range_g25(struct hid_device *hid, u16 range)
{
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	unsigned long flags;
	s32 *value;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return;
	}
	value = entry->report->field[0]->value;
	dbg_hid("G25/G27/DFGT: setting range to %u\n", range);

	spin_lock_irqsave(&entry->report_lock, flags);
	value[0] = 0xf8;
	value[1] = 0x81;
	value[2] = range & 0x00ff;
	value[3] = (range & 0xff00) >> 8;
	value[4] = 0x00;
	value[5] = 0x00;
	value[6] = 0x00;

	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
	spin_unlock_irqrestore(&entry->report_lock, flags);
}

/* Sends commands to set range compatible with Driving Force Pro wheel */
static void lg4ff_set_range_dfp(struct hid_device *hid, u16 range)
{
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	unsigned long flags;
	int start_left, start_right, full_range;
	s32 *value;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return;
	}
	value = entry->report->field[0]->value;
	dbg_hid("Driving Force Pro: setting range to %u\n", range);

	/* Prepare "coarse" limit command */
	spin_lock_irqsave(&entry->report_lock, flags);
	value[0] = 0xf8;
	value[1] = 0x00;	/* Set later */
	value[2] = 0x00;
	value[3] = 0x00;
	value[4] = 0x00;
	value[5] = 0x00;
	value[6] = 0x00;

	if (range > 200) {
		value[1] = 0x03;
		full_range = 900;
	} else {
		value[1] = 0x02;
		full_range = 200;
	}
	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);

	/* Prepare "fine" limit command */
	value[0] = 0x81;
	value[1] = 0x0b;
	value[2] = 0x00;
	value[3] = 0x00;
	value[4] = 0x00;
	value[5] = 0x00;
	value[6] = 0x00;

	if (range == 200 || range == 900) {	/* Do not apply any fine limit */
		hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
		spin_unlock_irqrestore(&entry->report_lock, flags);
		return;
	}

	/* Construct fine limit command */
	start_left = (((full_range - range + 1) * 2047) / full_range);
	start_right = 0xfff - start_left;

	value[2] = start_left >> 4;
	value[3] = start_right >> 4;
	value[4] = 0xff;
	value[5] = (start_right & 0xe) << 4 | (start_left & 0xe);
	value[6] = 0xff;

	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
	spin_unlock_irqrestore(&entry->report_lock, flags);
}
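/*
 * Editor's note: a sketch of the DFP "fine limit" arithmetic above. The soft
 * stops are placed symmetrically around the centre of the 12-bit axis:
 * start_left counts in from 0, start_right in from 0xfff, and each value is
 * packed as 8 high bits plus its low "fine" bits into bytes 2, 3 and 5 of
 * the report. The sample range of 540 degrees is illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned int range = 540, full_range = 900;	/* coarse mode is 900 deg */
	unsigned int start_left = ((full_range - range + 1) * 2047) / full_range;
	unsigned int start_right = 0xfff - start_left;

	printf("start_left=0x%03x start_right=0x%03x\n", start_left, start_right);
	printf("value[2]=0x%02x value[3]=0x%02x value[5]=0x%02x\n",
	       start_left >> 4, start_right >> 4,
	       (start_right & 0xe) << 4 | (start_left & 0xe));
	return 0;
}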
static const struct lg4ff_compat_mode_switch *lg4ff_get_mode_switch_command(const u16 real_product_id,
									    const u16 target_product_id)
{
	switch (real_product_id) {
	case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
		switch (target_product_id) {
		case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
			return &lg4ff_mode_switch_ext01_dfp;
		/* DFP can only be switched to its native mode */
		default:
			return NULL;
		}
		break;
	case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
		switch (target_product_id) {
		case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
			return &lg4ff_mode_switch_ext01_dfp;
		case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
			return &lg4ff_mode_switch_ext16_g25;
		/* G25 can only be switched to DFP mode or its native mode */
		default:
			return NULL;
		}
		break;
	case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
		switch (target_product_id) {
		case USB_DEVICE_ID_LOGITECH_WHEEL:
			return &lg4ff_mode_switch_ext09_dfex;
		case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
			return &lg4ff_mode_switch_ext09_dfp;
		case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
			return &lg4ff_mode_switch_ext09_g25;
		case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
			return &lg4ff_mode_switch_ext09_g27;
		/* G27 can only be switched to DF-EX, DFP, G25 or its native mode */
		default:
			return NULL;
		}
		break;
	case USB_DEVICE_ID_LOGITECH_G29_WHEEL:
		switch (target_product_id) {
		case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
			return &lg4ff_mode_switch_ext09_dfp;
		case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
			return &lg4ff_mode_switch_ext09_dfgt;
		case USB_DEVICE_ID_LOGITECH_G25_WHEEL:
			return &lg4ff_mode_switch_ext09_g25;
		case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
			return &lg4ff_mode_switch_ext09_g27;
		case USB_DEVICE_ID_LOGITECH_G29_WHEEL:
			return &lg4ff_mode_switch_ext09_g29;
		/* G29 can only be switched to DF-EX, DFP, DFGT, G25, G27 or its native mode */
		default:
			return NULL;
		}
		break;
	case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
		switch (target_product_id) {
		case USB_DEVICE_ID_LOGITECH_WHEEL:
			return &lg4ff_mode_switch_ext09_dfex;
		case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
			return &lg4ff_mode_switch_ext09_dfp;
		case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL:
			return &lg4ff_mode_switch_ext09_dfgt;
		/* DFGT can only be switched to DF-EX, DFP or its native mode */
		default:
			return NULL;
		}
		break;
	/* No other wheels have multiple modes */
	default:
		return NULL;
	}
}

static int lg4ff_switch_compatibility_mode(struct hid_device *hid, const struct lg4ff_compat_mode_switch *s)
{
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	unsigned long flags;
	s32 *value;
	u8 i;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return -EINVAL;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return -EINVAL;
	}
	value = entry->report->field[0]->value;

	spin_lock_irqsave(&entry->report_lock, flags);
	for (i = 0; i < s->cmd_count; i++) {
		u8 j;

		for (j = 0; j < 7; j++)
			value[j] = s->cmd[j + (7*i)];

		hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
	}
	spin_unlock_irqrestore(&entry->report_lock, flags);
	hid_hw_wait(hid);
	return 0;
}

static ssize_t lg4ff_alternate_modes_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	ssize_t count = 0;
	int i;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return 0;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return 0;
	}

	if (!entry->wdata.real_name) {
		hid_err(hid, "NULL pointer to string\n");
		return 0;
	}

	for (i = 0; i < LG4FF_MODE_MAX_IDX; i++) {
		if (entry->wdata.alternate_modes & BIT(i)) {
			/* Print tag and full name */
			count += scnprintf(buf + count, PAGE_SIZE - count, "%s: %s",
					   lg4ff_alternate_modes[i].tag,
					   !lg4ff_alternate_modes[i].product_id ?
						entry->wdata.real_name : lg4ff_alternate_modes[i].name);
			if (count >= PAGE_SIZE - 1)
				return count;

			/* Mark the currently active mode with an asterisk */
			if (lg4ff_alternate_modes[i].product_id == entry->wdata.product_id ||
			    (lg4ff_alternate_modes[i].product_id == 0 &&
			     entry->wdata.product_id == entry->wdata.real_product_id))
				count += scnprintf(buf + count, PAGE_SIZE - count, " *\n");
			else
				count += scnprintf(buf + count, PAGE_SIZE - count, "\n");

			if (count >= PAGE_SIZE - 1)
				return count;
		}
	}

	return count;
}

static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	const struct lg4ff_compat_mode_switch *s;
	u16 target_product_id = 0;
	int i, ret;
	char *lbuf;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return -EINVAL;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return -EINVAL;
	}

	/* Allow \n at the end of the input parameter */
	lbuf = kasprintf(GFP_KERNEL, "%s", buf);
	if (!lbuf)
		return -ENOMEM;

	i = strlen(lbuf);
	if (i == 0) {
		kfree(lbuf);
		return -EINVAL;
	}

	if (lbuf[i-1] == '\n') {
		if (i == 1) {
			kfree(lbuf);
			return -EINVAL;
		}
		lbuf[i-1] = '\0';
	}

	for (i = 0; i < LG4FF_MODE_MAX_IDX; i++) {
		const u16 mode_product_id = lg4ff_alternate_modes[i].product_id;
		const char *tag = lg4ff_alternate_modes[i].tag;

		if (entry->wdata.alternate_modes & BIT(i)) {
			if (!strcmp(tag, lbuf)) {
				if (!mode_product_id)
					target_product_id = entry->wdata.real_product_id;
				else
					target_product_id = mode_product_id;
				break;
			}
		}
	}

	if (i == LG4FF_MODE_MAX_IDX) {
		hid_info(hid, "Requested mode \"%s\" is not supported by the device\n", lbuf);
		kfree(lbuf);
		return -EINVAL;
	}
	kfree(lbuf); /* Not needed anymore */

	if (target_product_id == entry->wdata.product_id) /* Nothing to do */
		return count;

	/* Automatic switching has to be disabled for the switch to DF-EX mode to work correctly */
	if (target_product_id == USB_DEVICE_ID_LOGITECH_WHEEL && !lg4ff_no_autoswitch) {
		hid_info(hid, "\"%s\" cannot be switched to \"DF-EX\" mode. Load the \"hid_logitech\" module with \"lg4ff_no_autoswitch=1\" parameter set and try again\n",
			 entry->wdata.real_name);
		return -EINVAL;
	}

	/* Take care of hardware limitations */
	if ((entry->wdata.real_product_id == USB_DEVICE_ID_LOGITECH_DFP_WHEEL ||
	     entry->wdata.real_product_id == USB_DEVICE_ID_LOGITECH_G25_WHEEL) &&
	    entry->wdata.product_id > target_product_id) {
		hid_info(hid, "\"%s\" cannot be switched back into \"%s\" mode\n",
			 entry->wdata.real_name, lg4ff_alternate_modes[i].name);
		return -EINVAL;
	}

	s = lg4ff_get_mode_switch_command(entry->wdata.real_product_id, target_product_id);
	if (!s) {
		hid_err(hid, "Invalid target product ID %X\n", target_product_id);
		return -EINVAL;
	}

	ret = lg4ff_switch_compatibility_mode(hid, s);
	return (ret == 0 ? count : ret);
}
static DEVICE_ATTR(alternate_modes, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH,
		   lg4ff_alternate_modes_show, lg4ff_alternate_modes_store);
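/*
 * Editor's note: hypothetical user-space usage of the attribute pair above.
 * Reading alternate_modes lists "TAG: Name" lines with the active mode
 * starred; writing a tag (trailing newline allowed) asks the wheel to switch
 * mode, after which it detaches and re-enumerates. The sysfs path below is
 * purely illustrative.
 */
#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/bus/hid/devices/0003:046D:C29B.0001/alternate_modes";
	char line[128];
	FILE *f = fopen(attr, "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "native: G27 Racing Wheel *" */
	fclose(f);

	f = fopen(attr, "w");
	if (!f)
		return 1;
	fputs("DFP\n", f);		/* request Driving Force Pro mode */
	fclose(f);
	return 0;
}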
static ssize_t lg4ff_combine_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	size_t count;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return 0;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return 0;
	}

	count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->wdata.combine);
	return count;
}

static ssize_t lg4ff_combine_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	u16 combine = simple_strtoul(buf, NULL, 10);

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return -EINVAL;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return -EINVAL;
	}

	if (combine > 1)
		combine = 1;

	entry->wdata.combine = combine;
	return count;
}
static DEVICE_ATTR(combine_pedals, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH,
		   lg4ff_combine_show, lg4ff_combine_store);

/* Export the currently set range of the wheel */
static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	size_t count;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return 0;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return 0;
	}

	count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->wdata.range);
	return count;
}

/* Set range to user specified value, call appropriate function
 * according to the type of the wheel */
static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	u16 range = simple_strtoul(buf, NULL, 10);

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return -EINVAL;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return -EINVAL;
	}

	if (range == 0)
		range = entry->wdata.max_range;

	/* Check if the wheel supports range setting
	 * and that the range is within limits for the wheel */
	if (entry->wdata.set_range && range >= entry->wdata.min_range &&
	    range <= entry->wdata.max_range) {
		entry->wdata.set_range(hid, range);
		entry->wdata.range = range;
	}

	return count;
}
static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH,
		   lg4ff_range_show, lg4ff_range_store);

static ssize_t lg4ff_real_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct hid_device *hid = to_hid_device(dev);
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	size_t count;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return 0;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return 0;
	}

	if (!entry->wdata.real_tag || !entry->wdata.real_name) {
		hid_err(hid, "NULL pointer to string\n");
		return 0;
	}

	count = scnprintf(buf, PAGE_SIZE, "%s: %s\n", entry->wdata.real_tag, entry->wdata.real_name);
	return count;
}

static ssize_t lg4ff_real_id_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t count)
{
	/* Real ID is a read-only value */
	return -EPERM;
}
static DEVICE_ATTR(real_id, S_IRUGO, lg4ff_real_id_show, lg4ff_real_id_store);

#ifdef CONFIG_LEDS_CLASS
static void lg4ff_set_leds(struct hid_device *hid, u8 leds)
{
	struct lg_drv_data *drv_data;
	struct lg4ff_device_entry *entry;
	unsigned long flags;
	s32 *value;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return;
	}
	value = entry->report->field[0]->value;

	spin_lock_irqsave(&entry->report_lock, flags);
	value[0] = 0xf8;
	value[1] = 0x12;
	value[2] = leds;
	value[3] = 0x00;
	value[4] = 0x00;
	value[5] = 0x00;
	value[6] = 0x00;
	hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
	spin_unlock_irqrestore(&entry->report_lock, flags);
}

static void lg4ff_led_set_brightness(struct led_classdev *led_cdev, enum led_brightness value)
{
	struct device *dev = led_cdev->dev->parent;
	struct hid_device *hid = to_hid_device(dev);
	struct lg_drv_data *drv_data = hid_get_drvdata(hid);
	struct lg4ff_device_entry *entry;
	int i, state = 0;

	if (!drv_data) {
		hid_err(hid, "Device data not found.");
		return;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found.");
		return;
	}

	for (i = 0; i < 5; i++) {
		if (led_cdev != entry->wdata.led[i])
			continue;
		state = (entry->wdata.led_state >> i) & 1;
		if (value == LED_OFF && state) {
			entry->wdata.led_state &= ~(1 << i);
			lg4ff_set_leds(hid, entry->wdata.led_state);
		} else if (value != LED_OFF && !state) {
			entry->wdata.led_state |= 1 << i;
			lg4ff_set_leds(hid, entry->wdata.led_state);
		}
		break;
	}
}

static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cdev)
{
	struct device *dev = led_cdev->dev->parent;
	struct hid_device *hid = to_hid_device(dev);
	struct lg_drv_data *drv_data = hid_get_drvdata(hid);
	struct lg4ff_device_entry *entry;
	int i, value = 0;

	if (!drv_data) {
		hid_err(hid, "Device data not found.");
		return LED_OFF;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found.");
		return LED_OFF;
	}

	for (i = 0; i < 5; i++)
		if (led_cdev == entry->wdata.led[i]) {
			value = (entry->wdata.led_state >> i) & 1;
			break;
		}

	return value ? LED_FULL : LED_OFF;
}
#endif

static u16 lg4ff_identify_multimode_wheel(struct hid_device *hid, const u16 reported_product_id,
					  const u16 bcdDevice)
{
	u32 current_mode;
	int i;

	/* identify current mode from USB PID */
	for (i = 1; i < ARRAY_SIZE(lg4ff_alternate_modes); i++) {
		dbg_hid("Testing whether PID is %X\n", lg4ff_alternate_modes[i].product_id);
		if (reported_product_id == lg4ff_alternate_modes[i].product_id)
			break;
	}

	if (i == ARRAY_SIZE(lg4ff_alternate_modes))
		return 0;

	current_mode = BIT(i);

	for (i = 0; i < ARRAY_SIZE(lg4ff_main_checklist); i++) {
		const u16 mask = lg4ff_main_checklist[i]->mask;
		const u16 result = lg4ff_main_checklist[i]->result;
		const u16 real_product_id = lg4ff_main_checklist[i]->real_product_id;

		if ((current_mode & lg4ff_main_checklist[i]->modes) &&
		    (bcdDevice & mask) == result) {
			dbg_hid("Found wheel with real PID %X whose reported PID is %X\n",
				real_product_id, reported_product_id);
			return real_product_id;
		}
	}

	/* No match found. This is either Driving Force or an unknown
	 * wheel model, do not touch it */
	dbg_hid("Wheel with bcdDevice %X was not recognized as multimode wheel, leaving in its current mode\n",
		bcdDevice);
	return 0;
}

static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_product_id,
					const u16 bcdDevice)
{
	const u16 reported_product_id = hid->product;
	int ret;

	*real_product_id = lg4ff_identify_multimode_wheel(hid, reported_product_id, bcdDevice);
	/* Probed wheel is not a multimode wheel */
	if (!*real_product_id) {
		*real_product_id = reported_product_id;
		dbg_hid("Wheel is not a multimode wheel\n");
		return LG4FF_MMODE_NOT_MULTIMODE;
	}

	/* Switch from "Driving Force" mode to native mode automatically.
	 * Otherwise keep the wheel in its current mode */
	if (reported_product_id == USB_DEVICE_ID_LOGITECH_WHEEL &&
	    reported_product_id != *real_product_id &&
	    !lg4ff_no_autoswitch) {
		const struct lg4ff_compat_mode_switch *s = lg4ff_get_mode_switch_command(*real_product_id, *real_product_id);

		if (!s) {
			hid_err(hid, "Invalid product id %X\n", *real_product_id);
			return LG4FF_MMODE_NOT_MULTIMODE;
		}

		ret = lg4ff_switch_compatibility_mode(hid, s);
		if (ret) {
			/* Wheel could not have been switched to native mode,
			 * leave it in "Driving Force" mode and continue */
			hid_err(hid, "Unable to switch wheel mode, errno %d\n", ret);
			return LG4FF_MMODE_IS_MULTIMODE;
		}
		return LG4FF_MMODE_SWITCHED;
	}

	return LG4FF_MMODE_IS_MULTIMODE;
}

int lg4ff_init(struct hid_device *hid)
{
	struct hid_input *hidinput;
	struct input_dev *dev;
	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
	struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
	const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
	const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
	const struct lg4ff_multimode_wheel *mmode_wheel = NULL;
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	int error, i, j;
	int mmode_ret, mmode_idx = -1;
	u16 real_product_id;

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;
	}
	hidinput = list_entry(hid->inputs.next, struct hid_input, list);
	dev = hidinput->input;

	/* Check that the report looks ok */
	if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
		return -1;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Cannot add device, private driver data not allocated\n");
		return -1;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	spin_lock_init(&entry->report_lock);
	entry->report = report;
	drv_data->device_props = entry;

	/* Check if a multimode wheel has been connected and
	 * handle it appropriately */
	mmode_ret = lg4ff_handle_multimode_wheel(hid, &real_product_id, bcdDevice);

	/* Wheel has been told to switch to native mode. There is no point in going on
	 * with the initialization as the wheel will do a USB reset when it switches mode */
	if (mmode_ret == LG4FF_MMODE_SWITCHED)
		return 0;
	else if (mmode_ret < 0) {
		hid_err(hid, "Unable to switch device mode during initialization, errno %d\n", mmode_ret);
		error = mmode_ret;
		goto err_init;
	}

	/* Check what wheel has been connected */
	for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
		if (hid->product == lg4ff_devices[i].product_id) {
			dbg_hid("Found compatible device, product ID %04X\n", lg4ff_devices[i].product_id);
			break;
		}
	}

	if (i == ARRAY_SIZE(lg4ff_devices)) {
		hid_err(hid, "This device is flagged to be handled by the lg4ff module but this module does not know how to handle it. "
			     "Please report this as a bug to LKML, Simon Wood <simon@mungewell.org> or "
			     "Michal Maly <madcatxster@devoid-pointer.net>\n");
		error = -1;
		goto err_init;
	}

	if (mmode_ret == LG4FF_MMODE_IS_MULTIMODE) {
		for (mmode_idx = 0; mmode_idx < ARRAY_SIZE(lg4ff_multimode_wheels); mmode_idx++) {
			if (real_product_id == lg4ff_multimode_wheels[mmode_idx].product_id)
				break;
		}

		if (mmode_idx == ARRAY_SIZE(lg4ff_multimode_wheels)) {
			hid_err(hid, "Device product ID %X is not listed as a multimode wheel", real_product_id);
			error = -1;
			goto err_init;
		}
	}

	/* Set supported force feedback capabilities */
	for (j = 0; lg4ff_devices[i].ff_effects[j] >= 0; j++)
		set_bit(lg4ff_devices[i].ff_effects[j], dev->ffbit);

	error = input_ff_create_memless(dev, NULL, lg4ff_play);
	if (error)
		goto err_init;

	/* Initialize device properties */
	if (mmode_ret == LG4FF_MMODE_IS_MULTIMODE) {
		if (WARN_ON(mmode_idx == -1))
			return -EINVAL;
		mmode_wheel = &lg4ff_multimode_wheels[mmode_idx];
	}
	lg4ff_init_wheel_data(&entry->wdata, &lg4ff_devices[i], mmode_wheel, real_product_id);

	/* Check if autocentering is available and
	 * set the centering force to zero by default */
	if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
		/* Formula Force EX expects different autocentering command */
		if ((bcdDevice >> 8) == LG4FF_FFEX_REV_MAJ &&
		    (bcdDevice & 0xff) == LG4FF_FFEX_REV_MIN)
			dev->ff->set_autocenter = lg4ff_set_autocenter_ffex;
		else
			dev->ff->set_autocenter = lg4ff_set_autocenter_default;

		dev->ff->set_autocenter(dev, 0);
	}

	/* Create sysfs interface */
	error = device_create_file(&hid->dev, &dev_attr_combine_pedals);
	if (error)
		hid_warn(hid, "Unable to create sysfs interface for \"combine\", errno %d\n", error);
	error = device_create_file(&hid->dev, &dev_attr_range);
	if (error)
		hid_warn(hid, "Unable to create sysfs interface for \"range\", errno %d\n", error);
	if (mmode_ret == LG4FF_MMODE_IS_MULTIMODE) {
		error = device_create_file(&hid->dev, &dev_attr_real_id);
		if (error)
			hid_warn(hid, "Unable to create sysfs interface for \"real_id\", errno %d\n", error);
		error = device_create_file(&hid->dev, &dev_attr_alternate_modes);
		if (error)
			hid_warn(hid, "Unable to create sysfs interface for \"alternate_modes\", errno %d\n", error);
	}
	dbg_hid("sysfs interface created\n");

	/* Set the maximum range to start with */
	entry->wdata.range = entry->wdata.max_range;
	if (entry->wdata.set_range)
		entry->wdata.set_range(hid, entry->wdata.range);

#ifdef CONFIG_LEDS_CLASS
	/* register led subsystem - G27/G29 only */
	entry->wdata.led_state = 0;
	for (j = 0; j < 5; j++)
		entry->wdata.led[j] = NULL;

	if (lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_G27_WHEEL ||
	    lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_G29_WHEEL) {
		struct led_classdev *led;
		size_t name_sz;
		char *name;

		lg4ff_set_leds(hid, 0);

		name_sz = strlen(dev_name(&hid->dev)) + 8;

		for (j = 0; j < 5; j++) {
			led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
			if (!led) {
				hid_err(hid, "can't allocate memory for LED %d\n", j);
				goto err_leds;
			}

			name = (void *)(&led[1]);
			snprintf(name, name_sz, "%s::RPM%d", dev_name(&hid->dev), j+1);
			led->name = name;
			led->brightness = 0;
			led->max_brightness = 1;
			led->brightness_get = lg4ff_led_get_brightness;
			led->brightness_set = lg4ff_led_set_brightness;

			entry->wdata.led[j] = led;
			error = led_classdev_register(&hid->dev, led);

			if (error) {
				hid_err(hid, "failed to register LED %d. Aborting.\n", j);
err_leds:
				/* Deregister LEDs (if any) */
				for (j = 0; j < 5; j++) {
					led = entry->wdata.led[j];
					entry->wdata.led[j] = NULL;
					if (!led)
						continue;
					led_classdev_unregister(led);
					kfree(led);
				}
				goto out;	/* Let the driver continue without LEDs */
			}
		}
	}
out:
#endif
	hid_info(hid, "Force feedback support for Logitech Gaming Wheels\n");
	return 0;

err_init:
	drv_data->device_props = NULL;
	kfree(entry);
	return error;
}

int lg4ff_deinit(struct hid_device *hid)
{
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Error while deinitializing device, no private driver data.\n");
		return -1;
	}
	entry = drv_data->device_props;
	if (!entry)
		goto out; /* Nothing more to do */

	/* Multimode devices will have at least the "MODE_NATIVE" bit set */
	if (entry->wdata.alternate_modes) {
		device_remove_file(&hid->dev, &dev_attr_real_id);
		device_remove_file(&hid->dev, &dev_attr_alternate_modes);
	}

	device_remove_file(&hid->dev, &dev_attr_combine_pedals);
	device_remove_file(&hid->dev, &dev_attr_range);

#ifdef CONFIG_LEDS_CLASS
	{
		int j;
		struct led_classdev *led;

		/* Deregister LEDs (if any) */
		for (j = 0; j < 5; j++) {
			led = entry->wdata.led[j];
			entry->wdata.led[j] = NULL;
			if (!led)
				continue;
			led_classdev_unregister(led);
			kfree(led);
		}
	}
#endif

	drv_data->device_props = NULL;
	kfree(entry);
out:
	dbg_hid("Device successfully unregistered\n");
	return 0;
}
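/*
 * Editor's note: a minimal user-space sketch of driving the force-feedback
 * capability registered above through the standard evdev API. FF_CONSTANT
 * levels are signed 16-bit; lg4ff_play() reads effect->u.ramp.start_level,
 * which aliases u.constant.level in the union, and derives the one-byte
 * wheel command (0x80 meaning "no force"). The device node below is a
 * hypothetical placeholder.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct ff_effect effect;
	struct input_event play;
	int fd = open("/dev/input/event7", O_RDWR);	/* illustrative node */

	if (fd < 0)
		return 1;

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_CONSTANT;
	effect.id = -1;				/* let the kernel pick a slot */
	effect.u.constant.level = 0x4000;	/* push to one side */
	effect.replay.length = 1000;		/* ms */
	if (ioctl(fd, EVIOCSFF, &effect) < 0)
		return 1;

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;				/* start playback */
	if (write(fd, &play, sizeof(play)) != sizeof(play))
		return 1;

	sleep(2);
	ioctl(fd, EVIOCRMFF, effect.id);	/* free the effect slot */
	close(fd);
	return 0;
}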
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/netdevice.h>
#include <net/netdev_lock.h>

#include "dev.h"

/**
 * dev_change_name() - change name of a device
 * @dev: device
 * @newname: name (or format string) must be at least IFNAMSIZ
 *
 * Change name of a device. Format strings such as "eth%d"
 * can be passed for wildcarding.
 *
 * Return: 0 on success, -errno on failure.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	int ret;

	netdev_lock_ops(dev);
	ret = netif_change_name(dev, newname);
	netdev_unlock_ops(dev);

	return ret;
}

/**
 * dev_set_alias() - change ifalias of a device
 * @dev: device
 * @alias: name up to IFALIASZ
 * @len: limit of bytes to copy from @alias
 *
 * Set ifalias for a device.
 *
 * Return: 0 on success, -errno on failure.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	int ret;

	netdev_lock_ops(dev);
	ret = netif_set_alias(dev, alias, len);
	netdev_unlock_ops(dev);

	return ret;
}
EXPORT_SYMBOL(dev_set_alias);

/**
 * dev_change_flags() - change device settings
 * @dev: device
 * @flags: device state flags
 * @extack: netlink extended ack
 *
 * Change settings on a device based on the state flags. The flags are
 * in the userspace exported format.
 *
 * Return: 0 on success, -errno on failure.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags,
		     struct netlink_ext_ack *extack)
{
	int ret;

	netdev_lock_ops(dev);
	ret = netif_change_flags(dev, flags, extack);
	netdev_unlock_ops(dev);

	return ret;
}
EXPORT_SYMBOL(dev_change_flags);

/**
 * dev_set_group() - change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	netdev_lock_ops(dev);
	netif_set_group(dev, new_group);
	netdev_unlock_ops(dev);
}

int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
			     struct netlink_ext_ack *extack)
{
	int ret;

	down_write(&dev_addr_sem);
	netdev_lock_ops(dev);
	ret = netif_set_mac_address(dev, sa, extack);
	netdev_unlock_ops(dev);
	up_write(&dev_addr_sem);

	return ret;
}
EXPORT_SYMBOL(dev_set_mac_address_user);

/**
 * dev_change_net_namespace() - move device to different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *	 is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 *
 * Return: 0 on success, -errno on failure.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net,
			     const char *pat)
{
	return __dev_change_net_namespace(dev, net, pat, 0, NULL);
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

/**
 * dev_change_carrier() - change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 *
 * Return: 0 on success, -errno on failure.
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	int ret;

	netdev_lock_ops(dev);
	ret = netif_change_carrier(dev, new_carrier);
	netdev_unlock_ops(dev);

	return ret;
}

/**
 * dev_change_tx_queue_len() - change TX queue length of a netdevice
 * @dev: device
 * @new_len: new tx queue length
 *
 * Return: 0 on success, -errno on failure.
 */
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	int ret;

	netdev_lock_ops(dev);
	ret = netif_change_tx_queue_len(dev, new_len);
	netdev_unlock_ops(dev);

	return ret;
}

/**
 * dev_change_proto_down() - set carrier according to proto_down
 * @dev: device
 * @proto_down: new value
 *
 * Return: 0 on success, -errno on failure.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	int ret;

	netdev_lock_ops(dev);
	ret = netif_change_proto_down(dev, proto_down);
	netdev_unlock_ops(dev);

	return ret;
}

/**
 * dev_open() - prepare an interface for use
 * @dev: device to open
 * @extack: netlink extended ack
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 *
 * Return: 0 on success, -errno on failure.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	netdev_lock_ops(dev);
	ret = netif_open(dev, extack);
	netdev_unlock_ops(dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

/**
 * dev_close() - shutdown an interface
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
void dev_close(struct net_device *dev)
{
	netdev_lock_ops(dev);
	netif_close(dev);
	netdev_unlock_ops(dev);
}
EXPORT_SYMBOL(dev_close);

int dev_eth_ioctl(struct net_device *dev, struct ifreq *ifr, unsigned int cmd)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret = -ENODEV;

	if (!ops->ndo_eth_ioctl)
		return -EOPNOTSUPP;

	netdev_lock_ops(dev);
	if (netif_device_present(dev))
		ret = ops->ndo_eth_ioctl(dev, ifr, cmd);
	netdev_unlock_ops(dev);

	return ret;
}
EXPORT_SYMBOL(dev_eth_ioctl);

int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int ret;

	netdev_lock_ops(dev);
	ret = netif_set_mtu(dev, new_mtu);
	netdev_unlock_ops(dev);

	return ret;
}
EXPORT_SYMBOL(dev_set_mtu);

/**
 * dev_disable_lro() - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device. Must be
 * called under RTNL. This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	netdev_lock_ops(dev);
	netif_disable_lro(dev);
	netdev_unlock_ops(dev);
}
EXPORT_SYMBOL(dev_disable_lro);

/**
 * dev_set_allmulti() - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface keeps listening
 * to all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 *
 * Return: 0 on success, -errno on failure.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	int ret;

	netdev_lock_ops(dev);
	ret = netif_set_allmulti(dev, inc, true);
	netdev_unlock_ops(dev);

	return ret;
}
EXPORT_SYMBOL(dev_set_allmulti);

/**
 * dev_set_mac_address() - change Media Access Control Address
 * @dev: device
 * @sa: new address
 * @extack: netlink extended ack
 *
 * Change the hardware (MAC) address of the device
 *
 * Return: 0 on success, -errno on failure.
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
			struct netlink_ext_ack *extack)
{
	int ret;

	netdev_lock_ops(dev);
	ret = netif_set_mac_address(dev, sa, extack);
	netdev_unlock_ops(dev);

	return ret;
}
EXPORT_SYMBOL(dev_set_mac_address);

int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
{
	int ret;

	netdev_lock_ops(dev);
	ret = netif_xdp_propagate(dev, bpf);
	netdev_unlock_ops(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_xdp_propagate);

/**
 * netdev_state_change() - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	netdev_lock_ops(dev);
	netif_state_change(dev);
	netdev_unlock_ops(dev);
}
EXPORT_SYMBOL(netdev_state_change);
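/*
 * Editor's note: every helper in the file above follows the same shape: take
 * the per-device ops lock, delegate to the netif_*() implementation, release
 * the lock, and return the result, so callers never deal with locking. A
 * generic user-space sketch of that wrapper pattern (all names hypothetical):
 */
#include <pthread.h>
#include <stdio.h>

struct demo_device {
	pthread_mutex_t lock;
	int mtu;
};

/* The locked implementation: callers must already hold dev->lock */
static int locked_set_mtu(struct demo_device *dev, int new_mtu)
{
	if (new_mtu < 68)
		return -1;	/* stand-in for -EINVAL */
	dev->mtu = new_mtu;
	return 0;
}

/* The public wrapper: all locking is hidden from the caller */
static int demo_dev_set_mtu(struct demo_device *dev, int new_mtu)
{
	int ret;

	pthread_mutex_lock(&dev->lock);
	ret = locked_set_mtu(dev, new_mtu);
	pthread_mutex_unlock(&dev->lock);
	return ret;
}

int main(void)
{
	struct demo_device dev = { PTHREAD_MUTEX_INITIALIZER, 1500 };

	printf("%d %d\n", demo_dev_set_mtu(&dev, 9000), dev.mtu);
	return 0;
}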
// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the symbol table type.
 *
 * Author : Stephen Smalley, <stephen.smalley.work@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "symtab.h"

static unsigned int symhash(const void *key)
{
	/*
	 * djb2a
	 * Public domain from cdb v0.75
	 */
	unsigned int hash = 5381;
	unsigned char c;

	while ((c = *(const unsigned char *)key++))
		hash = ((hash << 5) + hash) ^ c;

	return hash;
}

static int symcmp(const void *key1, const void *key2)
{
	const char *keyp1, *keyp2;

	keyp1 = key1;
	keyp2 = key2;
	return strcmp(keyp1, keyp2);
}

static const struct hashtab_key_params symtab_key_params = {
	.hash = symhash,
	.cmp = symcmp,
};

int symtab_init(struct symtab *s, u32 size)
{
	s->nprim = 0;
	return hashtab_init(&s->table, size);
}

int symtab_insert(struct symtab *s, char *name, void *datum)
{
	return hashtab_insert(&s->table, name, datum, symtab_key_params);
}

void *symtab_search(struct symtab *s, const char *name)
{
	return hashtab_search(&s->table, name, symtab_key_params);
}
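/*
 * Editor's note: the hash above is djb2a (hash = hash * 33 XOR c, seeded
 * with 5381), as used by cdb. A standalone sketch for experimenting with it;
 * the sample keys are merely SELinux-flavoured strings:
 */
#include <stdio.h>

static unsigned int djb2a(const char *key)
{
	unsigned int hash = 5381;
	unsigned char c;

	while ((c = (unsigned char)*key++))
		hash = ((hash << 5) + hash) ^ c;	/* hash * 33 ^ c */
	return hash;
}

int main(void)
{
	const char *keys[] = { "system_u", "object_r", "unconfined_t" };

	for (int i = 0; i < 3; i++)
		printf("%-14s -> %u\n", keys[i], djb2a(keys[i]));
	return 0;
}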
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

/* Encode a queue depth in bytes into HTT's exponent+factor byte format:
 * depth ~= factor << (7 + 3 * exp). For example, ~100000 bytes encodes as
 * exp = 2, factor = 12 (~98304 bytes). Depths too large to encode saturate
 * to 0xff.
 */
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (count > 0)
		factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, NULL, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %u tid %u due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %u tid %u count %u\n",
		   peer_id, tid, count);
}

static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	if (htt->num_pending_tx == 0)
		wake_up(&htt->empty_tx_wq);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}

int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	spin_lock_bh(&htt->tx_lock);
	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);
	spin_unlock_bh(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %u\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_32)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf_32);
	htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_32)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_64)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf_64);
	htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_64)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_32)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev, size,
			  htt->frag_desc.vaddr_desc_32,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_32) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}

	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_64)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);

	dma_free_coherent(htt->ar->dev, size,
			  htt->frag_desc.vaddr_desc_64,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc_64);
	htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_64) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}

	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}

static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
	int ret;
	size_t size;

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
	return ret;
}

static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ret = ath10k_htt_alloc_txbuff(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
		return ret;
	}

	ret = ath10k_htt_alloc_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	ath10k_htt_free_frag_desc(htt);

free_txbuf:
	ath10k_htt_free_txbuff(htt);

	return ret;
}

int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	if (htt->tx_mem_allocated)
		return 0;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	ret = ath10k_htt_tx_alloc_buf(htt);
	if (ret)
		goto free_idr_pending_tx;

	htt->tx_mem_allocated = true;

	return 0;

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
	if (!htt->tx_mem_allocated)
		return;

	ath10k_htt_free_txbuff(htt);
	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_free_frag_desc(htt);
	ath10k_htt_tx_free_txdone_fifo(htt);
	htt->tx_mem_allocated = false;
}

static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
{
	ath10k_htc_stop_hl(htt->ar);
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
}

void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
	ath10k_htt_flush_tx_queue(htt);
	idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	ath10k_htt_tx_stop(htt);
	ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
{
	queue_work(ar->workqueue, &ar->bundle_tx_work);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};
	struct htt_cmd_hdr *htt_hdr;
	struct htt_data_tx_desc *desc_hdr = NULL;
	u16 flags1 = 0;
	u8 msg_type = 0;

	if (htt->disable_tx_comp) {
		htt_hdr = (struct htt_cmd_hdr *)skb->data;
		msg_type = htt_hdr->msg_type;

		if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) {
			desc_hdr = (struct htt_data_tx_desc *)
				(skb->data + sizeof(*htt_hdr));
			flags1 = __le16_to_cpu(desc_hdr->flags1);
			skb_pull(skb, sizeof(struct htt_cmd_hdr));
			skb_pull(skb, sizeof(struct htt_data_tx_desc));
		}
	}

	dev_kfree_skb_any(skb);

	if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM))
		return;

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx complete msdu id:%u ,flags1:%x\n",
		   __le16_to_cpu(desc_hdr->id), flags1);

	if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE)
		return;

	tx_done.status = HTT_TX_COMPL_STATE_ACK;
	tx_done.msdu_id = __le16_to_cpu(desc_hdr->id);
	ath10k_txrx_tx_unref(&ar->htt, &tx_done);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
			     u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 24 bit masks so no need to worry
	 * about endian support
	 */
	memcpy(req->upload_types, &mask, 3);
	memcpy(req->reset_types, &reset_mask, 3);
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg32 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg32;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg64 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg64;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
	cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw,
					      struct htt_rx_ring_setup_ring32 *rx_ring)
{
	ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets);
}

static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw,
					      struct htt_rx_ring_setup_ring64 *rx_ring)
{
	ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets);
}

static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_32(hw, ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring64 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/* HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_64.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_64.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_64(hw, ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
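	/* Annotation (assumption, not in the original file): both checks
	 * here are compile-time assertions. The first enforces the 4-byte
	 * "word" requirement mentioned above; the second additionally
	 * requires HTT_RX_BUF_SIZE to be a whole multiple of the maximum
	 * cache line size, presumably so rx buffers stay cache-line
	 * aligned for DMA.
	 */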
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;

	memset(ring, 0, sizeof(*ring));
	ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt,
					  u8 max_subfrms_ampdu,
					  u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */
	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt,
					  u8 max_subfrms_ampdu,
					  u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf_v2 *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */
	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf_v2);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf_v2;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echo-ed back only for host driver convenience
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}

static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		return ar->scan.vdev_id;
	} else if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		return arvif->vdev_id;
	} else if (ar->monitor_started) {
		return ar->monitor_vdev_id;
	} else {
		return 0;
	}
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	const u8 *peer_addr;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		peer_addr = hdr->addr1;
		if (is_multicast_ether_addr(peer_addr)) {
			skb_put(msdu, sizeof(struct ieee80211_mmie_16));
		} else {
			if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
			    skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
				skb_put(msdu, IEEE80211_GCMP_MIC_LEN);
			else
				skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

#define HTT_TX_HL_NEEDED_HEADROOM \
	(unsigned int)(sizeof(struct htt_cmd_hdr) + \
	sizeof(struct htt_data_tx_desc) + \
	sizeof(struct ath10k_htc_hdr))

static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	int res, data_len;
	struct htt_cmd_hdr *cmd_hdr;
	struct htt_data_tx_desc *tx_desc;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *tmp_skb;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	u8 flags0 = 0;
	u16 flags1 = 0;
	u16 msdu_id = 0;

	if (!is_eth) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

		if ((ieee80211_is_action(hdr->frame_control) ||
		     ieee80211_is_deauth(hdr->frame_control) ||
		     ieee80211_is_disassoc(hdr->frame_control)) &&
		     ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	data_len = msdu->len;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		fallthrough;
	case ATH10K_HW_TXRX_ETHERNET:
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		if (htt->disable_tx_comp)
			flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE;
		break;
	}

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
	}

	/* Prepend the HTT header and TX desc struct to the data message
	 * and realloc the skb if it does not have enough headroom.
	 */
	if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
		tmp_skb = msdu;

		ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
			   "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
			   skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
		msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
		kfree_skb(tmp_skb);
		if (!msdu) {
			ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
			res = -ENOMEM;
			goto out;
		}
	}

	if (ar->bus_param.hl_msdu_ids) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
		res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
		if (res < 0) {
			ath10k_err(ar, "msdu_id allocation failed %d\n", res);
			goto out;
		}
		msdu_id = res;
	}

	/* As msdu is freed by mac80211 (in ieee80211_tx_status()) and by
	 * ath10k (in ath10k_htt_htc_tx_complete()) we have to increase
	 * reference by one to avoid a use-after-free case and a double
	 * free.
	 */
	skb_get(msdu);

	skb_push(msdu, sizeof(*cmd_hdr));
	skb_push(msdu, sizeof(*tx_desc));
	cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
	tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));

	cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	tx_desc->flags0 = flags0;
	tx_desc->flags1 = __cpu_to_le16(flags1);
	tx_desc->len = __cpu_to_le16(data_len);
	tx_desc->id = __cpu_to_le16(msdu_id);
	tx_desc->frags_paddr = 0; /* always zero */
	/* Initialize peer_id to INVALID_PEER because this is NOT
	 * Reinjection path
	 */
	tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);

out:
	return res;
}

static int ath10k_htt_tx_32(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_32 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	struct htt_msdu_ext_desc *ext_desc_t = NULL;

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);

	if (!is_eth) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

		if ((ieee80211_is_action(hdr->frame_control) ||
		     ieee80211_is_deauth(hdr->frame_control) ||
		     ieee80211_is_disassoc(hdr->frame_control)) &&
		     ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
			   txmode == ATH10K_HW_TXRX_RAW &&
			   ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		fallthrough;
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_32;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}
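	/* Annotation (assumption, not in the original file): at this point
	 * frags_paddr points either at this msdu's slot in the contiguous
	 * fragment-descriptor array, at the frag list embedded in txbuf, or
	 * (for mgmt frames) directly at the frame itself. The transfer below
	 * then goes out as two scatter items: the prebuilt txbuf (HTC header
	 * + HTT command header + tx descriptor) and the first prefetch_len
	 * bytes of the frame.
	 */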
	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */
	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
		   &skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

static int ath10k_htt_tx_64(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_64 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	dma_addr_t frags_paddr = 0;
	dma_addr_t txbuf_paddr;
	struct htt_msdu_ext_desc_64 *ext_desc = NULL;
	struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);

	if (!is_eth) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

		if ((ieee80211_is_action(hdr->frame_control) ||
		     ieee80211_is_deauth(hdr->frame_control) ||
		     ieee80211_is_disassoc(hdr->frame_control)) &&
		     ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
			   txmode == ATH10K_HW_TXRX_RAW &&
			   ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		fallthrough;
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_64;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc_64));
			frags = (struct htt_data_tx_desc_frag *)
				&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
			   (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
			frags[1].tword_addr.paddr_lo = 0;
			frags[1].tword_addr.paddr_hi = 0;
			frags[1].tword_addr.len_16 = 0;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */
	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc) {
			memset(ext_desc->tso_flag, 0,
			       sizeof(ext_desc->tso_flag));
			ext_desc->tso_flag[3] |=
				__cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
		}
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);

	/* fill fragment descriptor */
	txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
		   &skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
	.htt_tx = ath10k_htt_tx_32,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
	.htt_tx = ath10k_htt_tx_64,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_tx = ath10k_htt_tx_hl,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
	.htt_flush_tx = ath10k_htt_flush_tx_queue,
};

void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		htt->tx_ops = &htt_tx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->tx_ops = &htt_tx_ops_64;
	else
		htt->tx_ops = &htt_tx_ops_32;
}
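/* Annotation (assumption, not in the original file): the tx_ops tables above
 * are consumed through thin inline wrappers, roughly of this shape (sketch
 * based on the dispatch pattern; wrapper name and location in htt.h assumed):
 *
 *	static inline int ath10k_htt_tx(struct ath10k_htt *htt,
 *					enum ath10k_hw_txrx_mode txmode,
 *					struct sk_buff *msdu)
 *	{
 *		return htt->tx_ops->htt_tx(htt, txmode, msdu);
 *	}
 *
 * so the 32-bit, 64-bit and high-latency paths are selected once by
 * ath10k_htt_set_tx_ops() at setup time rather than branched on per packet.
 */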
// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus Host Device
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit);

static struct ida gb_hd_bus_id_map;

int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
		 bool async)
{
	if (!hd || !hd->driver || !hd->driver->output)
		return -EINVAL;
	return hd->driver->output(hd, req, size, cmd, async);
}
EXPORT_SYMBOL_GPL(gb_hd_output);

static ssize_t bus_id_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gb_host_device *hd = to_gb_host_device(dev);

	return sprintf(buf, "%d\n", hd->bus_id);
}
static DEVICE_ATTR_RO(bus_id);

static struct attribute *bus_attrs[] = {
	&dev_attr_bus_id.attr,
	NULL
};
ATTRIBUTE_GROUPS(bus);

int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id)
{
	struct ida *id_map = &hd->cport_id_map;
	int ret;

	ret = ida_alloc_range(id_map, cport_id, cport_id, GFP_KERNEL);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_hd_cport_reserve);

void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id)
{
	struct ida *id_map = &hd->cport_id_map;

	ida_free(id_map, cport_id);
}
EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);

/* Locking: Caller guarantees serialisation */
int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
			 unsigned long flags)
{
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	if (hd->driver->cport_allocate)
		return hd->driver->cport_allocate(hd, cport_id, flags);

	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports - 1;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	return ida_alloc_range(id_map, ida_start, ida_end, GFP_KERNEL);
}

/* Locking: Caller guarantees serialisation */
void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id)
{
	if (hd->driver->cport_release) {
		hd->driver->cport_release(hd, cport_id);
		return;
	}

	ida_free(&hd->cport_id_map, cport_id);
}

static void gb_hd_release(struct device *dev)
{
	struct gb_host_device *hd = to_gb_host_device(dev);

	trace_gb_hd_release(hd);

	if (hd->svc)
		gb_svc_put(hd->svc);
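	/* With the last reference gone, return the bus id to the global map
	 * and destroy the per-hd cport id map before freeing the structure.
	 */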
	ida_free(&gb_hd_bus_id_map, hd->bus_id);
	ida_destroy(&hd->cport_id_map);
	kfree(hd);
}

const struct device_type greybus_hd_type = {
	.name		= "greybus_host_device",
	.release	= gb_hd_release,
};

struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
				    struct device *parent,
				    size_t buffer_size_max,
				    size_t num_cports)
{
	struct gb_host_device *hd;
	int ret;

	/*
	 * Validate that the driver implements all of the callbacks
	 * so that we don't have to check for them every time we call them.
	 */
	if ((!driver->message_send) || (!driver->message_cancel)) {
		dev_err(parent, "mandatory hd-callbacks missing\n");
		return ERR_PTR(-EINVAL);
	}

	if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) {
		dev_err(parent, "greybus host-device buffers too small\n");
		return ERR_PTR(-EINVAL);
	}

	if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) {
		dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Make sure to never allocate messages larger than what the Greybus
	 * protocol supports.
	 */
	if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) {
		dev_warn(parent, "limiting buffer size to %u\n",
			 GB_OPERATION_MESSAGE_SIZE_MAX);
		buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX;
	}

	hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL);
	if (!hd)
		return ERR_PTR(-ENOMEM);

	ret = ida_alloc_min(&gb_hd_bus_id_map, 1, GFP_KERNEL);
	if (ret < 0) {
		kfree(hd);
		return ERR_PTR(ret);
	}
	hd->bus_id = ret;

	hd->driver = driver;
	INIT_LIST_HEAD(&hd->modules);
	INIT_LIST_HEAD(&hd->connections);
	ida_init(&hd->cport_id_map);
	hd->buffer_size_max = buffer_size_max;
	hd->num_cports = num_cports;

	hd->dev.parent = parent;
	hd->dev.bus = &greybus_bus_type;
	hd->dev.type = &greybus_hd_type;
	hd->dev.groups = bus_groups;
	hd->dev.dma_mask = hd->dev.parent->dma_mask;
	device_initialize(&hd->dev);
	dev_set_name(&hd->dev, "greybus%d", hd->bus_id);

	trace_gb_hd_create(hd);

	hd->svc = gb_svc_create(hd);
	if (!hd->svc) {
		dev_err(&hd->dev, "failed to create svc\n");
		put_device(&hd->dev);
		return ERR_PTR(-ENOMEM);
	}

	return hd;
}
EXPORT_SYMBOL_GPL(gb_hd_create);

int gb_hd_add(struct gb_host_device *hd)
{
	int ret;

	ret = device_add(&hd->dev);
	if (ret)
		return ret;

	ret = gb_svc_add(hd->svc);
	if (ret) {
		device_del(&hd->dev);
		return ret;
	}

	trace_gb_hd_add(hd);

	return 0;
}
EXPORT_SYMBOL_GPL(gb_hd_add);
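/* Annotation (assumption, not in the original file): a host-controller driver
 * typically pairs these calls roughly as follows (sketch; error handling
 * trimmed, and gb_hd_del()/gb_hd_put() assumed as the teardown counterparts
 * defined elsewhere in this file):
 *
 *	hd = gb_hd_create(&my_hd_driver, &pdev->dev,
 *			  buffer_size_max, num_cports);
 *	if (IS_ERR(hd))
 *		return PTR_ERR(hd);
 *	ret = gb_hd_add(hd);
 *	...
 *	gb_hd_del(hd);
 *	gb_hd_put(hd);
 */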