// SPDX-License-Identifier: GPL-1.0+
/*
 * n_tty.c --- implements the N_TTY line discipline.
 *
 * This code used to be in tty_io.c, but things are getting hairy
 * enough that it made sense to split things off. (The N_TTY
 * processing has changed so much that it's hardly recognizable,
 * anyway...)
 *
 * Note that the open routine for N_TTY is guaranteed never to return
 * an error. This is because Linux will fall back to setting a line
 * to N_TTY if it can not switch to any other line discipline.
 *
 * Written by Theodore Ts'o, Copyright 1994.
 *
 * This file also contains code originally written by Linus Torvalds,
 * Copyright 1991, 1992, 1993, and by Julian Cowley, Copyright 1994.
 *
 * Reduced memory usage for older ARM systems - Russell King.
 *
 * 2000/01/20	Fixed SMP locking on put_tty_queue using bits of
 *		the patch by Andrew J. Kroll <ag784@freenet.buffalo.edu>
 *		who actually finally proved there really was a race.
 *
 * 2002/03/18	Implemented n_tty_wakeup to send SIGIO POLL_OUTs to
 *		waiting writing processes-Sapan Bhatia <sapan@corewars.org>.
 *		Also fixed a bug in BLOCKING mode where n_tty_write returns
 *		EAGAIN
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/jiffies.h>
#include <linux/math.h>
#include <linux/poll.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tty.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "tty.h"

/*
 * Until this number of characters is queued in the xmit buffer, select will
 * return "we have room for writes".
 */
#define WAKEUP_CHARS 256

/*
 * This defines the low- and high-watermarks for throttling and
 * unthrottling the TTY driver. These watermarks are used for
 * controlling the space in the read buffer.
 */
#define TTY_THRESHOLD_THROTTLE		128 /* now based on remaining room */
#define TTY_THRESHOLD_UNTHROTTLE	128

/*
 * Special byte codes used in the echo buffer to represent operations
 * or special handling of characters. Bytes in the echo buffer that
 * are not part of such special blocks are treated as normal character
 * codes.
 */
#define ECHO_OP_START 0xff
#define ECHO_OP_MOVE_BACK_COL 0x80
#define ECHO_OP_SET_CANON_COL 0x81
#define ECHO_OP_ERASE_TAB 0x82

#define ECHO_COMMIT_WATERMARK	256
#define ECHO_BLOCK		256
#define ECHO_DISCARD_WATERMARK	N_TTY_BUF_SIZE - (ECHO_BLOCK + 32)

#undef N_TTY_TRACE
#ifdef N_TTY_TRACE
# define n_tty_trace(f, args...)	trace_printk(f, ##args)
#else
# define n_tty_trace(f, args...)	no_printk(f, ##args)
#endif

struct n_tty_data {
	/* producer-published */
	size_t read_head;
	size_t commit_head;
	size_t canon_head;
	size_t echo_head;
	size_t echo_commit;
	size_t echo_mark;
	DECLARE_BITMAP(char_map, 256);

	/* private to n_tty_receive_overrun (single-threaded) */
	unsigned long overrun_time;
	unsigned int num_overrun;

	/* non-atomic */
	bool no_room;

	/* must hold exclusive termios_rwsem to reset these */
	unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1;
	unsigned char push:1;

	/* shared by producer and consumer */
	u8 read_buf[N_TTY_BUF_SIZE];
	DECLARE_BITMAP(read_flags, N_TTY_BUF_SIZE);
	u8 echo_buf[N_TTY_BUF_SIZE];

	/* consumer-published */
	size_t read_tail;
	size_t line_start;

	/* # of chars looked ahead (to find software flow control chars) */
	size_t lookahead_count;

	/* protected by output lock */
	unsigned int column;
	unsigned int canon_column;
	size_t echo_tail;

	struct mutex atomic_read_lock;
	struct mutex output_lock;
};

#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))

static inline size_t read_cnt(struct n_tty_data *ldata)
{
	return ldata->read_head - ldata->read_tail;
}

static inline u8 read_buf(struct n_tty_data *ldata, size_t i)
{
	return ldata->read_buf[MASK(i)];
}

static inline u8 *read_buf_addr(struct n_tty_data *ldata, size_t i)
{
	return &ldata->read_buf[MASK(i)];
}

static inline u8 echo_buf(struct n_tty_data *ldata, size_t i)
{
	smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
	return ldata->echo_buf[MASK(i)];
}

static inline u8 *echo_buf_addr(struct n_tty_data *ldata, size_t i)
{
	return &ldata->echo_buf[MASK(i)];
}

/* If we are not echoing the data, perhaps this is a secret so erase it */
static void zero_buffer(const struct tty_struct *tty, u8 *buffer, size_t size)
{
	if (L_ICANON(tty) && !L_ECHO(tty))
		memset(buffer, 0, size);
}

static void tty_copy(const struct tty_struct *tty, void *to, size_t tail,
		     size_t n)
{
	struct n_tty_data *ldata = tty->disc_data;
	size_t size = N_TTY_BUF_SIZE - tail;
	void *from = read_buf_addr(ldata, tail);

	if (n > size) {
		tty_audit_add_data(tty, from, size);
		memcpy(to, from, size);
		zero_buffer(tty, from, size);
		to += size;
		n -= size;
		from = ldata->read_buf;
	}

	tty_audit_add_data(tty, from, n);
	memcpy(to, from, n);
	zero_buffer(tty, from, n);
}

/**
 * n_tty_kick_worker - start input worker (if required)
 * @tty: terminal
 *
 * Re-schedules the flip buffer work if it may have stopped.
 *
 * Locking:
 *  * Caller holds exclusive %termios_rwsem, or
 *  * n_tty_read()/consumer path:
 *	holds non-exclusive %termios_rwsem
 */
static void n_tty_kick_worker(const struct tty_struct *tty)
{
	struct n_tty_data *ldata = tty->disc_data;

	/* Did the input worker stop? Restart it */
	if (unlikely(READ_ONCE(ldata->no_room))) {
		WRITE_ONCE(ldata->no_room, 0);

		WARN_RATELIMIT(tty->port->itty == NULL,
				"scheduling with invalid itty\n");
		/* see if ldisc has been killed - if so, this means that
		 * even though the ldisc has been halted and ->buf.work
		 * cancelled, ->buf.work is about to be rescheduled
		 */
		WARN_RATELIMIT(test_bit(TTY_LDISC_HALTED, &tty->flags),
			       "scheduling buffer work for halted ldisc\n");

		tty_buffer_restart_work(tty->port);
	}
}

static ssize_t chars_in_buffer(const struct tty_struct *tty)
{
	const struct n_tty_data *ldata = tty->disc_data;
	size_t head = ldata->icanon ? ldata->canon_head : ldata->commit_head;

	return head - ldata->read_tail;
}

/**
 * n_tty_write_wakeup - asynchronous I/O notifier
 * @tty: tty device
 *
 * Required for the ptys, serial driver etc. since processes that attach
 * themselves to the master and rely on ASYNC IO must be woken up.
 */
static void n_tty_write_wakeup(struct tty_struct *tty)
{
	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	kill_fasync(&tty->fasync, SIGIO, POLL_OUT);
}

static void n_tty_check_throttle(struct tty_struct *tty)
{
	struct n_tty_data *ldata = tty->disc_data;

	/*
	 * Check the remaining room for the input canonicalization
	 * mode. We don't want to throttle the driver if we're in
	 * canonical mode and don't have a newline yet!
	 */
	if (ldata->icanon && ldata->canon_head == ldata->read_tail)
		return;

	do {
		tty_set_flow_change(tty, TTY_THROTTLE_SAFE);
		if (N_TTY_BUF_SIZE - read_cnt(ldata) >= TTY_THRESHOLD_THROTTLE)
			break;
	} while (!tty_throttle_safe(tty));

	__tty_set_flow_change(tty, 0);
}

static void n_tty_check_unthrottle(struct tty_struct *tty)
{
	if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
		if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
			return;
		n_tty_kick_worker(tty);
		tty_wakeup(tty->link);
		return;
	}

	/* If there is enough space in the read buffer now, let the
	 * low-level driver know. We use chars_in_buffer() to
	 * check the buffer, as it now knows about canonical mode.
	 * Otherwise, if the driver is throttled and the line is
	 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
	 * we won't get any more characters.
*/ do { tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE); if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE) break; n_tty_kick_worker(tty); } while (!tty_unthrottle_safe(tty)); __tty_set_flow_change(tty, 0); } /** * put_tty_queue - add character to tty * @c: character * @ldata: n_tty data * * Add a character to the tty read_buf queue. * * Locking: * * n_tty_receive_buf()/producer path: * caller holds non-exclusive %termios_rwsem */ static inline void put_tty_queue(u8 c, struct n_tty_data *ldata) { *read_buf_addr(ldata, ldata->read_head) = c; ldata->read_head++; } /** * reset_buffer_flags - reset buffer state * @ldata: line disc data to reset * * Reset the read buffer counters and clear the flags. Called from * n_tty_open() and n_tty_flush_buffer(). * * Locking: * * caller holds exclusive %termios_rwsem, or * * (locking is not required) */ static void reset_buffer_flags(struct n_tty_data *ldata) { ldata->read_head = ldata->canon_head = ldata->read_tail = 0; ldata->commit_head = 0; ldata->line_start = 0; ldata->erasing = 0; bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); ldata->push = 0; ldata->lookahead_count = 0; } static void n_tty_packet_mode_flush(struct tty_struct *tty) { unsigned long flags; if (tty->link->ctrl.packet) { spin_lock_irqsave(&tty->ctrl.lock, flags); tty->ctrl.pktstatus |= TIOCPKT_FLUSHREAD; spin_unlock_irqrestore(&tty->ctrl.lock, flags); wake_up_interruptible(&tty->link->read_wait); } } /** * n_tty_flush_buffer - clean input queue * @tty: terminal device * * Flush the input buffer. Called when the tty layer wants the buffer flushed * (eg at hangup) or when the %N_TTY line discipline internally has to clean * the pending queue (for example some signals). * * Holds %termios_rwsem to exclude producer/consumer while buffer indices are * reset. * * Locking: %ctrl.lock, exclusive %termios_rwsem */ static void n_tty_flush_buffer(struct tty_struct *tty) { down_write(&tty->termios_rwsem); reset_buffer_flags(tty->disc_data); n_tty_kick_worker(tty); if (tty->link) n_tty_packet_mode_flush(tty); up_write(&tty->termios_rwsem); } /** * is_utf8_continuation - utf8 multibyte check * @c: byte to check * * Returns: true if the utf8 character @c is a multibyte continuation * character. We use this to correctly compute the on-screen size of the * character when printing. */ static inline int is_utf8_continuation(u8 c) { return (c & 0xc0) == 0x80; } /** * is_continuation - multibyte check * @c: byte to check * @tty: terminal device * * Returns: true if the utf8 character @c is a multibyte continuation character * and the terminal is in unicode mode. */ static inline int is_continuation(u8 c, const struct tty_struct *tty) { return I_IUTF8(tty) && is_utf8_continuation(c); } /** * do_output_char - output one character * @c: character (or partial unicode symbol) * @tty: terminal device * @space: space available in tty driver write buffer * * This is a helper function that handles one output character (including * special characters like TAB, CR, LF, etc.), doing OPOST processing and * putting the results in the tty driver's write buffer. * * Note that Linux currently ignores TABDLY, CRDLY, VTDLY, FFDLY and NLDLY. * They simply aren't relevant in the world today. If you ever need them, add * them here. * * Returns: the number of bytes of buffer space used or -1 if no space left. * * Locking: should be called under the %output_lock to protect the column state * and space left in the buffer. 
*/ static int do_output_char(u8 c, struct tty_struct *tty, int space) { struct n_tty_data *ldata = tty->disc_data; int spaces; if (!space) return -1; switch (c) { case '\n': if (O_ONLRET(tty)) ldata->column = 0; if (O_ONLCR(tty)) { if (space < 2) return -1; ldata->canon_column = ldata->column = 0; tty->ops->write(tty, "\r\n", 2); return 2; } ldata->canon_column = ldata->column; break; case '\r': if (O_ONOCR(tty) && ldata->column == 0) return 0; if (O_OCRNL(tty)) { c = '\n'; if (O_ONLRET(tty)) ldata->canon_column = ldata->column = 0; break; } ldata->canon_column = ldata->column = 0; break; case '\t': spaces = 8 - (ldata->column & 7); if (O_TABDLY(tty) == XTABS) { if (space < spaces) return -1; ldata->column += spaces; tty->ops->write(tty, " ", spaces); return spaces; } ldata->column += spaces; break; case '\b': if (ldata->column > 0) ldata->column--; break; default: if (!iscntrl(c)) { if (O_OLCUC(tty)) c = toupper(c); if (!is_continuation(c, tty)) ldata->column++; } break; } tty_put_char(tty, c); return 1; } /** * process_output - output post processor * @c: character (or partial unicode symbol) * @tty: terminal device * * Output one character with OPOST processing. * * Returns: -1 when the output device is full and the character must be * retried. * * Locking: %output_lock to protect column state and space left (also, this is *called from n_tty_write() under the tty layer write lock). */ static int process_output(u8 c, struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; int space, retval; mutex_lock(&ldata->output_lock); space = tty_write_room(tty); retval = do_output_char(c, tty, space); mutex_unlock(&ldata->output_lock); if (retval < 0) return -1; else return 0; } /** * process_output_block - block post processor * @tty: terminal device * @buf: character buffer * @nr: number of bytes to output * * Output a block of characters with OPOST processing. * * This path is used to speed up block console writes, among other things when * processing blocks of output data. It handles only the simple cases normally * found and helps to generate blocks of symbols for the console driver and * thus improve performance. * * Returns: the number of characters output. * * Locking: %output_lock to protect column state and space left (also, this is * called from n_tty_write() under the tty layer write lock). 
*/ static ssize_t process_output_block(struct tty_struct *tty, const u8 *buf, unsigned int nr) { struct n_tty_data *ldata = tty->disc_data; int space; int i; const u8 *cp; mutex_lock(&ldata->output_lock); space = tty_write_room(tty); if (space <= 0) { mutex_unlock(&ldata->output_lock); return space; } if (nr > space) nr = space; for (i = 0, cp = buf; i < nr; i++, cp++) { u8 c = *cp; switch (c) { case '\n': if (O_ONLRET(tty)) ldata->column = 0; if (O_ONLCR(tty)) goto break_out; ldata->canon_column = ldata->column; break; case '\r': if (O_ONOCR(tty) && ldata->column == 0) goto break_out; if (O_OCRNL(tty)) goto break_out; ldata->canon_column = ldata->column = 0; break; case '\t': goto break_out; case '\b': if (ldata->column > 0) ldata->column--; break; default: if (!iscntrl(c)) { if (O_OLCUC(tty)) goto break_out; if (!is_continuation(c, tty)) ldata->column++; } break; } } break_out: i = tty->ops->write(tty, buf, i); mutex_unlock(&ldata->output_lock); return i; } static int n_tty_process_echo_ops(struct tty_struct *tty, size_t *tail, int space) { struct n_tty_data *ldata = tty->disc_data; u8 op; /* * Since add_echo_byte() is called without holding output_lock, we * might see only portion of multi-byte operation. */ if (MASK(ldata->echo_commit) == MASK(*tail + 1)) return -ENODATA; /* * If the buffer byte is the start of a multi-byte operation, get the * next byte, which is either the op code or a control character value. */ op = echo_buf(ldata, *tail + 1); switch (op) { case ECHO_OP_ERASE_TAB: { unsigned int num_chars, num_bs; if (MASK(ldata->echo_commit) == MASK(*tail + 2)) return -ENODATA; num_chars = echo_buf(ldata, *tail + 2); /* * Determine how many columns to go back in order to erase the * tab. This depends on the number of columns used by other * characters within the tab area. If this (modulo 8) count is * from the start of input rather than from a previous tab, we * offset by canon column. Otherwise, tab spacing is normal. */ if (!(num_chars & 0x80)) num_chars += ldata->canon_column; num_bs = 8 - (num_chars & 7); if (num_bs > space) return -ENOSPC; space -= num_bs; while (num_bs--) { tty_put_char(tty, '\b'); if (ldata->column > 0) ldata->column--; } *tail += 3; break; } case ECHO_OP_SET_CANON_COL: ldata->canon_column = ldata->column; *tail += 2; break; case ECHO_OP_MOVE_BACK_COL: if (ldata->column > 0) ldata->column--; *tail += 2; break; case ECHO_OP_START: /* This is an escaped echo op start code */ if (!space) return -ENOSPC; tty_put_char(tty, ECHO_OP_START); ldata->column++; space--; *tail += 2; break; default: /* * If the op is not a special byte code, it is a ctrl char * tagged to be echoed as "^X" (where X is the letter * representing the control char). Note that we must ensure * there is enough space for the whole ctrl pair. */ if (space < 2) return -ENOSPC; tty_put_char(tty, '^'); tty_put_char(tty, op ^ 0100); ldata->column += 2; space -= 2; *tail += 2; break; } return space; } /** * __process_echoes - write pending echo characters * @tty: terminal device * * Write previously buffered echo (and other ldisc-generated) characters to the * tty. * * Characters generated by the ldisc (including echoes) need to be buffered * because the driver's write buffer can fill during heavy program output. * Echoing straight to the driver will often fail under these conditions, * causing lost characters and resulting mismatches of ldisc state information. 
* * Since the ldisc state must represent the characters actually sent to the * driver at the time of the write, operations like certain changes in column * state are also saved in the buffer and executed here. * * A circular fifo buffer is used so that the most recent characters are * prioritized. Also, when control characters are echoed with a prefixed "^", * the pair is treated atomically and thus not separated. * * Locking: callers must hold %output_lock. */ static size_t __process_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; int space, old_space; size_t tail; u8 c; old_space = space = tty_write_room(tty); tail = ldata->echo_tail; while (MASK(ldata->echo_commit) != MASK(tail)) { c = echo_buf(ldata, tail); if (c == ECHO_OP_START) { int ret = n_tty_process_echo_ops(tty, &tail, space); if (ret == -ENODATA) goto not_yet_stored; if (ret < 0) break; space = ret; } else { if (O_OPOST(tty)) { int retval = do_output_char(c, tty, space); if (retval < 0) break; space -= retval; } else { if (!space) break; tty_put_char(tty, c); space -= 1; } tail += 1; } } /* If the echo buffer is nearly full (so that the possibility exists * of echo overrun before the next commit), then discard enough * data at the tail to prevent a subsequent overrun */ while (ldata->echo_commit > tail && ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { if (echo_buf(ldata, tail) == ECHO_OP_START) { if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB) tail += 3; else tail += 2; } else tail++; } not_yet_stored: ldata->echo_tail = tail; return old_space - space; } static void commit_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; size_t nr, old, echoed; size_t head; mutex_lock(&ldata->output_lock); head = ldata->echo_head; ldata->echo_mark = head; old = ldata->echo_commit - ldata->echo_tail; /* Process committed echoes if the accumulated # of bytes * is over the threshold (and try again each time another * block is accumulated) */ nr = head - ldata->echo_tail; if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK)) { mutex_unlock(&ldata->output_lock); return; } ldata->echo_commit = head; echoed = __process_echoes(tty); mutex_unlock(&ldata->output_lock); if (echoed && tty->ops->flush_chars) tty->ops->flush_chars(tty); } static void process_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; size_t echoed; if (ldata->echo_mark == ldata->echo_tail) return; mutex_lock(&ldata->output_lock); ldata->echo_commit = ldata->echo_mark; echoed = __process_echoes(tty); mutex_unlock(&ldata->output_lock); if (echoed && tty->ops->flush_chars) tty->ops->flush_chars(tty); } /* NB: echo_mark and echo_head should be equivalent here */ static void flush_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; if ((!L_ECHO(tty) && !L_ECHONL(tty)) || ldata->echo_commit == ldata->echo_head) return; mutex_lock(&ldata->output_lock); ldata->echo_commit = ldata->echo_head; __process_echoes(tty); mutex_unlock(&ldata->output_lock); } /** * add_echo_byte - add a byte to the echo buffer * @c: unicode byte to echo * @ldata: n_tty data * * Add a character or operation byte to the echo buffer. */ static inline void add_echo_byte(u8 c, struct n_tty_data *ldata) { *echo_buf_addr(ldata, ldata->echo_head) = c; smp_wmb(); /* Matches smp_rmb() in echo_buf(). */ ldata->echo_head++; } /** * echo_move_back_col - add operation to move back a column * @ldata: n_tty data * * Add an operation to the echo buffer to move back one column. 
*/ static void echo_move_back_col(struct n_tty_data *ldata) { add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(ECHO_OP_MOVE_BACK_COL, ldata); } /** * echo_set_canon_col - add operation to set the canon column * @ldata: n_tty data * * Add an operation to the echo buffer to set the canon column to the current * column. */ static void echo_set_canon_col(struct n_tty_data *ldata) { add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(ECHO_OP_SET_CANON_COL, ldata); } /** * echo_erase_tab - add operation to erase a tab * @num_chars: number of character columns already used * @after_tab: true if num_chars starts after a previous tab * @ldata: n_tty data * * Add an operation to the echo buffer to erase a tab. * * Called by the eraser function, which knows how many character columns have * been used since either a previous tab or the start of input. This * information will be used later, along with canon column (if applicable), to * go back the correct number of columns. */ static void echo_erase_tab(unsigned int num_chars, int after_tab, struct n_tty_data *ldata) { add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(ECHO_OP_ERASE_TAB, ldata); /* We only need to know this modulo 8 (tab spacing) */ num_chars &= 7; /* Set the high bit as a flag if num_chars is after a previous tab */ if (after_tab) num_chars |= 0x80; add_echo_byte(num_chars, ldata); } /** * echo_char_raw - echo a character raw * @c: unicode byte to echo * @ldata: line disc data * * Echo user input back onto the screen. This must be called only when * L_ECHO(tty) is true. Called from the &tty_driver.receive_buf() path. * * This variant does not treat control characters specially. */ static void echo_char_raw(u8 c, struct n_tty_data *ldata) { if (c == ECHO_OP_START) { add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(ECHO_OP_START, ldata); } else { add_echo_byte(c, ldata); } } /** * echo_char - echo a character * @c: unicode byte to echo * @tty: terminal device * * Echo user input back onto the screen. This must be called only when * L_ECHO(tty) is true. Called from the &tty_driver.receive_buf() path. * * This variant tags control characters to be echoed as "^X" (where X is the * letter representing the control char). */ static void echo_char(u8 c, const struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; if (c == ECHO_OP_START) { add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(ECHO_OP_START, ldata); } else { if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t') add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(c, ldata); } } /** * finish_erasing - complete erase * @ldata: n_tty data */ static inline void finish_erasing(struct n_tty_data *ldata) { if (ldata->erasing) { echo_char_raw('/', ldata); ldata->erasing = 0; } } /** * eraser - handle erase function * @c: character input * @tty: terminal device * * Perform erase and necessary output when an erase character is present in the * stream from the driver layer. Handles the complexities of UTF-8 multibyte * symbols. * * Locking: n_tty_receive_buf()/producer path: * caller holds non-exclusive %termios_rwsem */ static void eraser(u8 c, const struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; enum { ERASE, WERASE, KILL } kill_type; size_t head; size_t cnt; int seen_alnums; if (ldata->read_head == ldata->canon_head) { /* process_output('\a', tty); */ /* what do you think? 
*/ return; } if (c == ERASE_CHAR(tty)) kill_type = ERASE; else if (c == WERASE_CHAR(tty)) kill_type = WERASE; else { if (!L_ECHO(tty)) { ldata->read_head = ldata->canon_head; return; } if (!L_ECHOK(tty) || !L_ECHOKE(tty) || !L_ECHOE(tty)) { ldata->read_head = ldata->canon_head; finish_erasing(ldata); echo_char(KILL_CHAR(tty), tty); /* Add a newline if ECHOK is on and ECHOKE is off. */ if (L_ECHOK(tty)) echo_char_raw('\n', ldata); return; } kill_type = KILL; } seen_alnums = 0; while (MASK(ldata->read_head) != MASK(ldata->canon_head)) { head = ldata->read_head; /* erase a single possibly multibyte character */ do { head--; c = read_buf(ldata, head); } while (is_continuation(c, tty) && MASK(head) != MASK(ldata->canon_head)); /* do not partially erase */ if (is_continuation(c, tty)) break; if (kill_type == WERASE) { /* Equivalent to BSD's ALTWERASE. */ if (isalnum(c) || c == '_') seen_alnums++; else if (seen_alnums) break; } cnt = ldata->read_head - head; ldata->read_head = head; if (L_ECHO(tty)) { if (L_ECHOPRT(tty)) { if (!ldata->erasing) { echo_char_raw('\\', ldata); ldata->erasing = 1; } /* if cnt > 1, output a multi-byte character */ echo_char(c, tty); while (--cnt > 0) { head++; echo_char_raw(read_buf(ldata, head), ldata); echo_move_back_col(ldata); } } else if (kill_type == ERASE && !L_ECHOE(tty)) { echo_char(ERASE_CHAR(tty), tty); } else if (c == '\t') { unsigned int num_chars = 0; int after_tab = 0; size_t tail = ldata->read_head; /* * Count the columns used for characters * since the start of input or after a * previous tab. * This info is used to go back the correct * number of columns. */ while (MASK(tail) != MASK(ldata->canon_head)) { tail--; c = read_buf(ldata, tail); if (c == '\t') { after_tab = 1; break; } else if (iscntrl(c)) { if (L_ECHOCTL(tty)) num_chars += 2; } else if (!is_continuation(c, tty)) { num_chars++; } } echo_erase_tab(num_chars, after_tab, ldata); } else { if (iscntrl(c) && L_ECHOCTL(tty)) { echo_char_raw('\b', ldata); echo_char_raw(' ', ldata); echo_char_raw('\b', ldata); } if (!iscntrl(c) || L_ECHOCTL(tty)) { echo_char_raw('\b', ldata); echo_char_raw(' ', ldata); echo_char_raw('\b', ldata); } } } if (kill_type == ERASE) break; } if (ldata->read_head == ldata->canon_head && L_ECHO(tty)) finish_erasing(ldata); } static void __isig(int sig, struct tty_struct *tty) { struct pid *tty_pgrp = tty_get_pgrp(tty); if (tty_pgrp) { kill_pgrp(tty_pgrp, sig, 1); put_pid(tty_pgrp); } } /** * isig - handle the ISIG optio * @sig: signal * @tty: terminal * * Called when a signal is being sent due to terminal input. Called from the * &tty_driver.receive_buf() path, so serialized. * * Performs input and output flush if !NOFLSH. In this context, the echo * buffer is 'output'. The signal is processed first to alert any current * readers or writers to discontinue and exit their i/o loops. 
* * Locking: %ctrl.lock */ static void isig(int sig, struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; if (L_NOFLSH(tty)) { /* signal only */ __isig(sig, tty); } else { /* signal and flush */ up_read(&tty->termios_rwsem); down_write(&tty->termios_rwsem); __isig(sig, tty); /* clear echo buffer */ mutex_lock(&ldata->output_lock); ldata->echo_head = ldata->echo_tail = 0; ldata->echo_mark = ldata->echo_commit = 0; mutex_unlock(&ldata->output_lock); /* clear output buffer */ tty_driver_flush_buffer(tty); /* clear input buffer */ reset_buffer_flags(tty->disc_data); /* notify pty master of flush */ if (tty->link) n_tty_packet_mode_flush(tty); up_write(&tty->termios_rwsem); down_read(&tty->termios_rwsem); } } /** * n_tty_receive_break - handle break * @tty: terminal * * An RS232 break event has been hit in the incoming bitstream. This can cause * a variety of events depending upon the termios settings. * * Locking: n_tty_receive_buf()/producer path: * caller holds non-exclusive termios_rwsem * * Note: may get exclusive %termios_rwsem if flushing input buffer */ static void n_tty_receive_break(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; if (I_IGNBRK(tty)) return; if (I_BRKINT(tty)) { isig(SIGINT, tty); return; } if (I_PARMRK(tty)) { put_tty_queue('\377', ldata); put_tty_queue('\0', ldata); } put_tty_queue('\0', ldata); } /** * n_tty_receive_overrun - handle overrun reporting * @tty: terminal * * Data arrived faster than we could process it. While the tty driver has * flagged this the bits that were missed are gone forever. * * Called from the receive_buf path so single threaded. Does not need locking * as num_overrun and overrun_time are function private. */ static void n_tty_receive_overrun(const struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; ldata->num_overrun++; if (time_is_before_jiffies(ldata->overrun_time + HZ)) { tty_warn(tty, "%u input overrun(s)\n", ldata->num_overrun); ldata->overrun_time = jiffies; ldata->num_overrun = 0; } } /** * n_tty_receive_parity_error - error notifier * @tty: terminal device * @c: character * * Process a parity error and queue the right data to indicate the error case * if necessary. * * Locking: n_tty_receive_buf()/producer path: * caller holds non-exclusive %termios_rwsem */ static void n_tty_receive_parity_error(const struct tty_struct *tty, u8 c) { struct n_tty_data *ldata = tty->disc_data; if (I_INPCK(tty)) { if (I_IGNPAR(tty)) return; if (I_PARMRK(tty)) { put_tty_queue('\377', ldata); put_tty_queue('\0', ldata); put_tty_queue(c, ldata); } else put_tty_queue('\0', ldata); } else put_tty_queue(c, ldata); } static void n_tty_receive_signal_char(struct tty_struct *tty, int signal, u8 c) { isig(signal, tty); if (I_IXON(tty)) start_tty(tty); if (L_ECHO(tty)) { echo_char(c, tty); commit_echoes(tty); } else process_echoes(tty); } static bool n_tty_is_char_flow_ctrl(struct tty_struct *tty, u8 c) { return c == START_CHAR(tty) || c == STOP_CHAR(tty); } /** * n_tty_receive_char_flow_ctrl - receive flow control chars * @tty: terminal device * @c: character * @lookahead_done: lookahead has processed this character already * * Receive and process flow control character actions. * * In case lookahead for flow control chars already handled the character in * advance to the normal receive, the actions are skipped during normal * receive. * * Returns true if @c is consumed as flow-control character, the character * must not be treated as normal character. 
*/ static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, u8 c, bool lookahead_done) { if (!n_tty_is_char_flow_ctrl(tty, c)) return false; if (lookahead_done) return true; if (c == START_CHAR(tty)) { start_tty(tty); process_echoes(tty); return true; } /* STOP_CHAR */ stop_tty(tty); return true; } static void n_tty_receive_handle_newline(struct tty_struct *tty, u8 c) { struct n_tty_data *ldata = tty->disc_data; set_bit(MASK(ldata->read_head), ldata->read_flags); put_tty_queue(c, ldata); smp_store_release(&ldata->canon_head, ldata->read_head); kill_fasync(&tty->fasync, SIGIO, POLL_IN); wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); } static bool n_tty_receive_char_canon(struct tty_struct *tty, u8 c) { struct n_tty_data *ldata = tty->disc_data; if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) || (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) { eraser(c, tty); commit_echoes(tty); return true; } if (c == LNEXT_CHAR(tty) && L_IEXTEN(tty)) { ldata->lnext = 1; if (L_ECHO(tty)) { finish_erasing(ldata); if (L_ECHOCTL(tty)) { echo_char_raw('^', ldata); echo_char_raw('\b', ldata); commit_echoes(tty); } } return true; } if (c == REPRINT_CHAR(tty) && L_ECHO(tty) && L_IEXTEN(tty)) { size_t tail = ldata->canon_head; finish_erasing(ldata); echo_char(c, tty); echo_char_raw('\n', ldata); while (MASK(tail) != MASK(ldata->read_head)) { echo_char(read_buf(ldata, tail), tty); tail++; } commit_echoes(tty); return true; } if (c == '\n') { if (L_ECHO(tty) || L_ECHONL(tty)) { echo_char_raw('\n', ldata); commit_echoes(tty); } n_tty_receive_handle_newline(tty, c); return true; } if (c == EOF_CHAR(tty)) { c = __DISABLED_CHAR; n_tty_receive_handle_newline(tty, c); return true; } if ((c == EOL_CHAR(tty)) || (c == EOL2_CHAR(tty) && L_IEXTEN(tty))) { /* * XXX are EOL_CHAR and EOL2_CHAR echoed?!? */ if (L_ECHO(tty)) { /* Record the column of first canon char. */ if (ldata->canon_head == ldata->read_head) echo_set_canon_col(ldata); echo_char(c, tty); commit_echoes(tty); } /* * XXX does PARMRK doubling happen for * EOL_CHAR and EOL2_CHAR? */ if (c == '\377' && I_PARMRK(tty)) put_tty_queue(c, ldata); n_tty_receive_handle_newline(tty, c); return true; } return false; } static void n_tty_receive_char_special(struct tty_struct *tty, u8 c, bool lookahead_done) { struct n_tty_data *ldata = tty->disc_data; if (I_IXON(tty) && n_tty_receive_char_flow_ctrl(tty, c, lookahead_done)) return; if (L_ISIG(tty)) { if (c == INTR_CHAR(tty)) { n_tty_receive_signal_char(tty, SIGINT, c); return; } else if (c == QUIT_CHAR(tty)) { n_tty_receive_signal_char(tty, SIGQUIT, c); return; } else if (c == SUSP_CHAR(tty)) { n_tty_receive_signal_char(tty, SIGTSTP, c); return; } } if (tty->flow.stopped && !tty->flow.tco_stopped && I_IXON(tty) && I_IXANY(tty)) { start_tty(tty); process_echoes(tty); } if (c == '\r') { if (I_IGNCR(tty)) return; if (I_ICRNL(tty)) c = '\n'; } else if (c == '\n' && I_INLCR(tty)) c = '\r'; if (ldata->icanon && n_tty_receive_char_canon(tty, c)) return; if (L_ECHO(tty)) { finish_erasing(ldata); if (c == '\n') echo_char_raw('\n', ldata); else { /* Record the column of first canon char. */ if (ldata->canon_head == ldata->read_head) echo_set_canon_col(ldata); echo_char(c, tty); } commit_echoes(tty); } /* PARMRK doubling check */ if (c == '\377' && I_PARMRK(tty)) put_tty_queue(c, ldata); put_tty_queue(c, ldata); } /** * n_tty_receive_char - perform processing * @tty: terminal device * @c: character * * Process an individual character of input received from the driver. 
This is * serialized with respect to itself by the rules for the driver above. * * Locking: n_tty_receive_buf()/producer path: * caller holds non-exclusive %termios_rwsem * publishes canon_head if canonical mode is active */ static void n_tty_receive_char(struct tty_struct *tty, u8 c) { struct n_tty_data *ldata = tty->disc_data; if (tty->flow.stopped && !tty->flow.tco_stopped && I_IXON(tty) && I_IXANY(tty)) { start_tty(tty); process_echoes(tty); } if (L_ECHO(tty)) { finish_erasing(ldata); /* Record the column of first canon char. */ if (ldata->canon_head == ldata->read_head) echo_set_canon_col(ldata); echo_char(c, tty); commit_echoes(tty); } /* PARMRK doubling check */ if (c == '\377' && I_PARMRK(tty)) put_tty_queue(c, ldata); put_tty_queue(c, ldata); } static void n_tty_receive_char_closing(struct tty_struct *tty, u8 c, bool lookahead_done) { if (I_ISTRIP(tty)) c &= 0x7f; if (I_IUCLC(tty) && L_IEXTEN(tty)) c = tolower(c); if (I_IXON(tty)) { if (!n_tty_receive_char_flow_ctrl(tty, c, lookahead_done) && tty->flow.stopped && !tty->flow.tco_stopped && I_IXANY(tty) && c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) && c != SUSP_CHAR(tty)) { start_tty(tty); process_echoes(tty); } } } static void n_tty_receive_char_flagged(struct tty_struct *tty, u8 c, u8 flag) { switch (flag) { case TTY_BREAK: n_tty_receive_break(tty); break; case TTY_PARITY: case TTY_FRAME: n_tty_receive_parity_error(tty, c); break; case TTY_OVERRUN: n_tty_receive_overrun(tty); break; default: tty_err(tty, "unknown flag %u\n", flag); break; } } static void n_tty_receive_char_lnext(struct tty_struct *tty, u8 c, u8 flag) { struct n_tty_data *ldata = tty->disc_data; ldata->lnext = 0; if (likely(flag == TTY_NORMAL)) { if (I_ISTRIP(tty)) c &= 0x7f; if (I_IUCLC(tty) && L_IEXTEN(tty)) c = tolower(c); n_tty_receive_char(tty, c); } else n_tty_receive_char_flagged(tty, c, flag); } /* Caller must ensure count > 0 */ static void n_tty_lookahead_flow_ctrl(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { struct n_tty_data *ldata = tty->disc_data; u8 flag = TTY_NORMAL; ldata->lookahead_count += count; if (!I_IXON(tty)) return; while (count--) { if (fp) flag = *fp++; if (likely(flag == TTY_NORMAL)) n_tty_receive_char_flow_ctrl(tty, *cp, false); cp++; } } static void n_tty_receive_buf_real_raw(const struct tty_struct *tty, const u8 *cp, size_t count) { struct n_tty_data *ldata = tty->disc_data; /* handle buffer wrap-around by a loop */ for (unsigned int i = 0; i < 2; i++) { size_t head = MASK(ldata->read_head); size_t n = min(count, N_TTY_BUF_SIZE - head); memcpy(read_buf_addr(ldata, head), cp, n); ldata->read_head += n; cp += n; count -= n; } } static void n_tty_receive_buf_raw(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { struct n_tty_data *ldata = tty->disc_data; u8 flag = TTY_NORMAL; while (count--) { if (fp) flag = *fp++; if (likely(flag == TTY_NORMAL)) put_tty_queue(*cp++, ldata); else n_tty_receive_char_flagged(tty, *cp++, flag); } } static void n_tty_receive_buf_closing(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count, bool lookahead_done) { u8 flag = TTY_NORMAL; while (count--) { if (fp) flag = *fp++; if (likely(flag == TTY_NORMAL)) n_tty_receive_char_closing(tty, *cp++, lookahead_done); } } static void n_tty_receive_buf_standard(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count, bool lookahead_done) { struct n_tty_data *ldata = tty->disc_data; u8 flag = TTY_NORMAL; while (count--) { u8 c = *cp++; if (fp) flag = *fp++; if (ldata->lnext) { n_tty_receive_char_lnext(tty, 
c, flag); continue; } if (unlikely(flag != TTY_NORMAL)) { n_tty_receive_char_flagged(tty, c, flag); continue; } if (I_ISTRIP(tty)) c &= 0x7f; if (I_IUCLC(tty) && L_IEXTEN(tty)) c = tolower(c); if (L_EXTPROC(tty)) { put_tty_queue(c, ldata); continue; } if (test_bit(c, ldata->char_map)) n_tty_receive_char_special(tty, c, lookahead_done); else n_tty_receive_char(tty, c); } } static void __receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { struct n_tty_data *ldata = tty->disc_data; bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty)); size_t la_count = min(ldata->lookahead_count, count); if (ldata->real_raw) n_tty_receive_buf_real_raw(tty, cp, count); else if (ldata->raw || (L_EXTPROC(tty) && !preops)) n_tty_receive_buf_raw(tty, cp, fp, count); else if (tty->closing && !L_EXTPROC(tty)) { if (la_count > 0) { n_tty_receive_buf_closing(tty, cp, fp, la_count, true); cp += la_count; if (fp) fp += la_count; count -= la_count; } if (count > 0) n_tty_receive_buf_closing(tty, cp, fp, count, false); } else { if (la_count > 0) { n_tty_receive_buf_standard(tty, cp, fp, la_count, true); cp += la_count; if (fp) fp += la_count; count -= la_count; } if (count > 0) n_tty_receive_buf_standard(tty, cp, fp, count, false); flush_echoes(tty); if (tty->ops->flush_chars) tty->ops->flush_chars(tty); } ldata->lookahead_count -= la_count; if (ldata->icanon && !L_EXTPROC(tty)) return; /* publish read_head to consumer */ smp_store_release(&ldata->commit_head, ldata->read_head); if (read_cnt(ldata)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); } } /** * n_tty_receive_buf_common - process input * @tty: device to receive input * @cp: input chars * @fp: flags for each char (if %NULL, all chars are %TTY_NORMAL) * @count: number of input chars in @cp * @flow: enable flow control * * Called by the terminal driver when a block of characters has been received. * This function must be called from soft contexts not from interrupt context. * The driver is responsible for making calls one at a time and in order (or * using flush_to_ldisc()). * * Returns: the # of input chars from @cp which were processed. * * In canonical mode, the maximum line length is 4096 chars (including the line * termination char); lines longer than 4096 chars are truncated. After 4095 * chars, input data is still processed but not stored. Overflow processing * ensures the tty can always receive more input until at least one line can be * read. * * In non-canonical mode, the read buffer will only accept 4095 chars; this * provides the necessary space for a newline char if the input mode is * switched to canonical. * * Note it is possible for the read buffer to _contain_ 4096 chars in * non-canonical mode: the read buffer could already contain the maximum canon * line of 4096 chars when the mode is switched to non-canonical. 
* * Locking: n_tty_receive_buf()/producer path: * claims non-exclusive %termios_rwsem * publishes commit_head or canon_head */ static size_t n_tty_receive_buf_common(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count, bool flow) { struct n_tty_data *ldata = tty->disc_data; size_t n, rcvd = 0; int room, overflow; down_read(&tty->termios_rwsem); do { /* * When PARMRK is set, each input char may take up to 3 chars * in the read buf; reduce the buffer space avail by 3x * * If we are doing input canonicalization, and there are no * pending newlines, let characters through without limit, so * that erase characters will be handled. Other excess * characters will be beeped. * * paired with store in *_copy_from_read_buf() -- guarantees * the consumer has loaded the data in read_buf up to the new * read_tail (so this producer will not overwrite unread data) */ size_t tail = smp_load_acquire(&ldata->read_tail); room = N_TTY_BUF_SIZE - (ldata->read_head - tail); if (I_PARMRK(tty)) room = DIV_ROUND_UP(room, 3); room--; if (room <= 0) { overflow = ldata->icanon && ldata->canon_head == tail; if (overflow && room < 0) ldata->read_head--; room = overflow; WRITE_ONCE(ldata->no_room, flow && !room); } else overflow = 0; n = min_t(size_t, count, room); if (!n) break; /* ignore parity errors if handling overflow */ if (!overflow || !fp || *fp != TTY_PARITY) __receive_buf(tty, cp, fp, n); cp += n; if (fp) fp += n; count -= n; rcvd += n; } while (!test_bit(TTY_LDISC_CHANGING, &tty->flags)); tty->receive_room = room; /* Unthrottle if handling overflow on pty */ if (tty->driver->type == TTY_DRIVER_TYPE_PTY) { if (overflow) { tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE); tty_unthrottle_safe(tty); __tty_set_flow_change(tty, 0); } } else n_tty_check_throttle(tty); if (unlikely(ldata->no_room)) { /* * Barrier here is to ensure to read the latest read_tail in * chars_in_buffer() and to make sure that read_tail is not loaded * before ldata->no_room is set. */ smp_mb(); if (!chars_in_buffer(tty)) n_tty_kick_worker(tty); } up_read(&tty->termios_rwsem); return rcvd; } static void n_tty_receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { n_tty_receive_buf_common(tty, cp, fp, count, false); } static size_t n_tty_receive_buf2(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { return n_tty_receive_buf_common(tty, cp, fp, count, true); } /** * n_tty_set_termios - termios data changed * @tty: terminal * @old: previous data * * Called by the tty layer when the user changes termios flags so that the line * discipline can plan ahead. This function cannot sleep and is protected from * re-entry by the tty layer. The user is guaranteed that this function will * not be re-entered or in progress when the ldisc is closed. 
* * Locking: Caller holds @tty->termios_rwsem */ static void n_tty_set_termios(struct tty_struct *tty, const struct ktermios *old) { struct n_tty_data *ldata = tty->disc_data; if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) { bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); ldata->line_start = ldata->read_tail; if (!L_ICANON(tty) || !read_cnt(ldata)) { ldata->canon_head = ldata->read_tail; ldata->push = 0; } else { set_bit(MASK(ldata->read_head - 1), ldata->read_flags); ldata->canon_head = ldata->read_head; ldata->push = 1; } ldata->commit_head = ldata->read_head; ldata->erasing = 0; ldata->lnext = 0; } ldata->icanon = (L_ICANON(tty) != 0); if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) || I_ICRNL(tty) || I_INLCR(tty) || L_ICANON(tty) || I_IXON(tty) || L_ISIG(tty) || L_ECHO(tty) || I_PARMRK(tty)) { bitmap_zero(ldata->char_map, 256); if (I_IGNCR(tty) || I_ICRNL(tty)) set_bit('\r', ldata->char_map); if (I_INLCR(tty)) set_bit('\n', ldata->char_map); if (L_ICANON(tty)) { set_bit(ERASE_CHAR(tty), ldata->char_map); set_bit(KILL_CHAR(tty), ldata->char_map); set_bit(EOF_CHAR(tty), ldata->char_map); set_bit('\n', ldata->char_map); set_bit(EOL_CHAR(tty), ldata->char_map); if (L_IEXTEN(tty)) { set_bit(WERASE_CHAR(tty), ldata->char_map); set_bit(LNEXT_CHAR(tty), ldata->char_map); set_bit(EOL2_CHAR(tty), ldata->char_map); if (L_ECHO(tty)) set_bit(REPRINT_CHAR(tty), ldata->char_map); } } if (I_IXON(tty)) { set_bit(START_CHAR(tty), ldata->char_map); set_bit(STOP_CHAR(tty), ldata->char_map); } if (L_ISIG(tty)) { set_bit(INTR_CHAR(tty), ldata->char_map); set_bit(QUIT_CHAR(tty), ldata->char_map); set_bit(SUSP_CHAR(tty), ldata->char_map); } clear_bit(__DISABLED_CHAR, ldata->char_map); ldata->raw = 0; ldata->real_raw = 0; } else { ldata->raw = 1; if ((I_IGNBRK(tty) || (!I_BRKINT(tty) && !I_PARMRK(tty))) && (I_IGNPAR(tty) || !I_INPCK(tty)) && (tty->driver->flags & TTY_DRIVER_REAL_RAW)) ldata->real_raw = 1; else ldata->real_raw = 0; } /* * Fix tty hang when I_IXON(tty) is cleared, but the tty * been stopped by STOP_CHAR(tty) before it. */ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow.tco_stopped) { start_tty(tty); process_echoes(tty); } /* The termios change make the tty ready for I/O */ wake_up_interruptible(&tty->write_wait); wake_up_interruptible(&tty->read_wait); } /** * n_tty_close - close the ldisc for this tty * @tty: device * * Called from the terminal layer when this line discipline is being shut down, * either because of a close or becsuse of a discipline change. The function * will not be called while other ldisc methods are in progress. */ static void n_tty_close(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; if (tty->link) n_tty_packet_mode_flush(tty); down_write(&tty->termios_rwsem); vfree(ldata); tty->disc_data = NULL; up_write(&tty->termios_rwsem); } /** * n_tty_open - open an ldisc * @tty: terminal to open * * Called when this line discipline is being attached to the terminal device. * Can sleep. Called serialized so that no other events will occur in parallel. * No further open will occur until a close. 
*/ static int n_tty_open(struct tty_struct *tty) { struct n_tty_data *ldata; /* Currently a malloc failure here can panic */ ldata = vzalloc(sizeof(*ldata)); if (!ldata) return -ENOMEM; ldata->overrun_time = jiffies; mutex_init(&ldata->atomic_read_lock); mutex_init(&ldata->output_lock); tty->disc_data = ldata; tty->closing = 0; /* indicate buffer work may resume */ clear_bit(TTY_LDISC_HALTED, &tty->flags); n_tty_set_termios(tty, NULL); tty_unthrottle(tty); return 0; } static inline int input_available_p(const struct tty_struct *tty, int poll) { const struct n_tty_data *ldata = tty->disc_data; int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1; if (ldata->icanon && !L_EXTPROC(tty)) return ldata->canon_head != ldata->read_tail; else return ldata->commit_head - ldata->read_tail >= amt; } /** * copy_from_read_buf - copy read data directly * @tty: terminal device * @kbp: data * @nr: size of data * * Helper function to speed up n_tty_read(). It is only called when %ICANON is * off; it copies characters straight from the tty queue. * * Returns: true if it successfully copied data, but there is still more data * to be had. * * Locking: * * called under the @ldata->atomic_read_lock sem * * n_tty_read()/consumer path: * caller holds non-exclusive %termios_rwsem; * read_tail published */ static bool copy_from_read_buf(const struct tty_struct *tty, u8 **kbp, size_t *nr) { struct n_tty_data *ldata = tty->disc_data; size_t n; bool is_eof; size_t head = smp_load_acquire(&ldata->commit_head); size_t tail = MASK(ldata->read_tail); n = min3(head - ldata->read_tail, N_TTY_BUF_SIZE - tail, *nr); if (!n) return false; u8 *from = read_buf_addr(ldata, tail); memcpy(*kbp, from, n); is_eof = n == 1 && *from == EOF_CHAR(tty); tty_audit_add_data(tty, from, n); zero_buffer(tty, from, n); smp_store_release(&ldata->read_tail, ldata->read_tail + n); /* Turn single EOF into zero-length read */ if (L_EXTPROC(tty) && ldata->icanon && is_eof && head == ldata->read_tail) return false; *kbp += n; *nr -= n; /* If we have more to copy, let the caller know */ return head != ldata->read_tail; } /** * canon_copy_from_read_buf - copy read data in canonical mode * @tty: terminal device * @kbp: data * @nr: size of data * * Helper function for n_tty_read(). It is only called when %ICANON is on; it * copies one line of input up to and including the line-delimiting character * into the result buffer. * * Note: When termios is changed from non-canonical to canonical mode and the * read buffer contains data, n_tty_set_termios() simulates an EOF push (as if * C-d were input) _without_ the %DISABLED_CHAR in the buffer. This causes data * already processed as input to be immediately available as input although a * newline has not been received. * * Locking: * * called under the %atomic_read_lock mutex * * n_tty_read()/consumer path: * caller holds non-exclusive %termios_rwsem; * read_tail published */ static bool canon_copy_from_read_buf(const struct tty_struct *tty, u8 **kbp, size_t *nr) { struct n_tty_data *ldata = tty->disc_data; size_t n, size, more, c; size_t eol; size_t tail, canon_head; int found = 0; /* N.B. 
avoid overrun if nr == 0 */ if (!*nr) return false; canon_head = smp_load_acquire(&ldata->canon_head); n = min(*nr, canon_head - ldata->read_tail); tail = MASK(ldata->read_tail); size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); n_tty_trace("%s: nr:%zu tail:%zu n:%zu size:%zu\n", __func__, *nr, tail, n, size); eol = find_next_bit(ldata->read_flags, size, tail); more = n - (size - tail); if (eol == N_TTY_BUF_SIZE && more) { /* scan wrapped without finding set bit */ eol = find_first_bit(ldata->read_flags, more); found = eol != more; } else found = eol != size; n = eol - tail; if (n > N_TTY_BUF_SIZE) n += N_TTY_BUF_SIZE; c = n + found; if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) n = c; n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n", __func__, eol, found, n, c, tail, more); tty_copy(tty, *kbp, tail, n); *kbp += n; *nr -= n; if (found) clear_bit(eol, ldata->read_flags); smp_store_release(&ldata->read_tail, ldata->read_tail + c); if (found) { if (!ldata->push) ldata->line_start = ldata->read_tail; else ldata->push = 0; tty_audit_push(); return false; } /* No EOL found - do a continuation retry if there is more data */ return ldata->read_tail != canon_head; } /* * If we finished a read at the exact location of an * EOF (special EOL character that's a __DISABLED_CHAR) * in the stream, silently eat the EOF. */ static void canon_skip_eof(struct n_tty_data *ldata) { size_t tail, canon_head; canon_head = smp_load_acquire(&ldata->canon_head); tail = ldata->read_tail; // No data? if (tail == canon_head) return; // See if the tail position is EOF in the circular buffer tail &= (N_TTY_BUF_SIZE - 1); if (!test_bit(tail, ldata->read_flags)) return; if (read_buf(ldata, tail) != __DISABLED_CHAR) return; // Clear the EOL bit, skip the EOF char. clear_bit(tail, ldata->read_flags); smp_store_release(&ldata->read_tail, ldata->read_tail + 1); } /** * job_control - check job control * @tty: tty * @file: file handle * * Perform job control management checks on this @file/@tty descriptor and if * appropriate send any needed signals and return a negative error code if * action should be taken. * * Locking: * * redirected write test is safe * * current->signal->tty check is safe * * ctrl.lock to safely reference @tty->ctrl.pgrp */ static int job_control(struct tty_struct *tty, struct file *file) { /* Job control check -- must be done at start and after every sleep (POSIX.1 7.1.1.4). */ /* NOTE: not yet done after every sleep pending a thorough check of the logic of this change. -- jlc */ /* don't stop on /dev/console */ if (file->f_op->write_iter == redirected_tty_write) return 0; return __tty_check_change(tty, SIGTTIN); } /** * n_tty_read - read function for tty * @tty: tty device * @file: file object * @kbuf: kernelspace buffer pointer * @nr: size of I/O * @cookie: if non-%NULL, this is a continuation read * @offset: where to continue reading from (unused in n_tty) * * Perform reads for the line discipline. We are guaranteed that the line * discipline will not be closed under us but we may get multiple parallel * readers and must handle this ourselves. We may also get a hangup. Always * called in user context, may sleep. * * This code must be sure never to sleep through a hangup. 
* * Locking: n_tty_read()/consumer path: * claims non-exclusive termios_rwsem; * publishes read_tail */ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, u8 *kbuf, size_t nr, void **cookie, unsigned long offset) { struct n_tty_data *ldata = tty->disc_data; u8 *kb = kbuf; DEFINE_WAIT_FUNC(wait, woken_wake_function); int minimum, time; ssize_t retval; long timeout; bool packet; size_t old_tail; /* * Is this a continuation of a read started earler? * * If so, we still hold the atomic_read_lock and the * termios_rwsem, and can just continue to copy data. */ if (*cookie) { if (ldata->icanon && !L_EXTPROC(tty)) { /* * If we have filled the user buffer, see * if we should skip an EOF character before * releasing the lock and returning done. */ if (!nr) canon_skip_eof(ldata); else if (canon_copy_from_read_buf(tty, &kb, &nr)) return kb - kbuf; } else { if (copy_from_read_buf(tty, &kb, &nr)) return kb - kbuf; } /* No more data - release locks and stop retries */ n_tty_kick_worker(tty); n_tty_check_unthrottle(tty); up_read(&tty->termios_rwsem); mutex_unlock(&ldata->atomic_read_lock); *cookie = NULL; return kb - kbuf; } retval = job_control(tty, file); if (retval < 0) return retval; /* * Internal serialization of reads. */ if (file->f_flags & O_NONBLOCK) { if (!mutex_trylock(&ldata->atomic_read_lock)) return -EAGAIN; } else { if (mutex_lock_interruptible(&ldata->atomic_read_lock)) return -ERESTARTSYS; } down_read(&tty->termios_rwsem); minimum = time = 0; timeout = MAX_SCHEDULE_TIMEOUT; if (!ldata->icanon) { minimum = MIN_CHAR(tty); if (minimum) { time = (HZ / 10) * TIME_CHAR(tty); } else { timeout = (HZ / 10) * TIME_CHAR(tty); minimum = 1; } } packet = tty->ctrl.packet; old_tail = ldata->read_tail; add_wait_queue(&tty->read_wait, &wait); while (nr) { /* First test for status change. */ if (packet && tty->link->ctrl.pktstatus) { u8 cs; if (kb != kbuf) break; spin_lock_irq(&tty->link->ctrl.lock); cs = tty->link->ctrl.pktstatus; tty->link->ctrl.pktstatus = 0; spin_unlock_irq(&tty->link->ctrl.lock); *kb++ = cs; nr--; break; } if (!input_available_p(tty, 0)) { up_read(&tty->termios_rwsem); tty_buffer_flush_work(tty->port); down_read(&tty->termios_rwsem); if (!input_available_p(tty, 0)) { if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { retval = -EIO; break; } if (tty_hung_up_p(file)) break; /* * Abort readers for ttys which never actually * get hung up. See __tty_hangup(). */ if (test_bit(TTY_HUPPING, &tty->flags)) break; if (!timeout) break; if (tty_io_nonblock(tty, file)) { retval = -EAGAIN; break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } up_read(&tty->termios_rwsem); timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout); down_read(&tty->termios_rwsem); continue; } } if (ldata->icanon && !L_EXTPROC(tty)) { if (canon_copy_from_read_buf(tty, &kb, &nr)) goto more_to_be_read; } else { /* Deal with packet mode. */ if (packet && kb == kbuf) { *kb++ = TIOCPKT_DATA; nr--; } /* * Copy data, and if there is more to be had * and we have nothing more to wait for, then * let's mark us for retries. * * NOTE! We return here with both the termios_sem * and atomic_read_lock still held, the retries * will release them when done. 
*/ if (copy_from_read_buf(tty, &kb, &nr) && kb - kbuf >= minimum) { more_to_be_read: remove_wait_queue(&tty->read_wait, &wait); *cookie = cookie; return kb - kbuf; } } n_tty_check_unthrottle(tty); if (kb - kbuf >= minimum) break; if (time) timeout = time; } if (old_tail != ldata->read_tail) { /* * Make sure no_room is not read in n_tty_kick_worker() * before setting ldata->read_tail in copy_from_read_buf(). */ smp_mb(); n_tty_kick_worker(tty); } up_read(&tty->termios_rwsem); remove_wait_queue(&tty->read_wait, &wait); mutex_unlock(&ldata->atomic_read_lock); if (kb - kbuf) retval = kb - kbuf; return retval; } /** * n_tty_write - write function for tty * @tty: tty device * @file: file object * @buf: userspace buffer pointer * @nr: size of I/O * * Write function of the terminal device. This is serialized with respect to * other write callers but not to termios changes, reads and other such events. * Since the receive code will echo characters, thus calling driver write * methods, the %output_lock is used in the output processing functions called * here as well as in the echo processing function to protect the column state * and space left in the buffer. * * This code must be sure never to sleep through a hangup. * * Locking: output_lock to protect column state and space left * (note that the process_output*() functions take this lock themselves) */ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, const u8 *buf, size_t nr) { const u8 *b = buf; DEFINE_WAIT_FUNC(wait, woken_wake_function); ssize_t num, retval = 0; /* Job control check -- must be done at start (POSIX.1 7.1.1.4). */ if (L_TOSTOP(tty) && file->f_op->write_iter != redirected_tty_write) { retval = tty_check_change(tty); if (retval) return retval; } down_read(&tty->termios_rwsem); /* Write out any echoed characters that are still pending */ process_echoes(tty); add_wait_queue(&tty->write_wait, &wait); while (1) { if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) { retval = -EIO; break; } if (O_OPOST(tty)) { while (nr > 0) { num = process_output_block(tty, b, nr); if (num < 0) { if (num == -EAGAIN) break; retval = num; goto break_out; } b += num; nr -= num; if (nr == 0) break; if (process_output(*b, tty) < 0) break; b++; nr--; } if (tty->ops->flush_chars) tty->ops->flush_chars(tty); } else { struct n_tty_data *ldata = tty->disc_data; while (nr > 0) { mutex_lock(&ldata->output_lock); num = tty->ops->write(tty, b, nr); mutex_unlock(&ldata->output_lock); if (num < 0) { retval = num; goto break_out; } if (!num) break; b += num; nr -= num; } } if (!nr) break; if (tty_io_nonblock(tty, file)) { retval = -EAGAIN; break; } up_read(&tty->termios_rwsem); wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); down_read(&tty->termios_rwsem); } break_out: remove_wait_queue(&tty->write_wait, &wait); if (nr && tty->fasync) set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); up_read(&tty->termios_rwsem); return (b - buf) ? b - buf : retval; } /** * n_tty_poll - poll method for N_TTY * @tty: terminal device * @file: file accessing it * @wait: poll table * * Called when the line discipline is asked to poll() for data or for special * events. This code is not serialized with respect to other events save * open/close. * * This code must be sure never to sleep through a hangup. * * Locking: called without the kernel lock held -- fine. 
*/ static __poll_t n_tty_poll(struct tty_struct *tty, struct file *file, poll_table *wait) { __poll_t mask = 0; poll_wait(file, &tty->read_wait, wait); poll_wait(file, &tty->write_wait, wait); if (input_available_p(tty, 1)) mask |= EPOLLIN | EPOLLRDNORM; else { tty_buffer_flush_work(tty->port); if (input_available_p(tty, 1)) mask |= EPOLLIN | EPOLLRDNORM; } if (tty->ctrl.packet && tty->link->ctrl.pktstatus) mask |= EPOLLPRI | EPOLLIN | EPOLLRDNORM; if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) mask |= EPOLLHUP; if (tty_hung_up_p(file)) mask |= EPOLLHUP; if (tty->ops->write && !tty_is_writelocked(tty) && tty_chars_in_buffer(tty) < WAKEUP_CHARS && tty_write_room(tty) > 0) mask |= EPOLLOUT | EPOLLWRNORM; return mask; } static unsigned long inq_canon(struct n_tty_data *ldata) { size_t nr, head, tail; if (ldata->canon_head == ldata->read_tail) return 0; head = ldata->canon_head; tail = ldata->read_tail; nr = head - tail; /* Skip EOF-chars.. */ while (MASK(head) != MASK(tail)) { if (test_bit(MASK(tail), ldata->read_flags) && read_buf(ldata, tail) == __DISABLED_CHAR) nr--; tail++; } return nr; } static int n_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct n_tty_data *ldata = tty->disc_data; unsigned int num; switch (cmd) { case TIOCOUTQ: return put_user(tty_chars_in_buffer(tty), (int __user *) arg); case TIOCINQ: down_write(&tty->termios_rwsem); if (L_ICANON(tty) && !L_EXTPROC(tty)) num = inq_canon(ldata); else num = read_cnt(ldata); up_write(&tty->termios_rwsem); return put_user(num, (unsigned int __user *) arg); default: return n_tty_ioctl_helper(tty, cmd, arg); } } static struct tty_ldisc_ops n_tty_ops = { .owner = THIS_MODULE, .num = N_TTY, .name = "n_tty", .open = n_tty_open, .close = n_tty_close, .flush_buffer = n_tty_flush_buffer, .read = n_tty_read, .write = n_tty_write, .ioctl = n_tty_ioctl, .set_termios = n_tty_set_termios, .poll = n_tty_poll, .receive_buf = n_tty_receive_buf, .write_wakeup = n_tty_write_wakeup, .receive_buf2 = n_tty_receive_buf2, .lookahead_buf = n_tty_lookahead_flow_ctrl, }; /** * n_tty_inherit_ops - inherit N_TTY methods * @ops: struct tty_ldisc_ops where to save N_TTY methods * * Enables a 'subclass' line discipline to 'inherit' N_TTY methods. */ void n_tty_inherit_ops(struct tty_ldisc_ops *ops) { *ops = n_tty_ops; ops->owner = NULL; } EXPORT_SYMBOL_GPL(n_tty_inherit_ops); void __init n_tty_init(void) { tty_register_ldisc(&n_tty_ops); }
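/*
 * Illustrative sketch (not part of n_tty.c): how a "subclass" line
 * discipline might use n_tty_inherit_ops() as described above.  The
 * ldisc name "my_ldisc", the number N_MYLDISC and my_receive_buf() are
 * hypothetical placeholders; only n_tty_inherit_ops() and
 * tty_register_ldisc() come from the code above.
 */
#include <linux/module.h>
#include <linux/tty.h>

static struct tty_ldisc_ops n_tty_saved_ops;	/* pristine N_TTY methods */
static struct tty_ldisc_ops my_ldisc_ops;

static void my_receive_buf(struct tty_struct *tty, const u8 *cp,
			   const u8 *fp, size_t count)
{
	/* custom pre-processing of received data could happen here ... */

	/* ... then hand the data to the inherited N_TTY handler */
	n_tty_saved_ops.receive_buf(tty, cp, fp, count);
}

static int __init my_ldisc_init(void)
{
	/* copy every N_TTY method, then override selectively */
	n_tty_inherit_ops(&n_tty_saved_ops);

	my_ldisc_ops = n_tty_saved_ops;
	my_ldisc_ops.owner = THIS_MODULE;
	my_ldisc_ops.name = "my_ldisc";
	my_ldisc_ops.num = N_MYLDISC;	/* hypothetical ldisc number */
	my_ldisc_ops.receive_buf = my_receive_buf;

	return tty_register_ldisc(&my_ldisc_ops);
}
module_init(my_ldisc_init);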
913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 // SPDX-License-Identifier: GPL-2.0-only /* * Driver for DiBcom DiB3000MC/P-demodulator. * * Copyright (C) 2004-7 DiBcom (http://www.dibcom.fr/) * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * This code is partially based on the previous dib3000mc.c . */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/i2c.h> #include <media/dvb_frontend.h> #include "dib3000mc.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "turn on debugging (default: 0)"); static int buggy_sfn_workaround; module_param(buggy_sfn_workaround, int, 0644); MODULE_PARM_DESC(buggy_sfn_workaround, "Enable work-around for buggy SFNs (default: 0)"); #define dprintk(fmt, arg...) do { \ if (debug) \ printk(KERN_DEBUG pr_fmt("%s: " fmt), \ __func__, ##arg); \ } while (0) struct dib3000mc_state { struct dvb_frontend demod; struct dib3000mc_config *cfg; u8 i2c_addr; struct i2c_adapter *i2c_adap; struct dibx000_i2c_master i2c_master; u32 timf; u32 current_bandwidth; u16 dev_id; u8 sfn_workaround_active :1; }; static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg) { struct i2c_msg msg[2] = { { .addr = state->i2c_addr >> 1, .flags = 0, .len = 2 }, { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 }, }; u16 word; u8 *b; b = kmalloc(4, GFP_KERNEL); if (!b) return 0; b[0] = (reg >> 8) | 0x80; b[1] = reg; b[2] = 0; b[3] = 0; msg[0].buf = b; msg[1].buf = b + 2; if (i2c_transfer(state->i2c_adap, msg, 2) != 2) dprintk("i2c read error on %d\n",reg); word = (b[2] << 8) | b[3]; kfree(b); return word; } static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val) { struct i2c_msg msg = { .addr = state->i2c_addr >> 1, .flags = 0, .len = 4 }; int rc; u8 *b; b = kmalloc(4, GFP_KERNEL); if (!b) return -ENOMEM; b[0] = reg >> 8; b[1] = reg; b[2] = val >> 8; b[3] = val; msg.buf = b; rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? 
-EREMOTEIO : 0; kfree(b); return rc; } static int dib3000mc_identify(struct dib3000mc_state *state) { u16 value; if ((value = dib3000mc_read_word(state, 1025)) != 0x01b3) { dprintk("-E- DiB3000MC/P: wrong Vendor ID (read=0x%x)\n",value); return -EREMOTEIO; } value = dib3000mc_read_word(state, 1026); if (value != 0x3001 && value != 0x3002) { dprintk("-E- DiB3000MC/P: wrong Device ID (%x)\n",value); return -EREMOTEIO; } state->dev_id = value; dprintk("-I- found DiB3000MC/P: %x\n",state->dev_id); return 0; } static int dib3000mc_set_timing(struct dib3000mc_state *state, s16 nfft, u32 bw, u8 update_offset) { u32 timf; if (state->timf == 0) { timf = 1384402; // default value for 8MHz if (update_offset) msleep(200); // first time we do an update } else timf = state->timf; timf *= (bw / 1000); if (update_offset) { s16 tim_offs = dib3000mc_read_word(state, 416); if (tim_offs & 0x2000) tim_offs -= 0x4000; if (nfft == TRANSMISSION_MODE_2K) tim_offs *= 4; timf += tim_offs; state->timf = timf / (bw / 1000); } dprintk("timf: %d\n", timf); dib3000mc_write_word(state, 23, (u16) (timf >> 16)); dib3000mc_write_word(state, 24, (u16) (timf ) & 0xffff); return 0; } static int dib3000mc_setup_pwm_state(struct dib3000mc_state *state) { u16 reg_51, reg_52 = state->cfg->agc->setup & 0xfefb; if (state->cfg->pwm3_inversion) { reg_51 = (2 << 14) | (0 << 10) | (7 << 6) | (2 << 2) | (2 << 0); reg_52 |= (1 << 2); } else { reg_51 = (2 << 14) | (4 << 10) | (7 << 6) | (2 << 2) | (2 << 0); reg_52 |= (1 << 8); } dib3000mc_write_word(state, 51, reg_51); dib3000mc_write_word(state, 52, reg_52); if (state->cfg->use_pwm3) dib3000mc_write_word(state, 245, (1 << 3) | (1 << 0)); else dib3000mc_write_word(state, 245, 0); dib3000mc_write_word(state, 1040, 0x3); return 0; } static int dib3000mc_set_output_mode(struct dib3000mc_state *state, int mode) { int ret = 0; u16 fifo_threshold = 1792; u16 outreg = 0; u16 outmode = 0; u16 elecout = 1; u16 smo_reg = dib3000mc_read_word(state, 206) & 0x0010; /* keep the pid_parse bit */ dprintk("-I- Setting output mode for demod %p to %d\n", &state->demod, mode); switch (mode) { case OUTMODE_HIGH_Z: // disable elecout = 0; break; case OUTMODE_MPEG2_PAR_GATED_CLK: // STBs with parallel gated clock outmode = 0; break; case OUTMODE_MPEG2_PAR_CONT_CLK: // STBs with parallel continues clock outmode = 1; break; case OUTMODE_MPEG2_SERIAL: // STBs with serial input outmode = 2; break; case OUTMODE_MPEG2_FIFO: // e.g. 
USB feeding elecout = 3; /*ADDR @ 206 : P_smo_error_discard [1;6:6] = 0 P_smo_rs_discard [1;5:5] = 0 P_smo_pid_parse [1;4:4] = 0 P_smo_fifo_flush [1;3:3] = 0 P_smo_mode [2;2:1] = 11 P_smo_ovf_prot [1;0:0] = 0 */ smo_reg |= 3 << 1; fifo_threshold = 512; outmode = 5; break; case OUTMODE_DIVERSITY: outmode = 4; elecout = 1; break; default: dprintk("Unhandled output_mode passed to be set for demod %p\n",&state->demod); outmode = 0; break; } if ((state->cfg->output_mpeg2_in_188_bytes)) smo_reg |= (1 << 5); // P_smo_rs_discard [1;5:5] = 1 outreg = dib3000mc_read_word(state, 244) & 0x07FF; outreg |= (outmode << 11); ret |= dib3000mc_write_word(state, 244, outreg); ret |= dib3000mc_write_word(state, 206, smo_reg); /*smo_ mode*/ ret |= dib3000mc_write_word(state, 207, fifo_threshold); /* synchronous fread */ ret |= dib3000mc_write_word(state, 1040, elecout); /* P_out_cfg */ return ret; } static int dib3000mc_set_bandwidth(struct dib3000mc_state *state, u32 bw) { u16 bw_cfg[6] = { 0 }; u16 imp_bw_cfg[3] = { 0 }; u16 reg; /* settings here are for 27.7MHz */ switch (bw) { case 8000: bw_cfg[0] = 0x0019; bw_cfg[1] = 0x5c30; bw_cfg[2] = 0x0054; bw_cfg[3] = 0x88a0; bw_cfg[4] = 0x01a6; bw_cfg[5] = 0xab20; imp_bw_cfg[0] = 0x04db; imp_bw_cfg[1] = 0x00db; imp_bw_cfg[2] = 0x00b7; break; case 7000: bw_cfg[0] = 0x001c; bw_cfg[1] = 0xfba5; bw_cfg[2] = 0x0060; bw_cfg[3] = 0x9c25; bw_cfg[4] = 0x01e3; bw_cfg[5] = 0x0cb7; imp_bw_cfg[0] = 0x04c0; imp_bw_cfg[1] = 0x00c0; imp_bw_cfg[2] = 0x00a0; break; case 6000: bw_cfg[0] = 0x0021; bw_cfg[1] = 0xd040; bw_cfg[2] = 0x0070; bw_cfg[3] = 0xb62b; bw_cfg[4] = 0x0233; bw_cfg[5] = 0x8ed5; imp_bw_cfg[0] = 0x04a5; imp_bw_cfg[1] = 0x00a5; imp_bw_cfg[2] = 0x0089; break; case 5000: bw_cfg[0] = 0x0028; bw_cfg[1] = 0x9380; bw_cfg[2] = 0x0087; bw_cfg[3] = 0x4100; bw_cfg[4] = 0x02a4; bw_cfg[5] = 0x4500; imp_bw_cfg[0] = 0x0489; imp_bw_cfg[1] = 0x0089; imp_bw_cfg[2] = 0x0072; break; default: return -EINVAL; } for (reg = 6; reg < 12; reg++) dib3000mc_write_word(state, reg, bw_cfg[reg - 6]); dib3000mc_write_word(state, 12, 0x0000); dib3000mc_write_word(state, 13, 0x03e8); dib3000mc_write_word(state, 14, 0x0000); dib3000mc_write_word(state, 15, 0x03f2); dib3000mc_write_word(state, 16, 0x0001); dib3000mc_write_word(state, 17, 0xb0d0); // P_sec_len dib3000mc_write_word(state, 18, 0x0393); dib3000mc_write_word(state, 19, 0x8700); for (reg = 55; reg < 58; reg++) dib3000mc_write_word(state, reg, imp_bw_cfg[reg - 55]); // Timing configuration dib3000mc_set_timing(state, TRANSMISSION_MODE_2K, bw, 0); return 0; } static u16 impulse_noise_val[29] = { 0x38, 0x6d9, 0x3f28, 0x7a7, 0x3a74, 0x196, 0x32a, 0x48c, 0x3ffe, 0x7f3, 0x2d94, 0x76, 0x53d, 0x3ff8, 0x7e3, 0x3320, 0x76, 0x5b3, 0x3feb, 0x7d2, 0x365e, 0x76, 0x48c, 0x3ffe, 0x5b3, 0x3feb, 0x76, 0x0000, 0xd }; static void dib3000mc_set_impulse_noise(struct dib3000mc_state *state, u8 mode, s16 nfft) { u16 i; for (i = 58; i < 87; i++) dib3000mc_write_word(state, i, impulse_noise_val[i-58]); if (nfft == TRANSMISSION_MODE_8K) { dib3000mc_write_word(state, 58, 0x3b); dib3000mc_write_word(state, 84, 0x00); dib3000mc_write_word(state, 85, 0x8200); } dib3000mc_write_word(state, 34, 0x1294); dib3000mc_write_word(state, 35, 0x1ff8); if (mode == 1) dib3000mc_write_word(state, 55, dib3000mc_read_word(state, 55) | (1 << 10)); } static int dib3000mc_init(struct dvb_frontend *demod) { struct dib3000mc_state *state = demod->demodulator_priv; struct dibx000_agc_config *agc = state->cfg->agc; // Restart Configuration dib3000mc_write_word(state, 1027, 0x8000); 
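	/* the write below clears the restart bits raised above, completing the reset pulse */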
dib3000mc_write_word(state, 1027, 0x0000); // power up the demod + mobility configuration dib3000mc_write_word(state, 140, 0x0000); dib3000mc_write_word(state, 1031, 0); if (state->cfg->mobile_mode) { dib3000mc_write_word(state, 139, 0x0000); dib3000mc_write_word(state, 141, 0x0000); dib3000mc_write_word(state, 175, 0x0002); dib3000mc_write_word(state, 1032, 0x0000); } else { dib3000mc_write_word(state, 139, 0x0001); dib3000mc_write_word(state, 141, 0x0000); dib3000mc_write_word(state, 175, 0x0000); dib3000mc_write_word(state, 1032, 0x012C); } dib3000mc_write_word(state, 1033, 0x0000); // P_clk_cfg dib3000mc_write_word(state, 1037, 0x3130); // other configurations // P_ctrl_sfreq dib3000mc_write_word(state, 33, (5 << 0)); dib3000mc_write_word(state, 88, (1 << 10) | (0x10 << 0)); // Phase noise control // P_fft_phacor_inh, P_fft_phacor_cpe, P_fft_powrange dib3000mc_write_word(state, 99, (1 << 9) | (0x20 << 0)); if (state->cfg->phase_noise_mode == 0) dib3000mc_write_word(state, 111, 0x00); else dib3000mc_write_word(state, 111, 0x02); // P_agc_global dib3000mc_write_word(state, 50, 0x8000); // agc setup misc dib3000mc_setup_pwm_state(state); // P_agc_counter_lock dib3000mc_write_word(state, 53, 0x87); // P_agc_counter_unlock dib3000mc_write_word(state, 54, 0x87); /* agc */ dib3000mc_write_word(state, 36, state->cfg->max_time); dib3000mc_write_word(state, 37, (state->cfg->agc_command1 << 13) | (state->cfg->agc_command2 << 12) | (0x1d << 0)); dib3000mc_write_word(state, 38, state->cfg->pwm3_value); dib3000mc_write_word(state, 39, state->cfg->ln_adc_level); // set_agc_loop_Bw dib3000mc_write_word(state, 40, 0x0179); dib3000mc_write_word(state, 41, 0x03f0); dib3000mc_write_word(state, 42, agc->agc1_max); dib3000mc_write_word(state, 43, agc->agc1_min); dib3000mc_write_word(state, 44, agc->agc2_max); dib3000mc_write_word(state, 45, agc->agc2_min); dib3000mc_write_word(state, 46, (agc->agc1_pt1 << 8) | agc->agc1_pt2); dib3000mc_write_word(state, 47, (agc->agc1_slope1 << 8) | agc->agc1_slope2); dib3000mc_write_word(state, 48, (agc->agc2_pt1 << 8) | agc->agc2_pt2); dib3000mc_write_word(state, 49, (agc->agc2_slope1 << 8) | agc->agc2_slope2); // Begin: TimeOut registers // P_pha3_thres dib3000mc_write_word(state, 110, 3277); // P_timf_alpha = 6, P_corm_alpha = 6, P_corm_thres = 0x80 dib3000mc_write_word(state, 26, 0x6680); // lock_mask0 dib3000mc_write_word(state, 1, 4); // lock_mask1 dib3000mc_write_word(state, 2, 4); // lock_mask2 dib3000mc_write_word(state, 3, 0x1000); // P_search_maxtrial=1 dib3000mc_write_word(state, 5, 1); dib3000mc_set_bandwidth(state, 8000); // div_lock_mask dib3000mc_write_word(state, 4, 0x814); dib3000mc_write_word(state, 21, (1 << 9) | 0x164); dib3000mc_write_word(state, 22, 0x463d); // Spurious rm cfg // P_cspu_regul, P_cspu_win_cut dib3000mc_write_word(state, 120, 0x200f); // P_adp_selec_monit dib3000mc_write_word(state, 134, 0); // Fec cfg dib3000mc_write_word(state, 195, 0x10); // diversity register: P_dvsy_sync_wait.. 
dib3000mc_write_word(state, 180, 0x2FF0); // Impulse noise configuration dib3000mc_set_impulse_noise(state, 0, TRANSMISSION_MODE_8K); // output mode set-up dib3000mc_set_output_mode(state, OUTMODE_HIGH_Z); /* close the i2c-gate */ dib3000mc_write_word(state, 769, (1 << 7) ); return 0; } static int dib3000mc_sleep(struct dvb_frontend *demod) { struct dib3000mc_state *state = demod->demodulator_priv; dib3000mc_write_word(state, 1031, 0xFFFF); dib3000mc_write_word(state, 1032, 0xFFFF); dib3000mc_write_word(state, 1033, 0xFFF0); return 0; } static void dib3000mc_set_adp_cfg(struct dib3000mc_state *state, s16 qam) { u16 cfg[4] = { 0 },reg; switch (qam) { case QPSK: cfg[0] = 0x099a; cfg[1] = 0x7fae; cfg[2] = 0x0333; cfg[3] = 0x7ff0; break; case QAM_16: cfg[0] = 0x023d; cfg[1] = 0x7fdf; cfg[2] = 0x00a4; cfg[3] = 0x7ff0; break; case QAM_64: cfg[0] = 0x0148; cfg[1] = 0x7ff0; cfg[2] = 0x00a4; cfg[3] = 0x7ff8; break; } for (reg = 129; reg < 133; reg++) dib3000mc_write_word(state, reg, cfg[reg - 129]); } static void dib3000mc_set_channel_cfg(struct dib3000mc_state *state, struct dtv_frontend_properties *ch, u16 seq) { u16 value; u32 bw = BANDWIDTH_TO_KHZ(ch->bandwidth_hz); dib3000mc_set_bandwidth(state, bw); dib3000mc_set_timing(state, ch->transmission_mode, bw, 0); #if 1 dib3000mc_write_word(state, 100, (16 << 6) + 9); #else if (boost) dib3000mc_write_word(state, 100, (11 << 6) + 6); else dib3000mc_write_word(state, 100, (16 << 6) + 9); #endif dib3000mc_write_word(state, 1027, 0x0800); dib3000mc_write_word(state, 1027, 0x0000); //Default cfg isi offset adp dib3000mc_write_word(state, 26, 0x6680); dib3000mc_write_word(state, 29, 0x1273); dib3000mc_write_word(state, 33, 5); dib3000mc_set_adp_cfg(state, QAM_16); dib3000mc_write_word(state, 133, 15564); dib3000mc_write_word(state, 12 , 0x0); dib3000mc_write_word(state, 13 , 0x3e8); dib3000mc_write_word(state, 14 , 0x0); dib3000mc_write_word(state, 15 , 0x3f2); dib3000mc_write_word(state, 93,0); dib3000mc_write_word(state, 94,0); dib3000mc_write_word(state, 95,0); dib3000mc_write_word(state, 96,0); dib3000mc_write_word(state, 97,0); dib3000mc_write_word(state, 98,0); dib3000mc_set_impulse_noise(state, 0, ch->transmission_mode); value = 0; switch (ch->transmission_mode) { case TRANSMISSION_MODE_2K: value |= (0 << 7); break; default: case TRANSMISSION_MODE_8K: value |= (1 << 7); break; } switch (ch->guard_interval) { case GUARD_INTERVAL_1_32: value |= (0 << 5); break; case GUARD_INTERVAL_1_16: value |= (1 << 5); break; case GUARD_INTERVAL_1_4: value |= (3 << 5); break; default: case GUARD_INTERVAL_1_8: value |= (2 << 5); break; } switch (ch->modulation) { case QPSK: value |= (0 << 3); break; case QAM_16: value |= (1 << 3); break; default: case QAM_64: value |= (2 << 3); break; } switch (HIERARCHY_1) { case HIERARCHY_2: value |= 2; break; case HIERARCHY_4: value |= 4; break; default: case HIERARCHY_1: value |= 1; break; } dib3000mc_write_word(state, 0, value); dib3000mc_write_word(state, 5, (1 << 8) | ((seq & 0xf) << 4)); value = 0; if (ch->hierarchy == 1) value |= (1 << 4); if (1 == 1) value |= 1; switch ((ch->hierarchy == 0 || 1 == 1) ? 
ch->code_rate_HP : ch->code_rate_LP) { case FEC_2_3: value |= (2 << 1); break; case FEC_3_4: value |= (3 << 1); break; case FEC_5_6: value |= (5 << 1); break; case FEC_7_8: value |= (7 << 1); break; default: case FEC_1_2: value |= (1 << 1); break; } dib3000mc_write_word(state, 181, value); // diversity synchro delay add 50% SFN margin switch (ch->transmission_mode) { case TRANSMISSION_MODE_8K: value = 256; break; case TRANSMISSION_MODE_2K: default: value = 64; break; } switch (ch->guard_interval) { case GUARD_INTERVAL_1_16: value *= 2; break; case GUARD_INTERVAL_1_8: value *= 4; break; case GUARD_INTERVAL_1_4: value *= 8; break; default: case GUARD_INTERVAL_1_32: value *= 1; break; } value <<= 4; value |= dib3000mc_read_word(state, 180) & 0x000f; dib3000mc_write_word(state, 180, value); // restart demod value = dib3000mc_read_word(state, 0); dib3000mc_write_word(state, 0, value | (1 << 9)); dib3000mc_write_word(state, 0, value); msleep(30); dib3000mc_set_impulse_noise(state, state->cfg->impulse_noise_mode, ch->transmission_mode); } static int dib3000mc_autosearch_start(struct dvb_frontend *demod) { struct dtv_frontend_properties *chan = &demod->dtv_property_cache; struct dib3000mc_state *state = demod->demodulator_priv; u16 reg; // u32 val; struct dtv_frontend_properties schan; schan = *chan; /* TODO what is that ? */ /* a channel for autosearch */ schan.transmission_mode = TRANSMISSION_MODE_8K; schan.guard_interval = GUARD_INTERVAL_1_32; schan.modulation = QAM_64; schan.code_rate_HP = FEC_2_3; schan.code_rate_LP = FEC_2_3; schan.hierarchy = 0; dib3000mc_set_channel_cfg(state, &schan, 11); reg = dib3000mc_read_word(state, 0); dib3000mc_write_word(state, 0, reg | (1 << 8)); dib3000mc_read_word(state, 511); dib3000mc_write_word(state, 0, reg); return 0; } static int dib3000mc_autosearch_is_irq(struct dvb_frontend *demod) { struct dib3000mc_state *state = demod->demodulator_priv; u16 irq_pending = dib3000mc_read_word(state, 511); if (irq_pending & 0x1) // failed return 1; if (irq_pending & 0x2) // succeeded return 2; return 0; // still pending } static int dib3000mc_tune(struct dvb_frontend *demod) { struct dtv_frontend_properties *ch = &demod->dtv_property_cache; struct dib3000mc_state *state = demod->demodulator_priv; // ** configure demod ** dib3000mc_set_channel_cfg(state, ch, 0); // activates isi if (state->sfn_workaround_active) { dprintk("SFN workaround is active\n"); dib3000mc_write_word(state, 29, 0x1273); dib3000mc_write_word(state, 108, 0x4000); // P_pha3_force_pha_shift } else { dib3000mc_write_word(state, 29, 0x1073); dib3000mc_write_word(state, 108, 0x0000); // P_pha3_force_pha_shift } dib3000mc_set_adp_cfg(state, (u8)ch->modulation); if (ch->transmission_mode == TRANSMISSION_MODE_8K) { dib3000mc_write_word(state, 26, 38528); dib3000mc_write_word(state, 33, 8); } else { dib3000mc_write_word(state, 26, 30336); dib3000mc_write_word(state, 33, 6); } if (dib3000mc_read_word(state, 509) & 0x80) dib3000mc_set_timing(state, ch->transmission_mode, BANDWIDTH_TO_KHZ(ch->bandwidth_hz), 1); return 0; } struct i2c_adapter * dib3000mc_get_tuner_i2c_master(struct dvb_frontend *demod, int gating) { struct dib3000mc_state *st = demod->demodulator_priv; return dibx000_get_i2c_adapter(&st->i2c_master, DIBX000_I2C_INTERFACE_TUNER, gating); } EXPORT_SYMBOL(dib3000mc_get_tuner_i2c_master); static int dib3000mc_get_frontend(struct dvb_frontend* fe, struct dtv_frontend_properties *fep) { struct dib3000mc_state *state = fe->demodulator_priv; u16 tps = dib3000mc_read_word(state,458); fep->inversion = 
INVERSION_AUTO; fep->bandwidth_hz = state->current_bandwidth; switch ((tps >> 8) & 0x1) { case 0: fep->transmission_mode = TRANSMISSION_MODE_2K; break; case 1: fep->transmission_mode = TRANSMISSION_MODE_8K; break; } switch (tps & 0x3) { case 0: fep->guard_interval = GUARD_INTERVAL_1_32; break; case 1: fep->guard_interval = GUARD_INTERVAL_1_16; break; case 2: fep->guard_interval = GUARD_INTERVAL_1_8; break; case 3: fep->guard_interval = GUARD_INTERVAL_1_4; break; } switch ((tps >> 13) & 0x3) { case 0: fep->modulation = QPSK; break; case 1: fep->modulation = QAM_16; break; case 2: default: fep->modulation = QAM_64; break; } /* as long as the frontend_param structure is fixed for hierarchical transmission I refuse to use it */ /* (tps >> 12) & 0x1 == hrch is used, (tps >> 9) & 0x7 == alpha */ fep->hierarchy = HIERARCHY_NONE; switch ((tps >> 5) & 0x7) { case 1: fep->code_rate_HP = FEC_1_2; break; case 2: fep->code_rate_HP = FEC_2_3; break; case 3: fep->code_rate_HP = FEC_3_4; break; case 5: fep->code_rate_HP = FEC_5_6; break; case 7: default: fep->code_rate_HP = FEC_7_8; break; } switch ((tps >> 2) & 0x7) { case 1: fep->code_rate_LP = FEC_1_2; break; case 2: fep->code_rate_LP = FEC_2_3; break; case 3: fep->code_rate_LP = FEC_3_4; break; case 5: fep->code_rate_LP = FEC_5_6; break; case 7: default: fep->code_rate_LP = FEC_7_8; break; } return 0; } static int dib3000mc_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *fep = &fe->dtv_property_cache; struct dib3000mc_state *state = fe->demodulator_priv; int ret; dib3000mc_set_output_mode(state, OUTMODE_HIGH_Z); state->current_bandwidth = fep->bandwidth_hz; dib3000mc_set_bandwidth(state, BANDWIDTH_TO_KHZ(fep->bandwidth_hz)); /* maybe the parameter has been changed */ state->sfn_workaround_active = buggy_sfn_workaround; if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); msleep(100); } if (fep->transmission_mode == TRANSMISSION_MODE_AUTO || fep->guard_interval == GUARD_INTERVAL_AUTO || fep->modulation == QAM_AUTO || fep->code_rate_HP == FEC_AUTO) { int i = 1000, found; dib3000mc_autosearch_start(fe); do { msleep(1); found = dib3000mc_autosearch_is_irq(fe); } while (found == 0 && i--); dprintk("autosearch returns: %d\n",found); if (found == 0 || found == 1) return 0; // no channel found dib3000mc_get_frontend(fe, fep); } ret = dib3000mc_tune(fe); /* make this a config parameter */ dib3000mc_set_output_mode(state, OUTMODE_MPEG2_FIFO); return ret; } static int dib3000mc_read_status(struct dvb_frontend *fe, enum fe_status *stat) { struct dib3000mc_state *state = fe->demodulator_priv; u16 lock = dib3000mc_read_word(state, 509); *stat = 0; if (lock & 0x8000) *stat |= FE_HAS_SIGNAL; if (lock & 0x3000) *stat |= FE_HAS_CARRIER; if (lock & 0x0100) *stat |= FE_HAS_VITERBI; if (lock & 0x0010) *stat |= FE_HAS_SYNC; if (lock & 0x0008) *stat |= FE_HAS_LOCK; return 0; } static int dib3000mc_read_ber(struct dvb_frontend *fe, u32 *ber) { struct dib3000mc_state *state = fe->demodulator_priv; *ber = (dib3000mc_read_word(state, 500) << 16) | dib3000mc_read_word(state, 501); return 0; } static int dib3000mc_read_unc_blocks(struct dvb_frontend *fe, u32 *unc) { struct dib3000mc_state *state = fe->demodulator_priv; *unc = dib3000mc_read_word(state, 508); return 0; } static int dib3000mc_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct dib3000mc_state *state = fe->demodulator_priv; u16 val = dib3000mc_read_word(state, 392); *strength = 65535 - val; return 0; } static int dib3000mc_read_snr(struct dvb_frontend* fe, 
u16 *snr) { *snr = 0x0000; return 0; } static int dib3000mc_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 1000; return 0; } static void dib3000mc_release(struct dvb_frontend *fe) { struct dib3000mc_state *state = fe->demodulator_priv; dibx000_exit_i2c_master(&state->i2c_master); kfree(state); } int dib3000mc_pid_control(struct dvb_frontend *fe, int index, int pid,int onoff) { struct dib3000mc_state *state = fe->demodulator_priv; dib3000mc_write_word(state, 212 + index, onoff ? (1 << 13) | pid : 0); return 0; } EXPORT_SYMBOL(dib3000mc_pid_control); int dib3000mc_pid_parse(struct dvb_frontend *fe, int onoff) { struct dib3000mc_state *state = fe->demodulator_priv; u16 tmp = dib3000mc_read_word(state, 206) & ~(1 << 4); tmp |= (onoff << 4); return dib3000mc_write_word(state, 206, tmp); } EXPORT_SYMBOL(dib3000mc_pid_parse); void dib3000mc_set_config(struct dvb_frontend *fe, struct dib3000mc_config *cfg) { struct dib3000mc_state *state = fe->demodulator_priv; state->cfg = cfg; } EXPORT_SYMBOL(dib3000mc_set_config); int dib3000mc_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib3000mc_config cfg[]) { struct dib3000mc_state *dmcst; int k; u8 new_addr; static const u8 DIB3000MC_I2C_ADDRESS[] = { 20, 22, 24, 26 }; dmcst = kzalloc(sizeof(struct dib3000mc_state), GFP_KERNEL); if (dmcst == NULL) return -ENOMEM; dmcst->i2c_adap = i2c; for (k = no_of_demods-1; k >= 0; k--) { dmcst->cfg = &cfg[k]; /* designated i2c address */ new_addr = DIB3000MC_I2C_ADDRESS[k]; dmcst->i2c_addr = new_addr; if (dib3000mc_identify(dmcst) != 0) { dmcst->i2c_addr = default_addr; if (dib3000mc_identify(dmcst) != 0) { dprintk("-E- DiB3000P/MC #%d: not identified\n", k); kfree(dmcst); return -ENODEV; } } dib3000mc_set_output_mode(dmcst, OUTMODE_MPEG2_PAR_CONT_CLK); // set new i2c address and force divstr (Bit 1) to value 0 (Bit 0) dib3000mc_write_word(dmcst, 1024, (new_addr << 3) | 0x1); dmcst->i2c_addr = new_addr; } for (k = 0; k < no_of_demods; k++) { dmcst->cfg = &cfg[k]; dmcst->i2c_addr = DIB3000MC_I2C_ADDRESS[k]; dib3000mc_write_word(dmcst, 1024, dmcst->i2c_addr << 3); /* turn off data output */ dib3000mc_set_output_mode(dmcst, OUTMODE_HIGH_Z); } kfree(dmcst); return 0; } EXPORT_SYMBOL(dib3000mc_i2c_enumeration); static const struct dvb_frontend_ops dib3000mc_ops; struct dvb_frontend * dib3000mc_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib3000mc_config *cfg) { struct dvb_frontend *demod; struct dib3000mc_state *st; st = kzalloc(sizeof(struct dib3000mc_state), GFP_KERNEL); if (st == NULL) return NULL; st->cfg = cfg; st->i2c_adap = i2c_adap; st->i2c_addr = i2c_addr; demod = &st->demod; demod->demodulator_priv = st; memcpy(&st->demod.ops, &dib3000mc_ops, sizeof(struct dvb_frontend_ops)); if (dib3000mc_identify(st) != 0) goto error; dibx000_init_i2c_master(&st->i2c_master, DIB3000MC, st->i2c_adap, st->i2c_addr); dib3000mc_write_word(st, 1037, 0x3130); return demod; error: kfree(st); return NULL; } EXPORT_SYMBOL_GPL(dib3000mc_attach); static const struct dvb_frontend_ops dib3000mc_ops = { .delsys = { SYS_DVBT }, .info = { .name = "DiBcom 3000MC/P", .frequency_min_hz = 44250 * kHz, .frequency_max_hz = 867250 * kHz, .frequency_stepsize_hz = 62500, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER 
| FE_CAN_HIERARCHY_AUTO, }, .release = dib3000mc_release, .init = dib3000mc_init, .sleep = dib3000mc_sleep, .set_frontend = dib3000mc_set_frontend, .get_tune_settings = dib3000mc_fe_get_tune_settings, .get_frontend = dib3000mc_get_frontend, .read_status = dib3000mc_read_status, .read_ber = dib3000mc_read_ber, .read_signal_strength = dib3000mc_read_signal_strength, .read_snr = dib3000mc_read_snr, .read_ucblocks = dib3000mc_read_unc_blocks, }; MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for the DiBcom 3000MC/P COFDM demodulator"); MODULE_LICENSE("GPL");
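/*
 * Illustrative sketch (not part of this driver): how a bridge/board
 * driver might bind the demodulator with dib3000mc_attach() and reach
 * its tuner through the gated I2C master.  MY_BOARD_DIB3000MC_I2C_ADDR,
 * my_board_agc_config and the config values are placeholders; only
 * dib3000mc_attach(), dib3000mc_get_tuner_i2c_master() and the
 * struct dib3000mc_config fields come from the code above.
 */
static struct dibx000_agc_config my_board_agc_config;	/* board-specific AGC table */

static struct dib3000mc_config my_board_dib3000mc_config = {
	.agc				= &my_board_agc_config,
	.output_mpeg2_in_188_bytes	= 1,
};

static int my_board_frontend_attach(struct i2c_adapter *i2c,
				    struct dvb_frontend **fe)
{
	struct i2c_adapter *tun_i2c;

	*fe = dib3000mc_attach(i2c, MY_BOARD_DIB3000MC_I2C_ADDR,
			       &my_board_dib3000mc_config);
	if (!*fe)
		return -ENODEV;

	/* the tuner typically sits behind the demod's gated I2C interface */
	tun_i2c = dib3000mc_get_tuner_i2c_master(*fe, 1);

	/* ... attach the board's tuner driver on tun_i2c here ... */
	return 0;
}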
4 4 4 4 3 3 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 // SPDX-License-Identifier: GPL-2.0-only // // ethtool interface for Ethernet PSE (Power Sourcing Equipment) // and PD (Powered Device) // // Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> // #include "common.h" #include "linux/pse-pd/pse.h" #include "netlink.h" #include <linux/ethtool_netlink.h> #include <linux/ethtool.h> #include <linux/phy.h> struct pse_req_info { struct ethnl_req_info base; }; struct pse_reply_data { struct ethnl_reply_data base; struct pse_control_status status; }; #define PSE_REPDATA(__reply_base) \ container_of(__reply_base, struct pse_reply_data, base) /* PSE_GET */ const struct nla_policy ethnl_pse_get_policy[ETHTOOL_A_PSE_HEADER + 1] = { [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), }; static int pse_get_pse_attributes(struct net_device *dev, struct netlink_ext_ack *extack, struct pse_reply_data *data) { struct phy_device *phydev = dev->phydev; if (!phydev) { NL_SET_ERR_MSG(extack, "No PHY is attached"); return -EOPNOTSUPP; } if (!phydev->psec) { NL_SET_ERR_MSG(extack, "No PSE is attached"); return -EOPNOTSUPP; } memset(&data->status, 0, sizeof(data->status)); return pse_ethtool_get_status(phydev->psec, extack, &data->status); } static int pse_prepare_data(const struct ethnl_req_info *req_base, struct ethnl_reply_data *reply_base, const struct genl_info *info) { struct pse_reply_data *data = PSE_REPDATA(reply_base); struct net_device *dev = reply_base->dev; int ret; ret = ethnl_ops_begin(dev); if (ret < 0) return ret; ret = pse_get_pse_attributes(dev, info->extack, data); ethnl_ops_complete(dev); return ret; } static int pse_reply_size(const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) { const struct pse_reply_data *data = PSE_REPDATA(reply_base); const struct pse_control_status *st = &data->status; int len = 0; if (st->podl_admin_state > 0) len += nla_total_size(sizeof(u32)); /* _PODL_PSE_ADMIN_STATE */ if (st->podl_pw_status > 0) len += nla_total_size(sizeof(u32)); /* _PODL_PSE_PW_D_STATUS */ if (st->c33_admin_state > 0) len += nla_total_size(sizeof(u32)); /* _C33_PSE_ADMIN_STATE */ if (st->c33_pw_status > 0) len += nla_total_size(sizeof(u32)); /* _C33_PSE_PW_D_STATUS */ if (st->c33_pw_class > 0) len += nla_total_size(sizeof(u32)); /* _C33_PSE_PW_CLASS */ if (st->c33_actual_pw > 0) len += nla_total_size(sizeof(u32)); /* _C33_PSE_ACTUAL_PW */ if 
(st->c33_ext_state_info.c33_pse_ext_state > 0) { len += nla_total_size(sizeof(u32)); /* _C33_PSE_EXT_STATE */ if (st->c33_ext_state_info.__c33_pse_ext_substate > 0) /* _C33_PSE_EXT_SUBSTATE */ len += nla_total_size(sizeof(u32)); } if (st->c33_avail_pw_limit > 0) /* _C33_AVAIL_PSE_PW_LIMIT */ len += nla_total_size(sizeof(u32)); if (st->c33_pw_limit_nb_ranges > 0) /* _C33_PSE_PW_LIMIT_RANGES */ len += st->c33_pw_limit_nb_ranges * (nla_total_size(0) + nla_total_size(sizeof(u32)) * 2); return len; } static int pse_put_pw_limit_ranges(struct sk_buff *skb, const struct pse_control_status *st) { const struct ethtool_c33_pse_pw_limit_range *pw_limit_ranges; int i; pw_limit_ranges = st->c33_pw_limit_ranges; for (i = 0; i < st->c33_pw_limit_nb_ranges; i++) { struct nlattr *nest; nest = nla_nest_start(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES); if (!nest) return -EMSGSIZE; if (nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_MIN, pw_limit_ranges->min) || nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_MAX, pw_limit_ranges->max)) { nla_nest_cancel(skb, nest); return -EMSGSIZE; } nla_nest_end(skb, nest); pw_limit_ranges++; } return 0; } static int pse_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) { const struct pse_reply_data *data = PSE_REPDATA(reply_base); const struct pse_control_status *st = &data->status; if (st->podl_admin_state > 0 && nla_put_u32(skb, ETHTOOL_A_PODL_PSE_ADMIN_STATE, st->podl_admin_state)) return -EMSGSIZE; if (st->podl_pw_status > 0 && nla_put_u32(skb, ETHTOOL_A_PODL_PSE_PW_D_STATUS, st->podl_pw_status)) return -EMSGSIZE; if (st->c33_admin_state > 0 && nla_put_u32(skb, ETHTOOL_A_C33_PSE_ADMIN_STATE, st->c33_admin_state)) return -EMSGSIZE; if (st->c33_pw_status > 0 && nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_D_STATUS, st->c33_pw_status)) return -EMSGSIZE; if (st->c33_pw_class > 0 && nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_CLASS, st->c33_pw_class)) return -EMSGSIZE; if (st->c33_actual_pw > 0 && nla_put_u32(skb, ETHTOOL_A_C33_PSE_ACTUAL_PW, st->c33_actual_pw)) return -EMSGSIZE; if (st->c33_ext_state_info.c33_pse_ext_state > 0) { if (nla_put_u32(skb, ETHTOOL_A_C33_PSE_EXT_STATE, st->c33_ext_state_info.c33_pse_ext_state)) return -EMSGSIZE; if (st->c33_ext_state_info.__c33_pse_ext_substate > 0 && nla_put_u32(skb, ETHTOOL_A_C33_PSE_EXT_SUBSTATE, st->c33_ext_state_info.__c33_pse_ext_substate)) return -EMSGSIZE; } if (st->c33_avail_pw_limit > 0 && nla_put_u32(skb, ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT, st->c33_avail_pw_limit)) return -EMSGSIZE; if (st->c33_pw_limit_nb_ranges > 0 && pse_put_pw_limit_ranges(skb, st)) return -EMSGSIZE; return 0; } static void pse_cleanup_data(struct ethnl_reply_data *reply_base) { const struct pse_reply_data *data = PSE_REPDATA(reply_base); kfree(data->status.c33_pw_limit_ranges); } /* PSE_SET */ const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = { [ETHTOOL_A_PSE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), [ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] = NLA_POLICY_RANGE(NLA_U32, ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED, ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED), [ETHTOOL_A_C33_PSE_ADMIN_CONTROL] = NLA_POLICY_RANGE(NLA_U32, ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED, ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED), [ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT] = { .type = NLA_U32 }, }; static int ethnl_set_pse_validate(struct ethnl_req_info *req_info, struct genl_info *info) { struct net_device *dev = req_info->dev; struct nlattr **tb = info->attrs; struct phy_device *phydev; phydev = dev->phydev; if (!phydev) { 
NL_SET_ERR_MSG(info->extack, "No PHY is attached"); return -EOPNOTSUPP; } if (!phydev->psec) { NL_SET_ERR_MSG(info->extack, "No PSE is attached"); return -EOPNOTSUPP; } if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] && !pse_has_podl(phydev->psec)) { NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL], "setting PoDL PSE admin control not supported"); return -EOPNOTSUPP; } if (tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL] && !pse_has_c33(phydev->psec)) { NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL], "setting C33 PSE admin control not supported"); return -EOPNOTSUPP; } return 1; } static int ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info) { struct net_device *dev = req_info->dev; struct nlattr **tb = info->attrs; struct phy_device *phydev; int ret = 0; phydev = dev->phydev; if (tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]) { unsigned int pw_limit; pw_limit = nla_get_u32(tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]); ret = pse_ethtool_set_pw_limit(phydev->psec, info->extack, pw_limit); if (ret) return ret; } /* These values are already validated by the ethnl_pse_set_policy */ if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] || tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]) { struct pse_control_config config = {}; if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]) config.podl_admin_control = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]); if (tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]) config.c33_admin_control = nla_get_u32(tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]); /* pse_ethtool_set_config() will do nothing if the config * is zero */ ret = pse_ethtool_set_config(phydev->psec, info->extack, &config); if (ret) return ret; } /* Return errno or zero - PSE has no notification */ return ret; } const struct ethnl_request_ops ethnl_pse_request_ops = { .request_cmd = ETHTOOL_MSG_PSE_GET, .reply_cmd = ETHTOOL_MSG_PSE_GET_REPLY, .hdr_attr = ETHTOOL_A_PSE_HEADER, .req_info_size = sizeof(struct pse_req_info), .reply_data_size = sizeof(struct pse_reply_data), .prepare_data = pse_prepare_data, .reply_size = pse_reply_size, .fill_reply = pse_fill_reply, .cleanup_data = pse_cleanup_data, .set_validate = ethnl_set_pse_validate, .set = ethnl_set_pse, /* PSE has no notification */ };
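/*
 * Illustrative sketch (not part of this file): the attributes above are
 * only emitted when the matching fields of struct pse_control_status are
 * non-zero, so a PSE driver sitting behind pse_ethtool_get_status() only
 * has to fill in what it supports.  my_fill_status() is a hypothetical
 * helper, and the _DELIVERING constant plus the example values are
 * assumptions; fields left at zero are simply skipped by pse_fill_reply().
 */
static void my_fill_status(struct pse_control_status *status)
{
	/* PoDL not supported on this port: leave the podl_* fields at zero */

	status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
	status->c33_pw_status   = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
	status->c33_pw_class    = 4;		/* illustrative class */
	status->c33_actual_pw   = 12500;	/* illustrative value */
}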
14 10 10 15 15 15 15 14 15 9 9 9 9 492 495 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2007 Andi Kleen, SUSE Labs. * * This contains most of the x86 vDSO kernel-side code. */ #include <linux/mm.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/random.h> #include <linux/elf.h> #include <linux/cpu.h> #include <linux/ptrace.h> #include <linux/time_namespace.h> #include <asm/pvclock.h> #include <asm/vgtod.h> #include <asm/proto.h> #include <asm/vdso.h> #include <asm/vvar.h> #include <asm/tlb.h> #include <asm/page.h> #include <asm/desc.h> #include <asm/cpufeature.h> #include <clocksource/hyperv_timer.h> #undef _ASM_X86_VVAR_H #define EMIT_VVAR(name, offset) \ const size_t name ## _offset = offset; #include <asm/vvar.h> struct vdso_data *arch_get_vdso_data(void *vvar_page) { return (struct vdso_data *)(vvar_page + _vdso_data_offset); } #undef EMIT_VVAR unsigned int vclocks_used __read_mostly; #if defined(CONFIG_X86_64) unsigned int __read_mostly vdso64_enabled = 1; #endif int __init init_vdso_image(const struct vdso_image *image) { BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32); BUG_ON(image->size % PAGE_SIZE != 0); apply_alternatives((struct alt_instr *)(image->data + image->alt), (struct alt_instr *)(image->data + image->alt + image->alt_len)); return 0; } static const struct vm_special_mapping vvar_mapping; struct linux_binprm; static vm_fault_t vdso_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) { const struct vdso_image *image = vma->vm_mm->context.vdso_image; if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size) return VM_FAULT_SIGBUS; vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT)); get_page(vmf->page); return 0; } static void vdso_fix_landing(const struct vdso_image *image, struct vm_area_struct *new_vma) { #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION if (in_ia32_syscall() && image == &vdso_image_32) { struct pt_regs *regs = current_pt_regs(); unsigned long vdso_land = image->sym_int80_landing_pad; unsigned long old_land_addr = vdso_land + (unsigned 
long)current->mm->context.vdso; /* Fixing userspace landing - look at do_fast_syscall_32 */ if (regs->ip == old_land_addr) regs->ip = new_vma->vm_start + vdso_land; } #endif } static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) { const struct vdso_image *image = current->mm->context.vdso_image; vdso_fix_landing(image, new_vma); current->mm->context.vdso = (void __user *)new_vma->vm_start; return 0; } #ifdef CONFIG_TIME_NS /* * The vvar page layout depends on whether a task belongs to the root or * non-root time namespace. Whenever a task changes its namespace, the VVAR * page tables are cleared and then they will re-faulted with a * corresponding layout. * See also the comment near timens_setup_vdso_data() for details. */ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) { struct mm_struct *mm = task->mm; struct vm_area_struct *vma; VMA_ITERATOR(vmi, mm, 0); mmap_read_lock(mm); for_each_vma(vmi, vma) { if (vma_is_special_mapping(vma, &vvar_mapping)) zap_vma_pages(vma); } mmap_read_unlock(mm); return 0; } #endif static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) { const struct vdso_image *image = vma->vm_mm->context.vdso_image; unsigned long pfn; long sym_offset; if (!image) return VM_FAULT_SIGBUS; sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) + image->sym_vvar_start; /* * Sanity check: a symbol offset of zero means that the page * does not exist for this vdso image, not that the page is at * offset zero relative to the text mapping. This should be * impossible here, because sym_offset should only be zero for * the page past the end of the vvar mapping. */ if (sym_offset == 0) return VM_FAULT_SIGBUS; if (sym_offset == image->sym_vvar_page) { struct page *timens_page = find_timens_vvar_page(vma); pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT; /* * If a task belongs to a time namespace then a namespace * specific VVAR is mapped with the sym_vvar_page offset and * the real VVAR page is mapped with the sym_timens_page * offset. * See also the comment near timens_setup_vdso_data(). */ if (timens_page) { unsigned long addr; vm_fault_t err; /* * Optimization: inside time namespace pre-fault * VVAR page too. As on timens page there are only * offsets for clocks on VVAR, it'll be faulted * shortly by VDSO code. 
*/ addr = vmf->address + (image->sym_timens_page - sym_offset); err = vmf_insert_pfn(vma, addr, pfn); if (unlikely(err & VM_FAULT_ERROR)) return err; pfn = page_to_pfn(timens_page); } return vmf_insert_pfn(vma, vmf->address, pfn); } else if (sym_offset == image->sym_pvclock_page) { struct pvclock_vsyscall_time_info *pvti = pvclock_get_pvti_cpu0_va(); if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) { return vmf_insert_pfn_prot(vma, vmf->address, __pa(pvti) >> PAGE_SHIFT, pgprot_decrypted(vma->vm_page_prot)); } } else if (sym_offset == image->sym_hvclock_page) { pfn = hv_get_tsc_pfn(); if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK)) return vmf_insert_pfn(vma, vmf->address, pfn); } else if (sym_offset == image->sym_timens_page) { struct page *timens_page = find_timens_vvar_page(vma); if (!timens_page) return VM_FAULT_SIGBUS; pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT; return vmf_insert_pfn(vma, vmf->address, pfn); } return VM_FAULT_SIGBUS; } static const struct vm_special_mapping vdso_mapping = { .name = "[vdso]", .fault = vdso_fault, .mremap = vdso_mremap, }; static const struct vm_special_mapping vvar_mapping = { .name = "[vvar]", .fault = vvar_fault, }; /* * Add vdso and vvar mappings to current process. * @image - blob to map * @addr - request a specific address (zero to map at free addr) */ static int map_vdso(const struct vdso_image *image, unsigned long addr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long text_start; int ret = 0; if (mmap_write_lock_killable(mm)) return -EINTR; addr = get_unmapped_area(NULL, addr, image->size - image->sym_vvar_start, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } text_start = addr - image->sym_vvar_start; /* * MAYWRITE to allow gdb to COW and set breakpoints */ vma = _install_special_mapping(mm, text_start, image->size, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, &vdso_mapping); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto up_fail; } vma = _install_special_mapping(mm, addr, -image->sym_vvar_start, VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP| VM_PFNMAP, &vvar_mapping); if (IS_ERR(vma)) { ret = PTR_ERR(vma); do_munmap(mm, text_start, image->size, NULL); } else { current->mm->context.vdso = (void __user *)text_start; current->mm->context.vdso_image = image; } up_fail: mmap_write_unlock(mm); return ret; } int map_vdso_once(const struct vdso_image *image, unsigned long addr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; VMA_ITERATOR(vmi, mm, 0); mmap_write_lock(mm); /* * Check if we have already mapped vdso blob - fail to prevent * abusing from userspace install_special_mapping, which may * not do accounting and rlimit right. * We could search vma near context.vdso, but it's a slowpath, * so let's explicitly check all VMAs to be completely sure. 
*/ for_each_vma(vmi, vma) { if (vma_is_special_mapping(vma, &vdso_mapping) || vma_is_special_mapping(vma, &vvar_mapping)) { mmap_write_unlock(mm); return -EEXIST; } } mmap_write_unlock(mm); return map_vdso(image, addr); } #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) static int load_vdso32(void) { if (vdso32_enabled != 1) /* Other values all mean "disabled" */ return 0; return map_vdso(&vdso_image_32, 0); } #endif #ifdef CONFIG_X86_64 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { if (!vdso64_enabled) return 0; return map_vdso(&vdso_image_64, 0); } #ifdef CONFIG_COMPAT int compat_arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp, bool x32) { #ifdef CONFIG_X86_X32_ABI if (x32) { if (!vdso64_enabled) return 0; return map_vdso(&vdso_image_x32, 0); } #endif #ifdef CONFIG_IA32_EMULATION return load_vdso32(); #else return 0; #endif } #endif #else int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { return load_vdso32(); } #endif bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs) { #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) const struct vdso_image *image = current->mm->context.vdso_image; unsigned long vdso = (unsigned long) current->mm->context.vdso; if (in_ia32_syscall() && image == &vdso_image_32) { if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad || regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad) return true; } #endif return false; } #ifdef CONFIG_X86_64 static __init int vdso_setup(char *s) { vdso64_enabled = simple_strtoul(s, NULL, 0); return 1; } __setup("vdso=", vdso_setup); #endif /* CONFIG_X86_64 */
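vdso_setup() above makes the vdso= boot parameter gate whether map_vdso() runs at execve() time. As a hedged illustration (not part of the file above), the userspace sketch below locates the resulting [vdso] mapping through the AT_SYSINFO_EHDR auxiliary-vector entry that the ELF loader passes alongside it; it assumes an x86_64 target with the vDSO enabled.

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
        /* AT_SYSINFO_EHDR points at the ELF header of the [vdso] mapping. */
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

        if (!vdso) {
                /* Possible if the vDSO was disabled, e.g. booting with vdso=0. */
                puts("no vDSO mapped");
                return 1;
        }

        Elf64_Ehdr *ehdr = (Elf64_Ehdr *)vdso;
        printf("[vdso] mapped at %#lx, ELF type %u\n", vdso, ehdr->e_type);
        return 0;
}

The same address appears as the [vdso] entry in /proc/self/maps, installed by the _install_special_mapping() call in map_vdso().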
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE       0x01
#define WQ_FLAG_WOKEN           0x02
#define WQ_FLAG_CUSTOM          0x04
#define WQ_FLAG_DONE            0x08
#define WQ_FLAG_PRIORITY        0x10

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
        unsigned int            flags;
        void                    *private;
        wait_queue_func_t       func;
        struct list_head        entry;
};

struct wait_queue_head {
        spinlock_t              lock;
        struct list_head        head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
        .private        = tsk, \
        .func           = default_wake_function, \
        .entry          = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
        struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
        .lock           = __SPIN_LOCK_UNLOCKED(name.lock), \
        .head           = LIST_HEAD_INIT(name.head) }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
        struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

#define init_waitqueue_head(wq_head) \
        do { \
                static struct lock_class_key __key; \
 \
                __init_waitqueue_head((wq_head), #wq_head, &__key); \
        } while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
        ({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
        struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void
init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p) { wq_entry->flags = 0; wq_entry->private = p; wq_entry->func = default_wake_function; } static inline void init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func) { wq_entry->flags = 0; wq_entry->private = NULL; wq_entry->func = func; } /** * waitqueue_active -- locklessly test for waiters on the queue * @wq_head: the waitqueue to test for waiters * * returns true if the wait list is not empty * * NOTE: this function is lockless and requires care, incorrect usage _will_ * lead to sporadic and non-obvious failure. * * Use either while holding wait_queue_head::lock or when used for wakeups * with an extra smp_mb() like:: * * CPU0 - waker CPU1 - waiter * * for (;;) { * @cond = true; prepare_to_wait(&wq_head, &wait, state); * smp_mb(); // smp_mb() from set_current_state() * if (waitqueue_active(wq_head)) if (@cond) * wake_up(wq_head); break; * schedule(); * } * finish_wait(&wq_head, &wait); * * Because without the explicit smp_mb() it's possible for the * waitqueue_active() load to get hoisted over the @cond store such that we'll * observe an empty wait list while the waiter might not observe @cond. * * Also note that this 'optimization' trades a spin_lock() for an smp_mb(), * which (when the lock is uncontended) are of roughly equal cost. */ static inline int waitqueue_active(struct wait_queue_head *wq_head) { return !list_empty(&wq_head->head); } /** * wq_has_single_sleeper - check if there is only one sleeper * @wq_head: wait queue head * * Returns true of wq_head has only one sleeper on the list. * * Please refer to the comment for waitqueue_active. */ static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head) { return list_is_singular(&wq_head->head); } /** * wq_has_sleeper - check if there are any waiting processes * @wq_head: wait queue head * * Returns true if wq_head has waiting processes * * Please refer to the comment for waitqueue_active. */ static inline bool wq_has_sleeper(struct wait_queue_head *wq_head) { /* * We need to be sure we are in sync with the * add_wait_queue modifications to the wait queue. * * This memory barrier should be paired with one on the * waiting side. 
*/ smp_mb(); return waitqueue_active(wq_head); } extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { struct list_head *head = &wq_head->head; struct wait_queue_entry *wq; list_for_each_entry(wq, &wq_head->head, entry) { if (!(wq->flags & WQ_FLAG_PRIORITY)) break; head = &wq->entry; } list_add(&wq_entry->entry, head); } /* * Used for wake-one threads: */ static inline void __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { wq_entry->flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue(wq_head, wq_entry); } static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { list_add_tail(&wq_entry->entry, &wq_head->head); } static inline void __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { wq_entry->flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_entry_tail(wq_head, wq_entry); } static inline void __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry) { list_del(&wq_entry->entry); } int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr); void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode); void __wake_up_pollfree(struct wait_queue_head *wq_head); #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL) #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL) #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1) #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0) #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL) #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE) /* * Wakeup macros to be used to report events to the targets. 
*/ #define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m)) #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m)) #define wake_up_poll(x, m) \ __wake_up(x, TASK_NORMAL, 1, poll_to_key(m)) #define wake_up_poll_on_current_cpu(x, m) \ __wake_up_on_current_cpu(x, TASK_NORMAL, poll_to_key(m)) #define wake_up_locked_poll(x, m) \ __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m)) #define wake_up_interruptible_poll(x, m) \ __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m)) #define wake_up_interruptible_sync_poll(x, m) \ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m)) #define wake_up_interruptible_sync_poll_locked(x, m) \ __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m)) /** * wake_up_pollfree - signal that a polled waitqueue is going away * @wq_head: the wait queue head * * In the very rare cases where a ->poll() implementation uses a waitqueue whose * lifetime is tied to a task rather than to the 'struct file' being polled, * this function must be called before the waitqueue is freed so that * non-blocking polls (e.g. epoll) are notified that the queue is going away. * * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU. */ static inline void wake_up_pollfree(struct wait_queue_head *wq_head) { /* * For performance reasons, we don't always take the queue lock here. * Therefore, we might race with someone removing the last entry from * the queue, and proceed while they still hold the queue lock. * However, rcu_read_lock() is required to be held in such cases, so we * can safely proceed with an RCU-delayed free. */ if (waitqueue_active(wq_head)) __wake_up_pollfree(wq_head); } #define ___wait_cond_timeout(condition) \ ({ \ bool __cond = (condition); \ if (__cond && !__ret) \ __ret = 1; \ __cond || !__ret; \ }) #define ___wait_is_interruptible(state) \ (!__builtin_constant_p(state) || \ (state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL))) extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags); /* * The below macro ___wait_event() has an explicit shadow of the __ret * variable when used from the wait_event_*() macros. * * This is so that both can use the ___wait_cond_timeout() construct * to wrap the condition. * * The type inconsistency of the wait_event_*() __ret variable is also * on purpose; we use long where we can return timeout values and int * otherwise. */ #define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \ ({ \ __label__ __out; \ struct wait_queue_entry __wq_entry; \ long __ret = ret; /* explicit shadow */ \ \ init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \ for (;;) { \ long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\ \ if (condition) \ break; \ \ if (___wait_is_interruptible(state) && __int) { \ __ret = __int; \ goto __out; \ } \ \ cmd; \ } \ finish_wait(&wq_head, &__wq_entry); \ __out: __ret; \ }) #define __wait_event(wq_head, condition) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ schedule()) /** * wait_event - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. 
*/ #define wait_event(wq_head, condition) \ do { \ might_sleep(); \ if (condition) \ break; \ __wait_event(wq_head, condition); \ } while (0) #define __io_wait_event(wq_head, condition) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ io_schedule()) /* * io_wait_event() -- like wait_event() but with io_schedule() */ #define io_wait_event(wq_head, condition) \ do { \ might_sleep(); \ if (condition) \ break; \ __io_wait_event(wq_head, condition); \ } while (0) #define __wait_event_freezable(wq_head, condition) \ ___wait_event(wq_head, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), \ 0, 0, schedule()) /** * wait_event_freezable - sleep (or freeze) until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute * to system load) until the @condition evaluates to true. The * @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. */ #define wait_event_freezable(wq_head, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_freezable(wq_head, condition); \ __ret; \ }) #define __wait_event_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_UNINTERRUPTIBLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_timeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ #define wait_event_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_timeout(wq_head, condition, timeout); \ __ret; \ }) #define __wait_event_freezable_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 0, timeout, \ __ret = schedule_timeout(__ret)) /* * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid * increasing load and is freezable. 
*/ #define wait_event_freezable_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \ __ret; \ }) #define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \ cmd1; schedule(); cmd2) /* * Just like wait_event_cmd(), except it sets exclusive flag */ #define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \ do { \ if (condition) \ break; \ __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \ } while (0) #define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ cmd1; schedule(); cmd2) /** * wait_event_cmd - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @cmd1: the command will be executed before sleep * @cmd2: the command will be executed after sleep * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. */ #define wait_event_cmd(wq_head, condition, cmd1, cmd2) \ do { \ if (condition) \ break; \ __wait_event_cmd(wq_head, condition, cmd1, cmd2); \ } while (0) #define __wait_event_interruptible(wq_head, condition) \ ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ schedule()) /** * wait_event_interruptible - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible(wq_head, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_interruptible(wq_head, condition); \ __ret; \ }) #define __wait_event_interruptible_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_INTERRUPTIBLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was * interrupted by a signal. 
*/ #define wait_event_interruptible_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_interruptible_timeout(wq_head, \ condition, timeout); \ __ret; \ }) #define __wait_event_hrtimeout(wq_head, condition, timeout, state) \ ({ \ int __ret = 0; \ struct hrtimer_sleeper __t; \ \ hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ HRTIMER_MODE_REL); \ if ((timeout) != KTIME_MAX) { \ hrtimer_set_expires_range_ns(&__t.timer, timeout, \ current->timer_slack_ns); \ hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \ } \ \ __ret = ___wait_event(wq_head, condition, state, 0, 0, \ if (!__t.task) { \ __ret = -ETIME; \ break; \ } \ schedule()); \ \ hrtimer_cancel(&__t.timer); \ destroy_hrtimer_on_stack(&__t.timer); \ __ret; \ }) /** * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, as a ktime_t * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function returns 0 if @condition became true, or -ETIME if the timeout * elapsed. */ #define wait_event_hrtimeout(wq_head, condition, timeout) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_hrtimeout(wq_head, condition, timeout, \ TASK_UNINTERRUPTIBLE); \ __ret; \ }) /** * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, as a ktime_t * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function returns 0 if @condition became true, -ERESTARTSYS if it was * interrupted by a signal, or -ETIME if the timeout elapsed. 
*/ #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ ({ \ long __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_hrtimeout(wq, condition, timeout, \ TASK_INTERRUPTIBLE); \ __ret; \ }) #define __wait_event_interruptible_exclusive(wq, condition) \ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ schedule()) #define wait_event_interruptible_exclusive(wq, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_interruptible_exclusive(wq, condition); \ __ret; \ }) #define __wait_event_killable_exclusive(wq, condition) \ ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \ schedule()) #define wait_event_killable_exclusive(wq, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_killable_exclusive(wq, condition); \ __ret; \ }) #define __wait_event_freezable_exclusive(wq, condition) \ ___wait_event(wq, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 1, 0,\ schedule()) #define wait_event_freezable_exclusive(wq, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_freezable_exclusive(wq, condition); \ __ret; \ }) /** * wait_event_idle - wait for a condition without contributing to system load * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * */ #define wait_event_idle(wq_head, condition) \ do { \ might_sleep(); \ if (!(condition)) \ ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \ } while (0) /** * wait_event_idle_exclusive - wait for a condition with contributing to system load * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. * The @condition is checked each time the waitqueue @wq_head is woken up. * * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag * set thus if other processes wait on the same list, when this * process is woken further processes are not considered. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * */ #define wait_event_idle_exclusive(wq_head, condition) \ do { \ might_sleep(); \ if (!(condition)) \ ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \ } while (0) #define __wait_event_idle_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_IDLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. 
* * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ #define wait_event_idle_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_idle_timeout(wq_head, condition, timeout); \ __ret; \ }) #define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_IDLE, 1, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_IDLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag * set thus if other processes wait on the same list, when this * process is woken further processes are not considered. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * or the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed. */ #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\ __ret; \ }) extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *); extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *); #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \ ({ \ int __ret; \ DEFINE_WAIT(__wait); \ if (exclusive) \ __wait.flags |= WQ_FLAG_EXCLUSIVE; \ do { \ __ret = fn(&(wq), &__wait); \ if (__ret) \ break; \ } while (!(condition)); \ __remove_wait_queue(&(wq), &__wait); \ __set_current_state(TASK_RUNNING); \ __ret; \ }) /** * wait_event_interruptible_locked - sleep until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock()/spin_unlock() * functions which must match the way they are locked/unlocked outside * of this macro. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_locked(wq, condition) \ ((condition) \ ? 
0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr)) /** * wait_event_interruptible_locked_irq - sleep until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() * functions which must match the way they are locked/unlocked outside * of this macro. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_locked_irq(wq, condition) \ ((condition) \ ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq)) /** * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock()/spin_unlock() * functions which must match the way they are locked/unlocked outside * of this macro. * * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag * set thus when other process waits process on the list if this * process is awaken further processes are not considered. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_exclusive_locked(wq, condition) \ ((condition) \ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr)) /** * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true * @wq: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq is woken up. * * It must be called with wq.lock being held. This spinlock is * unlocked while sleeping but @condition testing is done while lock * is held and when this macro exits the lock is held. * * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() * functions which must match the way they are locked/unlocked outside * of this macro. * * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag * set thus when other process waits process on the list if this * process is awaken further processes are not considered. * * wake_up_locked() has to be called after changing any variable that could * change the result of the wait condition. 
* * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ ((condition) \ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq)) #define __wait_event_killable(wq, condition) \ ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule()) /** * wait_event_killable - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * * The process is put to sleep (TASK_KILLABLE) until the * @condition evaluates to true or a signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ #define wait_event_killable(wq_head, condition) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_killable(wq_head, condition); \ __ret; \ }) #define __wait_event_state(wq, condition, state) \ ___wait_event(wq, condition, state, 0, 0, schedule()) /** * wait_event_state - sleep until a condition gets true * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @state: state to sleep in * * The process is put to sleep (@state) until the @condition evaluates to true * or a signal is received (when allowed by @state). The @condition is checked * each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * The function will return -ERESTARTSYS if it was interrupted by a signal * (when allowed by @state) and 0 if @condition evaluated to true. */ #define wait_event_state(wq_head, condition, state) \ ({ \ int __ret = 0; \ might_sleep(); \ if (!(condition)) \ __ret = __wait_event_state(wq_head, condition, state); \ __ret; \ }) #define __wait_event_killable_timeout(wq_head, condition, timeout) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ TASK_KILLABLE, 0, timeout, \ __ret = schedule_timeout(__ret)) /** * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_KILLABLE) until the * @condition evaluates to true or a kill signal is received. * The @condition is checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * Returns: * 0 if the @condition evaluated to %false after the @timeout elapsed, * 1 if the @condition evaluated to %true after the @timeout elapsed, * the remaining jiffies (at least 1) if the @condition evaluated * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was * interrupted by a kill signal. * * Only kill signals interrupt this process. 
*/ #define wait_event_killable_timeout(wq_head, condition, timeout) \ ({ \ long __ret = timeout; \ might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_killable_timeout(wq_head, \ condition, timeout); \ __ret; \ }) #define __wait_event_lock_irq(wq_head, condition, lock, cmd) \ (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ spin_unlock_irq(&lock); \ cmd; \ schedule(); \ spin_lock_irq(&lock)) /** * wait_event_lock_irq_cmd - sleep until a condition gets true. The * condition is checked under the lock. This * is expected to be called with the lock * taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before cmd * and schedule() and reacquired afterwards. * @cmd: a command which is invoked outside the critical section before * sleep * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before invoking the cmd and going to sleep and is reacquired * afterwards. */ #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \ do { \ if (condition) \ break; \ __wait_event_lock_irq(wq_head, condition, lock, cmd); \ } while (0) /** * wait_event_lock_irq - sleep until a condition gets true. The * condition is checked under the lock. This * is expected to be called with the lock * taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before schedule() * and reacquired afterwards. * * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the * @condition evaluates to true. The @condition is checked each time * the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before going to sleep and is reacquired afterwards. */ #define wait_event_lock_irq(wq_head, condition, lock) \ do { \ if (condition) \ break; \ __wait_event_lock_irq(wq_head, condition, lock, ); \ } while (0) #define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \ ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ spin_unlock_irq(&lock); \ cmd; \ schedule(); \ spin_lock_irq(&lock)) /** * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true. * The condition is checked under the lock. This is expected to * be called with the lock taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before cmd and * schedule() and reacquired afterwards. * @cmd: a command which is invoked outside the critical section before * sleep * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or a signal is received. The @condition is * checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before invoking the cmd and going to sleep and is reacquired * afterwards. 
* * The macro will return -ERESTARTSYS if it was interrupted by a signal * and 0 if @condition evaluated to true. */ #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \ ({ \ int __ret = 0; \ if (!(condition)) \ __ret = __wait_event_interruptible_lock_irq(wq_head, \ condition, lock, cmd); \ __ret; \ }) /** * wait_event_interruptible_lock_irq - sleep until a condition gets true. * The condition is checked under the lock. This is expected * to be called with the lock taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before schedule() * and reacquired afterwards. * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or signal is received. The @condition is * checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before going to sleep and is reacquired afterwards. * * The macro will return -ERESTARTSYS if it was interrupted by a signal * and 0 if @condition evaluated to true. */ #define wait_event_interruptible_lock_irq(wq_head, condition, lock) \ ({ \ int __ret = 0; \ if (!(condition)) \ __ret = __wait_event_interruptible_lock_irq(wq_head, \ condition, lock,); \ __ret; \ }) #define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ state, 0, timeout, \ spin_unlock_irq(&lock); \ __ret = schedule_timeout(__ret); \ spin_lock_irq(&lock)); /** * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets * true or a timeout elapses. The condition is checked under * the lock. This is expected to be called with the lock taken. * @wq_head: the waitqueue to wait on * @condition: a C expression for the event to wait for * @lock: a locked spinlock_t, which will be released before schedule() * and reacquired afterwards. * @timeout: timeout, in jiffies * * The process is put to sleep (TASK_INTERRUPTIBLE) until the * @condition evaluates to true or signal is received. The @condition is * checked each time the waitqueue @wq_head is woken up. * * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * * This is supposed to be called while holding the lock. The lock is * dropped before going to sleep and is reacquired afterwards. * * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it * was interrupted by a signal, and the remaining jiffies otherwise * if the condition evaluated to true before the timeout elapsed. 
*/ #define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \ timeout) \ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_lock_irq_timeout( \ wq_head, condition, lock, timeout, \ TASK_INTERRUPTIBLE); \ __ret; \ }) #define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_lock_irq_timeout( \ wq_head, condition, lock, timeout, \ TASK_UNINTERRUPTIBLE); \ __ret; \ }) /* * Waitqueues which are removed from the waitqueue_head at wakeup time */ void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state); void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry); long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout); int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); #define DEFINE_WAIT_FUNC(name, function) \ struct wait_queue_entry name = { \ .private = current, \ .func = function, \ .entry = LIST_HEAD_INIT((name).entry), \ } #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) #define init_wait(wait) \ do { \ (wait)->private = current; \ (wait)->func = autoremove_wake_function; \ INIT_LIST_HEAD(&(wait)->entry); \ (wait)->flags = 0; \ } while (0) typedef int (*task_call_f)(struct task_struct *p, void *arg); extern int task_call_func(struct task_struct *p, task_call_f func, void *arg); #endif /* _LINUX_WAIT_H */
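A hedged usage sketch of the API above (the demo_* names are made up for illustration, and the code would live in kernel context such as a module): the canonical pairing of wait_event_interruptible() with wake_up(), where the waker updates the condition before waking, exactly as the comments on the wait_event*() macros require.

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_ready;

/* Waiter: sleeps in TASK_INTERRUPTIBLE until demo_ready becomes true. */
static int demo_wait_for_event(void)
{
        int ret;

        ret = wait_event_interruptible(demo_wq, demo_ready);
        if (ret)                /* -ERESTARTSYS: interrupted by a signal */
                return ret;
        /* demo_ready was observed true; proceed with the work. */
        return 0;
}

/* Waker: change the condition first, then wake the queue. */
static void demo_signal_event(void)
{
        demo_ready = true;      /* update the wait condition ... */
        wake_up(&demo_wq);      /* ... then wake any waiters */
}

The ordering matters: wait_event_interruptible() re-checks demo_ready after every wakeup, and wake_up() provides the waker-side ordering described in the waitqueue_active() comment, so a waiter cannot miss a condition set before the wake_up() call.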
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#if IS_ENABLED(CONFIG_KVM)
#include <linux/kvm_host.h>
#endif
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pseudo_fs.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/pm_runtime.h>
#include <linux/interval_tree.h>
#include <linux/iova_bitmap.h>
#include <linux/iommufd.h>
#include "vfio.h"

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

#define VFIO_MAGIC 0x5646494f /* "VFIO" */

static struct vfio {
	struct class			*device_class;
	struct ida			device_ida;
	struct vfsmount			*vfs_mount;
	int				fs_count;
} vfio;

#ifdef CONFIG_VFIO_NOIOMMU
bool vfio_noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
		   vfio_noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode,
		 "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away.
(default: false)"); #endif static DEFINE_XARRAY(vfio_device_set_xa); int vfio_assign_device_set(struct vfio_device *device, void *set_id) { unsigned long idx = (unsigned long)set_id; struct vfio_device_set *new_dev_set; struct vfio_device_set *dev_set; if (WARN_ON(!set_id)) return -EINVAL; /* * Atomically acquire a singleton object in the xarray for this set_id */ xa_lock(&vfio_device_set_xa); dev_set = xa_load(&vfio_device_set_xa, idx); if (dev_set) goto found_get_ref; xa_unlock(&vfio_device_set_xa); new_dev_set = kzalloc(sizeof(*new_dev_set), GFP_KERNEL); if (!new_dev_set) return -ENOMEM; mutex_init(&new_dev_set->lock); INIT_LIST_HEAD(&new_dev_set->device_list); new_dev_set->set_id = set_id; xa_lock(&vfio_device_set_xa); dev_set = __xa_cmpxchg(&vfio_device_set_xa, idx, NULL, new_dev_set, GFP_KERNEL); if (!dev_set) { dev_set = new_dev_set; goto found_get_ref; } kfree(new_dev_set); if (xa_is_err(dev_set)) { xa_unlock(&vfio_device_set_xa); return xa_err(dev_set); } found_get_ref: dev_set->device_count++; xa_unlock(&vfio_device_set_xa); mutex_lock(&dev_set->lock); device->dev_set = dev_set; list_add_tail(&device->dev_set_list, &dev_set->device_list); mutex_unlock(&dev_set->lock); return 0; } EXPORT_SYMBOL_GPL(vfio_assign_device_set); static void vfio_release_device_set(struct vfio_device *device) { struct vfio_device_set *dev_set = device->dev_set; if (!dev_set) return; mutex_lock(&dev_set->lock); list_del(&device->dev_set_list); mutex_unlock(&dev_set->lock); xa_lock(&vfio_device_set_xa); if (!--dev_set->device_count) { __xa_erase(&vfio_device_set_xa, (unsigned long)dev_set->set_id); mutex_destroy(&dev_set->lock); kfree(dev_set); } xa_unlock(&vfio_device_set_xa); } unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set) { struct vfio_device *cur; unsigned int open_count = 0; lockdep_assert_held(&dev_set->lock); list_for_each_entry(cur, &dev_set->device_list, dev_set_list) open_count += cur->open_count; return open_count; } EXPORT_SYMBOL_GPL(vfio_device_set_open_count); struct vfio_device * vfio_find_device_in_devset(struct vfio_device_set *dev_set, struct device *dev) { struct vfio_device *cur; lockdep_assert_held(&dev_set->lock); list_for_each_entry(cur, &dev_set->device_list, dev_set_list) if (cur->dev == dev) return cur; return NULL; } EXPORT_SYMBOL_GPL(vfio_find_device_in_devset); /* * Device objects - create, release, get, put, search */ /* Device reference always implies a group reference */ void vfio_device_put_registration(struct vfio_device *device) { if (refcount_dec_and_test(&device->refcount)) complete(&device->comp); } bool vfio_device_try_get_registration(struct vfio_device *device) { return refcount_inc_not_zero(&device->refcount); } /* * VFIO driver API */ /* Release helper called by vfio_put_device() */ static void vfio_device_release(struct device *dev) { struct vfio_device *device = container_of(dev, struct vfio_device, device); vfio_release_device_set(device); ida_free(&vfio.device_ida, device->index); if (device->ops->release) device->ops->release(device); iput(device->inode); simple_release_fs(&vfio.vfs_mount, &vfio.fs_count); kvfree(device); } static int vfio_init_device(struct vfio_device *device, struct device *dev, const struct vfio_device_ops *ops); /* * Allocate and initialize vfio_device so it can be registered to vfio * core. * * Drivers should use the wrapper vfio_alloc_device() for allocation. * @size is the size of the structure to be allocated, including any * private data used by the driver. 
* * Driver may provide an @init callback to cover device private data. * * Use vfio_put_device() to release the structure after success return. */ struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev, const struct vfio_device_ops *ops) { struct vfio_device *device; int ret; if (WARN_ON(size < sizeof(struct vfio_device))) return ERR_PTR(-EINVAL); device = kvzalloc(size, GFP_KERNEL); if (!device) return ERR_PTR(-ENOMEM); ret = vfio_init_device(device, dev, ops); if (ret) goto out_free; return device; out_free: kvfree(device); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(_vfio_alloc_device); static int vfio_fs_init_fs_context(struct fs_context *fc) { return init_pseudo(fc, VFIO_MAGIC) ? 0 : -ENOMEM; } static struct file_system_type vfio_fs_type = { .name = "vfio", .owner = THIS_MODULE, .init_fs_context = vfio_fs_init_fs_context, .kill_sb = kill_anon_super, }; static struct inode *vfio_fs_inode_new(void) { struct inode *inode; int ret; ret = simple_pin_fs(&vfio_fs_type, &vfio.vfs_mount, &vfio.fs_count); if (ret) return ERR_PTR(ret); inode = alloc_anon_inode(vfio.vfs_mount->mnt_sb); if (IS_ERR(inode)) simple_release_fs(&vfio.vfs_mount, &vfio.fs_count); return inode; } /* * Initialize a vfio_device so it can be registered to vfio core. */ static int vfio_init_device(struct vfio_device *device, struct device *dev, const struct vfio_device_ops *ops) { int ret; ret = ida_alloc_max(&vfio.device_ida, MINORMASK, GFP_KERNEL); if (ret < 0) { dev_dbg(dev, "Error to alloc index\n"); return ret; } device->index = ret; init_completion(&device->comp); device->dev = dev; device->ops = ops; device->inode = vfio_fs_inode_new(); if (IS_ERR(device->inode)) { ret = PTR_ERR(device->inode); goto out_inode; } if (ops->init) { ret = ops->init(device); if (ret) goto out_uninit; } device_initialize(&device->device); device->device.release = vfio_device_release; device->device.class = vfio.device_class; device->device.parent = device->dev; return 0; out_uninit: iput(device->inode); simple_release_fs(&vfio.vfs_mount, &vfio.fs_count); out_inode: vfio_release_device_set(device); ida_free(&vfio.device_ida, device->index); return ret; } static int __vfio_register_dev(struct vfio_device *device, enum vfio_group_type type) { int ret; if (WARN_ON(IS_ENABLED(CONFIG_IOMMUFD) && (!device->ops->bind_iommufd || !device->ops->unbind_iommufd || !device->ops->attach_ioas || !device->ops->detach_ioas))) return -EINVAL; /* * If the driver doesn't specify a set then the device is added to a * singleton set just for itself. */ if (!device->dev_set) vfio_assign_device_set(device, device); ret = dev_set_name(&device->device, "vfio%d", device->index); if (ret) return ret; ret = vfio_device_set_group(device, type); if (ret) return ret; /* * VFIO always sets IOMMU_CACHE because we offer no way for userspace to * restore cache coherency. It has to be checked here because it is only * valid for cases where we are using iommu groups. 
*/ if (type == VFIO_IOMMU && !vfio_device_is_noiommu(device) && !device_iommu_capable(device->dev, IOMMU_CAP_CACHE_COHERENCY)) { ret = -EINVAL; goto err_out; } ret = vfio_device_add(device); if (ret) goto err_out; /* Refcounting can't start until the driver calls register */ refcount_set(&device->refcount, 1); vfio_device_group_register(device); vfio_device_debugfs_init(device); return 0; err_out: vfio_device_remove_group(device); return ret; } int vfio_register_group_dev(struct vfio_device *device) { return __vfio_register_dev(device, VFIO_IOMMU); } EXPORT_SYMBOL_GPL(vfio_register_group_dev); /* * Register a virtual device without IOMMU backing. The user of this * device must not be able to directly trigger unmediated DMA. */ int vfio_register_emulated_iommu_dev(struct vfio_device *device) { return __vfio_register_dev(device, VFIO_EMULATED_IOMMU); } EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev); /* * Decrement the device reference count and wait for the device to be * removed. Open file descriptors for the device... */ void vfio_unregister_group_dev(struct vfio_device *device) { unsigned int i = 0; bool interrupted = false; long rc; /* * Prevent new device opened by userspace via the * VFIO_GROUP_GET_DEVICE_FD in the group path. */ vfio_device_group_unregister(device); /* * Balances vfio_device_add() in register path, also prevents * new device opened by userspace in the cdev path. */ vfio_device_del(device); vfio_device_put_registration(device); rc = try_wait_for_completion(&device->comp); while (rc <= 0) { if (device->ops->request) device->ops->request(device, i++); if (interrupted) { rc = wait_for_completion_timeout(&device->comp, HZ * 10); } else { rc = wait_for_completion_interruptible_timeout( &device->comp, HZ * 10); if (rc < 0) { interrupted = true; dev_warn(device->dev, "Device is currently in use, task" " \"%s\" (%d) " "blocked until device is released", current->comm, task_pid_nr(current)); } } } vfio_device_debugfs_exit(device); /* Balances vfio_device_set_group in register path */ vfio_device_remove_group(device); } EXPORT_SYMBOL_GPL(vfio_unregister_group_dev); #if IS_ENABLED(CONFIG_KVM) void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm) { void (*pfn)(struct kvm *kvm); bool (*fn)(struct kvm *kvm); bool ret; lockdep_assert_held(&device->dev_set->lock); if (!kvm) return; pfn = symbol_get(kvm_put_kvm); if (WARN_ON(!pfn)) return; fn = symbol_get(kvm_get_kvm_safe); if (WARN_ON(!fn)) { symbol_put(kvm_put_kvm); return; } ret = fn(kvm); symbol_put(kvm_get_kvm_safe); if (!ret) { symbol_put(kvm_put_kvm); return; } device->put_kvm = pfn; device->kvm = kvm; } void vfio_device_put_kvm(struct vfio_device *device) { lockdep_assert_held(&device->dev_set->lock); if (!device->kvm) return; if (WARN_ON(!device->put_kvm)) goto clear; device->put_kvm(device->kvm); device->put_kvm = NULL; symbol_put(kvm_put_kvm); clear: device->kvm = NULL; } #endif /* true if the vfio_device has open_device() called but not close_device() */ static bool vfio_assert_device_open(struct vfio_device *device) { return !WARN_ON_ONCE(!READ_ONCE(device->open_count)); } struct vfio_device_file * vfio_allocate_device_file(struct vfio_device *device) { struct vfio_device_file *df; df = kzalloc(sizeof(*df), GFP_KERNEL_ACCOUNT); if (!df) return ERR_PTR(-ENOMEM); df->device = device; spin_lock_init(&df->kvm_ref_lock); return df; } static int vfio_df_device_first_open(struct vfio_device_file *df) { struct vfio_device *device = df->device; struct iommufd_ctx *iommufd = df->iommufd; int ret; 
lockdep_assert_held(&device->dev_set->lock); if (!try_module_get(device->dev->driver->owner)) return -ENODEV; if (iommufd) ret = vfio_df_iommufd_bind(df); else ret = vfio_device_group_use_iommu(device); if (ret) goto err_module_put; if (device->ops->open_device) { ret = device->ops->open_device(device); if (ret) goto err_unuse_iommu; } return 0; err_unuse_iommu: if (iommufd) vfio_df_iommufd_unbind(df); else vfio_device_group_unuse_iommu(device); err_module_put: module_put(device->dev->driver->owner); return ret; } static void vfio_df_device_last_close(struct vfio_device_file *df) { struct vfio_device *device = df->device; struct iommufd_ctx *iommufd = df->iommufd; lockdep_assert_held(&device->dev_set->lock); if (device->ops->close_device) device->ops->close_device(device); if (iommufd) vfio_df_iommufd_unbind(df); else vfio_device_group_unuse_iommu(device); module_put(device->dev->driver->owner); } int vfio_df_open(struct vfio_device_file *df) { struct vfio_device *device = df->device; int ret = 0; lockdep_assert_held(&device->dev_set->lock); /* * Only the group path allows the device to be opened multiple * times. The device cdev path doesn't have a secure way for it. */ if (device->open_count != 0 && !df->group) return -EINVAL; device->open_count++; if (device->open_count == 1) { ret = vfio_df_device_first_open(df); if (ret) device->open_count--; } return ret; } void vfio_df_close(struct vfio_device_file *df) { struct vfio_device *device = df->device; lockdep_assert_held(&device->dev_set->lock); vfio_assert_device_open(device); if (device->open_count == 1) vfio_df_device_last_close(df); device->open_count--; } /* * Wrapper around pm_runtime_resume_and_get(). * Return error code on failure or 0 on success. */ static inline int vfio_device_pm_runtime_get(struct vfio_device *device) { struct device *dev = device->dev; if (dev->driver && dev->driver->pm) { int ret; ret = pm_runtime_resume_and_get(dev); if (ret) { dev_info_ratelimited(dev, "vfio: runtime resume failed %d\n", ret); return -EIO; } } return 0; } /* * Wrapper around pm_runtime_put(). */ static inline void vfio_device_pm_runtime_put(struct vfio_device *device) { struct device *dev = device->dev; if (dev->driver && dev->driver->pm) pm_runtime_put(dev); } /* * VFIO Device fd */ static int vfio_device_fops_release(struct inode *inode, struct file *filep) { struct vfio_device_file *df = filep->private_data; struct vfio_device *device = df->device; if (df->group) vfio_df_group_close(df); else vfio_df_unbind_iommufd(df); vfio_device_put_registration(device); kfree(df); return 0; } /* * vfio_mig_get_next_state - Compute the next step in the FSM * @cur_fsm - The current state the device is in * @new_fsm - The target state to reach * @next_fsm - Pointer to the next step to get to new_fsm * * Return 0 upon success, otherwise -errno * Upon success the next step in the state progression between cur_fsm and * new_fsm will be set in next_fsm. * * This breaks down requests for combination transitions into smaller steps and * returns the next step to get to new_fsm. The function may need to be called * multiple times before reaching new_fsm. 
* */ int vfio_mig_get_next_state(struct vfio_device *device, enum vfio_device_mig_state cur_fsm, enum vfio_device_mig_state new_fsm, enum vfio_device_mig_state *next_fsm) { enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_PRE_COPY_P2P + 1 }; /* * The coding in this table requires the driver to implement the * following FSM arcs: * RESUMING -> STOP * STOP -> RESUMING * STOP -> STOP_COPY * STOP_COPY -> STOP * * If P2P is supported then the driver must also implement these FSM * arcs: * RUNNING -> RUNNING_P2P * RUNNING_P2P -> RUNNING * RUNNING_P2P -> STOP * STOP -> RUNNING_P2P * * If precopy is supported then the driver must support these additional * FSM arcs: * RUNNING -> PRE_COPY * PRE_COPY -> RUNNING * PRE_COPY -> STOP_COPY * However, if precopy and P2P are supported together then the driver * must support these additional arcs beyond the P2P arcs above: * PRE_COPY -> RUNNING * PRE_COPY -> PRE_COPY_P2P * PRE_COPY_P2P -> PRE_COPY * PRE_COPY_P2P -> RUNNING_P2P * PRE_COPY_P2P -> STOP_COPY * RUNNING -> PRE_COPY * RUNNING_P2P -> PRE_COPY_P2P * * Without P2P and precopy the driver must implement: * RUNNING -> STOP * STOP -> RUNNING * * The coding will step through multiple states for some combination * transitions; if all optional features are supported, this means the * following ones: * PRE_COPY -> PRE_COPY_P2P -> STOP_COPY * PRE_COPY -> RUNNING -> RUNNING_P2P * PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP * PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP -> RESUMING * PRE_COPY_P2P -> RUNNING_P2P -> RUNNING * PRE_COPY_P2P -> RUNNING_P2P -> STOP * PRE_COPY_P2P -> RUNNING_P2P -> STOP -> RESUMING * RESUMING -> STOP -> RUNNING_P2P * RESUMING -> STOP -> RUNNING_P2P -> PRE_COPY_P2P * RESUMING -> STOP -> RUNNING_P2P -> RUNNING * RESUMING -> STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY * RESUMING -> STOP -> STOP_COPY * RUNNING -> RUNNING_P2P -> PRE_COPY_P2P * RUNNING -> RUNNING_P2P -> STOP * RUNNING -> RUNNING_P2P -> STOP -> RESUMING * RUNNING -> RUNNING_P2P -> STOP -> STOP_COPY * RUNNING_P2P -> RUNNING -> PRE_COPY * RUNNING_P2P -> STOP -> RESUMING * RUNNING_P2P -> STOP -> STOP_COPY * STOP -> RUNNING_P2P -> PRE_COPY_P2P * STOP -> RUNNING_P2P -> RUNNING * STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY * STOP_COPY -> STOP -> RESUMING * STOP_COPY -> STOP -> RUNNING_P2P * STOP_COPY -> STOP -> RUNNING_P2P -> RUNNING * * The following transitions are blocked: * STOP_COPY -> PRE_COPY * STOP_COPY -> PRE_COPY_P2P */ static const u8 vfio_from_fsm_table[VFIO_DEVICE_NUM_STATES][VFIO_DEVICE_NUM_STATES] = { [VFIO_DEVICE_STATE_STOP] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_RUNNING] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY, [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, 
[VFIO_DEVICE_STATE_PRE_COPY] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY, [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_PRE_COPY_P2P, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_PRE_COPY_P2P] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY, [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_STOP_COPY] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_RESUMING] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_RUNNING_P2P] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING, [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_RUNNING, [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, [VFIO_DEVICE_STATE_ERROR] = { [VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_ERROR, [VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR, }, }; static const unsigned int state_flags_table[VFIO_DEVICE_NUM_STATES] = { [VFIO_DEVICE_STATE_STOP] = VFIO_MIGRATION_STOP_COPY, [VFIO_DEVICE_STATE_RUNNING] = VFIO_MIGRATION_STOP_COPY, [VFIO_DEVICE_STATE_PRE_COPY] = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY, [VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY, [VFIO_DEVICE_STATE_STOP_COPY] = VFIO_MIGRATION_STOP_COPY, [VFIO_DEVICE_STATE_RESUMING] = VFIO_MIGRATION_STOP_COPY, [VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P, 
[VFIO_DEVICE_STATE_ERROR] = ~0U, }; if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table) || (state_flags_table[cur_fsm] & device->migration_flags) != state_flags_table[cur_fsm])) return -EINVAL; if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table) || (state_flags_table[new_fsm] & device->migration_flags) != state_flags_table[new_fsm]) return -EINVAL; /* * Arcs touching optional and unsupported states are skipped over. The * driver will instead see an arc from the original state to the next * logical state, as per the above comment. */ *next_fsm = vfio_from_fsm_table[cur_fsm][new_fsm]; while ((state_flags_table[*next_fsm] & device->migration_flags) != state_flags_table[*next_fsm]) *next_fsm = vfio_from_fsm_table[*next_fsm][new_fsm]; return (*next_fsm != VFIO_DEVICE_STATE_ERROR) ? 0 : -EINVAL; } EXPORT_SYMBOL_GPL(vfio_mig_get_next_state); /* * Convert the drivers's struct file into a FD number and return it to userspace */ static int vfio_ioct_mig_return_fd(struct file *filp, void __user *arg, struct vfio_device_feature_mig_state *mig) { int ret; int fd; fd = get_unused_fd_flags(O_CLOEXEC); if (fd < 0) { ret = fd; goto out_fput; } mig->data_fd = fd; if (copy_to_user(arg, mig, sizeof(*mig))) { ret = -EFAULT; goto out_put_unused; } fd_install(fd, filp); return 0; out_put_unused: put_unused_fd(fd); out_fput: fput(filp); return ret; } static int vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device, u32 flags, void __user *arg, size_t argsz) { size_t minsz = offsetofend(struct vfio_device_feature_mig_state, data_fd); struct vfio_device_feature_mig_state mig; struct file *filp = NULL; int ret; if (!device->mig_ops) return -ENOTTY; ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_GET, sizeof(mig)); if (ret != 1) return ret; if (copy_from_user(&mig, arg, minsz)) return -EFAULT; if (flags & VFIO_DEVICE_FEATURE_GET) { enum vfio_device_mig_state curr_state; ret = device->mig_ops->migration_get_state(device, &curr_state); if (ret) return ret; mig.device_state = curr_state; goto out_copy; } /* Handle the VFIO_DEVICE_FEATURE_SET */ filp = device->mig_ops->migration_set_state(device, mig.device_state); if (IS_ERR(filp) || !filp) goto out_copy; return vfio_ioct_mig_return_fd(filp, arg, &mig); out_copy: mig.data_fd = -1; if (copy_to_user(arg, &mig, sizeof(mig))) return -EFAULT; if (IS_ERR(filp)) return PTR_ERR(filp); return 0; } static int vfio_ioctl_device_feature_migration_data_size(struct vfio_device *device, u32 flags, void __user *arg, size_t argsz) { struct vfio_device_feature_mig_data_size data_size = {}; unsigned long stop_copy_length; int ret; if (!device->mig_ops) return -ENOTTY; ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET, sizeof(data_size)); if (ret != 1) return ret; ret = device->mig_ops->migration_get_data_size(device, &stop_copy_length); if (ret) return ret; data_size.stop_copy_length = stop_copy_length; if (copy_to_user(arg, &data_size, sizeof(data_size))) return -EFAULT; return 0; } static int vfio_ioctl_device_feature_migration(struct vfio_device *device, u32 flags, void __user *arg, size_t argsz) { struct vfio_device_feature_migration mig = { .flags = device->migration_flags, }; int ret; if (!device->mig_ops) return -ENOTTY; ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET, sizeof(mig)); if (ret != 1) return ret; if (copy_to_user(arg, &mig, sizeof(mig))) return -EFAULT; return 0; } void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes, u32 req_nodes) { struct interval_tree_node *prev, 
*curr, *comb_start, *comb_end; unsigned long min_gap, curr_gap; /* Special shortcut when a single range is required */ if (req_nodes == 1) { unsigned long last; comb_start = interval_tree_iter_first(root, 0, ULONG_MAX); /* Empty list */ if (WARN_ON_ONCE(!comb_start)) return; curr = comb_start; while (curr) { last = curr->last; prev = curr; curr = interval_tree_iter_next(curr, 0, ULONG_MAX); if (prev != comb_start) interval_tree_remove(prev, root); } comb_start->last = last; return; } /* Combine ranges which have the smallest gap */ while (cur_nodes > req_nodes) { prev = NULL; min_gap = ULONG_MAX; curr = interval_tree_iter_first(root, 0, ULONG_MAX); while (curr) { if (prev) { curr_gap = curr->start - prev->last; if (curr_gap < min_gap) { min_gap = curr_gap; comb_start = prev; comb_end = curr; } } prev = curr; curr = interval_tree_iter_next(curr, 0, ULONG_MAX); } /* Empty list or no nodes to combine */ if (WARN_ON_ONCE(min_gap == ULONG_MAX)) break; comb_start->last = comb_end->last; interval_tree_remove(comb_end, root); cur_nodes--; } } EXPORT_SYMBOL_GPL(vfio_combine_iova_ranges); /* Ranges should fit into a single kernel page */ #define LOG_MAX_RANGES \ (PAGE_SIZE / sizeof(struct vfio_device_feature_dma_logging_range)) static int vfio_ioctl_device_feature_logging_start(struct vfio_device *device, u32 flags, void __user *arg, size_t argsz) { size_t minsz = offsetofend(struct vfio_device_feature_dma_logging_control, ranges); struct vfio_device_feature_dma_logging_range __user *ranges; struct vfio_device_feature_dma_logging_control control; struct vfio_device_feature_dma_logging_range range; struct rb_root_cached root = RB_ROOT_CACHED; struct interval_tree_node *nodes; u64 iova_end; u32 nnodes; int i, ret; if (!device->log_ops) return -ENOTTY; ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, sizeof(control)); if (ret != 1) return ret; if (copy_from_user(&control, arg, minsz)) return -EFAULT; nnodes = control.num_ranges; if (!nnodes) return -EINVAL; if (nnodes > LOG_MAX_RANGES) return -E2BIG; ranges = u64_to_user_ptr(control.ranges); nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node), GFP_KERNEL); if (!nodes) return -ENOMEM; for (i = 0; i < nnodes; i++) { if (copy_from_user(&range, &ranges[i], sizeof(range))) { ret = -EFAULT; goto end; } if (!IS_ALIGNED(range.iova, control.page_size) || !IS_ALIGNED(range.length, control.page_size)) { ret = -EINVAL; goto end; } if (check_add_overflow(range.iova, range.length, &iova_end) || iova_end > ULONG_MAX) { ret = -EOVERFLOW; goto end; } nodes[i].start = range.iova; nodes[i].last = range.iova + range.length - 1; if (interval_tree_iter_first(&root, nodes[i].start, nodes[i].last)) { /* Range overlapping */ ret = -EINVAL; goto end; } interval_tree_insert(nodes + i, &root); } ret = device->log_ops->log_start(device, &root, nnodes, &control.page_size); if (ret) goto end; if (copy_to_user(arg, &control, sizeof(control))) { ret = -EFAULT; device->log_ops->log_stop(device); } end: kfree(nodes); return ret; } static int vfio_ioctl_device_feature_logging_stop(struct vfio_device *device, u32 flags, void __user *arg, size_t argsz) { int ret; if (!device->log_ops) return -ENOTTY; ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0); if (ret != 1) return ret; return device->log_ops->log_stop(device); } static int vfio_device_log_read_and_clear(struct iova_bitmap *iter, unsigned long iova, size_t length, void *opaque) { struct vfio_device *device = opaque; return device->log_ops->log_read_and_clear(device, iova, length, iter); } 
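/*
 * Illustrative sketch, not part of the original file: what a driver-side
 * @log_read_and_clear callback typically does for the wrapper above. It
 * walks the device's dirty-tracking state for [iova, iova + length - 1] and
 * marks dirty pages in the caller-provided iova_bitmap. The names
 * "struct my_device" and "my_device_test_and_clear_dirty()" are hypothetical,
 * and PAGE_SIZE tracking granularity is an assumption; a real driver would
 * wire such a function into its struct vfio_log_ops. Kept under #if 0 since
 * the hypothetical types do not exist here.
 */
#if 0
static int my_log_read_and_clear(struct vfio_device *vdev, unsigned long iova,
				 unsigned long length,
				 struct iova_bitmap *dirty)
{
	struct my_device *mdev = container_of(vdev, struct my_device, vdev);
	unsigned long addr;

	for (addr = iova; addr < iova + length; addr += PAGE_SIZE) {
		/* Atomically read and reset the device's dirty record */
		if (my_device_test_and_clear_dirty(mdev, addr))
			iova_bitmap_set(dirty, addr, PAGE_SIZE);
	}
	return 0;
}
#endif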
static int vfio_ioctl_device_feature_logging_report(struct vfio_device *device, u32 flags, void __user *arg, size_t argsz) { size_t minsz = offsetofend(struct vfio_device_feature_dma_logging_report, bitmap); struct vfio_device_feature_dma_logging_report report; struct iova_bitmap *iter; u64 iova_end; int ret; if (!device->log_ops) return -ENOTTY; ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET, sizeof(report)); if (ret != 1) return ret; if (copy_from_user(&report, arg, minsz)) return -EFAULT; if (report.page_size < SZ_4K || !is_power_of_2(report.page_size)) return -EINVAL; if (check_add_overflow(report.iova, report.length, &iova_end) || iova_end > ULONG_MAX) return -EOVERFLOW; iter = iova_bitmap_alloc(report.iova, report.length, report.page_size, u64_to_user_ptr(report.bitmap)); if (IS_ERR(iter)) return PTR_ERR(iter); ret = iova_bitmap_for_each(iter, device, vfio_device_log_read_and_clear); iova_bitmap_free(iter); return ret; } static int vfio_ioctl_device_feature(struct vfio_device *device, struct vfio_device_feature __user *arg) { size_t minsz = offsetofend(struct vfio_device_feature, flags); struct vfio_device_feature feature; if (copy_from_user(&feature, arg, minsz)) return -EFAULT; if (feature.argsz < minsz) return -EINVAL; /* Check unknown flags */ if (feature.flags & ~(VFIO_DEVICE_FEATURE_MASK | VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_PROBE)) return -EINVAL; /* GET & SET are mutually exclusive except with PROBE */ if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) && (feature.flags & VFIO_DEVICE_FEATURE_SET) && (feature.flags & VFIO_DEVICE_FEATURE_GET)) return -EINVAL; switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) { case VFIO_DEVICE_FEATURE_MIGRATION: return vfio_ioctl_device_feature_migration( device, feature.flags, arg->data, feature.argsz - minsz); case VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE: return vfio_ioctl_device_feature_mig_device_state( device, feature.flags, arg->data, feature.argsz - minsz); case VFIO_DEVICE_FEATURE_DMA_LOGGING_START: return vfio_ioctl_device_feature_logging_start( device, feature.flags, arg->data, feature.argsz - minsz); case VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP: return vfio_ioctl_device_feature_logging_stop( device, feature.flags, arg->data, feature.argsz - minsz); case VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT: return vfio_ioctl_device_feature_logging_report( device, feature.flags, arg->data, feature.argsz - minsz); case VFIO_DEVICE_FEATURE_MIG_DATA_SIZE: return vfio_ioctl_device_feature_migration_data_size( device, feature.flags, arg->data, feature.argsz - minsz); default: if (unlikely(!device->ops->device_feature)) return -EINVAL; return device->ops->device_feature(device, feature.flags, arg->data, feature.argsz - minsz); } } static long vfio_device_fops_unl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { struct vfio_device_file *df = filep->private_data; struct vfio_device *device = df->device; void __user *uptr = (void __user *)arg; int ret; if (cmd == VFIO_DEVICE_BIND_IOMMUFD) return vfio_df_ioctl_bind_iommufd(df, uptr); /* Paired with smp_store_release() following vfio_df_open() */ if (!smp_load_acquire(&df->access_granted)) return -EINVAL; ret = vfio_device_pm_runtime_get(device); if (ret) return ret; /* cdev only ioctls */ if (IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV) && !df->group) { switch (cmd) { case VFIO_DEVICE_ATTACH_IOMMUFD_PT: ret = vfio_df_ioctl_attach_pt(df, uptr); goto out; case VFIO_DEVICE_DETACH_IOMMUFD_PT: ret = vfio_df_ioctl_detach_pt(df, uptr); goto out; } } switch (cmd) { 
case VFIO_DEVICE_FEATURE: ret = vfio_ioctl_device_feature(device, uptr); break; default: if (unlikely(!device->ops->ioctl)) ret = -EINVAL; else ret = device->ops->ioctl(device, cmd, arg); break; } out: vfio_device_pm_runtime_put(device); return ret; } static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf, size_t count, loff_t *ppos) { struct vfio_device_file *df = filep->private_data; struct vfio_device *device = df->device; /* Paired with smp_store_release() following vfio_df_open() */ if (!smp_load_acquire(&df->access_granted)) return -EINVAL; if (unlikely(!device->ops->read)) return -EINVAL; return device->ops->read(device, buf, count, ppos); } static ssize_t vfio_device_fops_write(struct file *filep, const char __user *buf, size_t count, loff_t *ppos) { struct vfio_device_file *df = filep->private_data; struct vfio_device *device = df->device; /* Paired with smp_store_release() following vfio_df_open() */ if (!smp_load_acquire(&df->access_granted)) return -EINVAL; if (unlikely(!device->ops->write)) return -EINVAL; return device->ops->write(device, buf, count, ppos); } static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma) { struct vfio_device_file *df = filep->private_data; struct vfio_device *device = df->device; /* Paired with smp_store_release() following vfio_df_open() */ if (!smp_load_acquire(&df->access_granted)) return -EINVAL; if (unlikely(!device->ops->mmap)) return -EINVAL; return device->ops->mmap(device, vma); } const struct file_operations vfio_device_fops = { .owner = THIS_MODULE, .open = vfio_device_fops_cdev_open, .release = vfio_device_fops_release, .read = vfio_device_fops_read, .write = vfio_device_fops_write, .unlocked_ioctl = vfio_device_fops_unl_ioctl, .compat_ioctl = compat_ptr_ioctl, .mmap = vfio_device_fops_mmap, }; static struct vfio_device *vfio_device_from_file(struct file *file) { struct vfio_device_file *df = file->private_data; if (file->f_op != &vfio_device_fops) return NULL; return df->device; } /** * vfio_file_is_valid - True if the file is valid vfio file * @file: VFIO group file or VFIO device file */ bool vfio_file_is_valid(struct file *file) { return vfio_group_from_file(file) || vfio_device_from_file(file); } EXPORT_SYMBOL_GPL(vfio_file_is_valid); /** * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file * is always CPU cache coherent * @file: VFIO group file or VFIO device file * * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop * bit in DMA transactions. A return of false indicates that the user has * rights to access additional instructions such as wbinvd on x86. */ bool vfio_file_enforced_coherent(struct file *file) { struct vfio_device *device; struct vfio_group *group; group = vfio_group_from_file(file); if (group) return vfio_group_enforced_coherent(group); device = vfio_device_from_file(file); if (device) return device_iommu_capable(device->dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY); return true; } EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent); static void vfio_device_file_set_kvm(struct file *file, struct kvm *kvm) { struct vfio_device_file *df = file->private_data; /* * The kvm is first recorded in the vfio_device_file, and will * be propagated to vfio_device::kvm when the file is bound to * iommufd successfully in the vfio device cdev path. 
*/ spin_lock(&df->kvm_ref_lock); df->kvm = kvm; spin_unlock(&df->kvm_ref_lock); } /** * vfio_file_set_kvm - Link a kvm with VFIO drivers * @file: VFIO group file or VFIO device file * @kvm: KVM to link * * When a VFIO device is first opened the KVM will be available in * device->kvm if one was associated with the file. */ void vfio_file_set_kvm(struct file *file, struct kvm *kvm) { struct vfio_group *group; group = vfio_group_from_file(file); if (group) vfio_group_set_kvm(group, kvm); if (vfio_device_from_file(file)) vfio_device_file_set_kvm(file, kvm); } EXPORT_SYMBOL_GPL(vfio_file_set_kvm); /* * Sub-module support */ /* * Helper for managing a buffer of info chain capabilities, allocate or * reallocate a buffer with additional @size, filling in @id and @version * of the capability. A pointer to the new capability is returned. * * NB. The chain is based at the head of the buffer, so new entries are * added to the tail, vfio_info_cap_shift() should be called to fixup the * next offsets prior to copying to the user buffer. */ struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps, size_t size, u16 id, u16 version) { void *buf; struct vfio_info_cap_header *header, *tmp; /* Ensure that the next capability struct will be aligned */ size = ALIGN(size, sizeof(u64)); buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL); if (!buf) { kfree(caps->buf); caps->buf = NULL; caps->size = 0; return ERR_PTR(-ENOMEM); } caps->buf = buf; header = buf + caps->size; /* Eventually copied to user buffer, zero */ memset(header, 0, size); header->id = id; header->version = version; /* Add to the end of the capability chain */ for (tmp = buf; tmp->next; tmp = buf + tmp->next) ; /* nothing */ tmp->next = caps->size; caps->size += size; return header; } EXPORT_SYMBOL_GPL(vfio_info_cap_add); void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset) { struct vfio_info_cap_header *tmp; void *buf = (void *)caps->buf; /* Capability structs should start with proper alignment */ WARN_ON(!IS_ALIGNED(offset, sizeof(u64))); for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset) tmp->next += offset; } EXPORT_SYMBOL(vfio_info_cap_shift); int vfio_info_add_capability(struct vfio_info_cap *caps, struct vfio_info_cap_header *cap, size_t size) { struct vfio_info_cap_header *header; header = vfio_info_cap_add(caps, size, cap->id, cap->version); if (IS_ERR(header)) return PTR_ERR(header); memcpy(header + 1, cap + 1, size - sizeof(*header)); return 0; } EXPORT_SYMBOL(vfio_info_add_capability); int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs, int max_irq_type, size_t *data_size) { unsigned long minsz; size_t size; minsz = offsetofend(struct vfio_irq_set, count); if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) || (hdr->count >= (U32_MAX - hdr->start)) || (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | VFIO_IRQ_SET_ACTION_TYPE_MASK))) return -EINVAL; if (data_size) *data_size = 0; if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs) return -EINVAL; switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { case VFIO_IRQ_SET_DATA_NONE: size = 0; break; case VFIO_IRQ_SET_DATA_BOOL: size = sizeof(uint8_t); break; case VFIO_IRQ_SET_DATA_EVENTFD: size = sizeof(int32_t); break; default: return -EINVAL; } if (size) { if (hdr->argsz - minsz < hdr->count * size) return -EINVAL; if (!data_size) return -EINVAL; *data_size = hdr->count * size; } return 0; } EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare); /* * Pin contiguous user pages and return their associated host 
pages for local * domain only. * @device [in] : device * @iova [in] : starting IOVA of user pages to be pinned. * @npage [in] : count of pages to be pinned. This count should not * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. * @prot [in] : protection flags * @pages[out] : array of host pages * Return error or number of pages pinned. * * A driver may only call this function if the vfio_device was created * by vfio_register_emulated_iommu_dev() due to vfio_device_container_pin_pages(). */ int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova, int npage, int prot, struct page **pages) { /* group->container cannot change while a vfio device is open */ if (!pages || !npage || WARN_ON(!vfio_assert_device_open(device))) return -EINVAL; if (!device->ops->dma_unmap) return -EINVAL; if (vfio_device_has_container(device)) return vfio_device_container_pin_pages(device, iova, npage, prot, pages); if (device->iommufd_access) { int ret; if (iova > ULONG_MAX) return -EINVAL; /* * VFIO ignores the sub page offset, npages is from the start of * a PAGE_SIZE chunk of IOVA. The caller is expected to recover * the sub page offset by doing: * pages[0] + (iova % PAGE_SIZE) */ ret = iommufd_access_pin_pages( device->iommufd_access, ALIGN_DOWN(iova, PAGE_SIZE), npage * PAGE_SIZE, pages, (prot & IOMMU_WRITE) ? IOMMUFD_ACCESS_RW_WRITE : 0); if (ret) return ret; return npage; } return -EINVAL; } EXPORT_SYMBOL(vfio_pin_pages); /* * Unpin contiguous host pages for local domain only. * @device [in] : device * @iova [in] : starting address of user pages to be unpinned. * @npage [in] : count of pages to be unpinned. This count should not * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. */ void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage) { if (WARN_ON(!vfio_assert_device_open(device))) return; if (WARN_ON(!device->ops->dma_unmap)) return; if (vfio_device_has_container(device)) { vfio_device_container_unpin_pages(device, iova, npage); return; } if (device->iommufd_access) { if (WARN_ON(iova > ULONG_MAX)) return; iommufd_access_unpin_pages(device->iommufd_access, ALIGN_DOWN(iova, PAGE_SIZE), npage * PAGE_SIZE); return; } } EXPORT_SYMBOL(vfio_unpin_pages); /* * This interface allows the CPUs to perform some sort of virtual DMA on * behalf of the device. * * CPUs read/write from/into a range of IOVAs pointing to user space memory * into/from a kernel buffer. * * As the read/write of user space memory is conducted via the CPUs and is * not a real device DMA, it is not necessary to pin the user space memory. * * @device [in] : VFIO device * @iova [in] : base IOVA of a user space buffer * @data [in] : pointer to kernel buffer * @len [in] : kernel buffer length * @write : indicate read or write * Return error code on failure or 0 on success. 
*/ int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data, size_t len, bool write) { if (!data || len <= 0 || !vfio_assert_device_open(device)) return -EINVAL; if (vfio_device_has_container(device)) return vfio_device_container_dma_rw(device, iova, data, len, write); if (device->iommufd_access) { unsigned int flags = 0; if (iova > ULONG_MAX) return -EINVAL; /* VFIO historically tries to auto-detect a kthread */ if (!current->mm) flags |= IOMMUFD_ACCESS_RW_KTHREAD; if (write) flags |= IOMMUFD_ACCESS_RW_WRITE; return iommufd_access_rw(device->iommufd_access, iova, data, len, flags); } return -EINVAL; } EXPORT_SYMBOL(vfio_dma_rw); /* * Module/class support */ static int __init vfio_init(void) { int ret; ida_init(&vfio.device_ida); ret = vfio_group_init(); if (ret) return ret; ret = vfio_virqfd_init(); if (ret) goto err_virqfd; /* /sys/class/vfio-dev/vfioX */ vfio.device_class = class_create("vfio-dev"); if (IS_ERR(vfio.device_class)) { ret = PTR_ERR(vfio.device_class); goto err_dev_class; } ret = vfio_cdev_init(vfio.device_class); if (ret) goto err_alloc_dev_chrdev; vfio_debugfs_create_root(); pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); return 0; err_alloc_dev_chrdev: class_destroy(vfio.device_class); vfio.device_class = NULL; err_dev_class: vfio_virqfd_exit(); err_virqfd: vfio_group_cleanup(); return ret; } static void __exit vfio_cleanup(void) { vfio_debugfs_remove_root(); ida_destroy(&vfio.device_ida); vfio_cdev_cleanup(); class_destroy(vfio.device_class); vfio.device_class = NULL; vfio_virqfd_exit(); vfio_group_cleanup(); xa_destroy(&vfio_device_set_xa); } module_init(vfio_init); module_exit(vfio_cleanup); MODULE_IMPORT_NS(IOMMUFD); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");
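/*
 * Illustrative userspace sketch, not part of the kernel file above: querying
 * the migration capability of an already-open VFIO device fd through
 * VFIO_DEVICE_FEATURE, mirroring the flag and argsz handling done in
 * vfio_ioctl_device_feature(). "device_fd" is assumed to be a valid VFIO
 * device file descriptor; error handling is minimal.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int query_migration_flags(int device_fd)
{
	/* Header immediately followed by the feature-specific payload */
	uint64_t buf[2] = { 0 };
	struct vfio_device_feature *feature = (void *)buf;
	struct vfio_device_feature_migration *mig = (void *)feature->data;

	feature->argsz = sizeof(buf);
	feature->flags = VFIO_DEVICE_FEATURE_GET |
			 VFIO_DEVICE_FEATURE_MIGRATION;

	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
		return -1;	/* e.g. ENOTTY if the driver has no mig_ops */

	printf("migration flags: 0x%llx\n", (unsigned long long)mig->flags);
	return 0;
}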
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/ch11.h>

#define TEST_SE0_NAK_PID			0x0101
#define TEST_J_PID				0x0102
#define TEST_K_PID				0x0103
#define TEST_PACKET_PID				0x0104
#define TEST_HS_HOST_PORT_SUSPEND_RESUME	0x0106
#define TEST_SINGLE_STEP_GET_DEV_DESC		0x0107
#define TEST_SINGLE_STEP_SET_FEATURE		0x0108

extern const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
						const struct usb_device_id *id);

/*
 * A list of USB hubs which require the power to the port to be disabled
 * before starting the testing procedures.
 */
static const struct usb_device_id ehset_hub_list[] = {
	{ USB_DEVICE(0x0424, 0x4502) },
	{ USB_DEVICE(0x0424, 0x4913) },
	{ USB_DEVICE(0x0451, 0x8027) },
	{ }
};

static int ehset_prepare_port_for_testing(struct usb_device *hub_udev,
					  u16 portnum)
{
	int ret = 0;

	/*
	 * The USB 2.0 spec chapter 11.24.2.13 says that the USB port which is
	 * going under test needs to be put in suspend before sending the
	 * test command. Most hubs don't enforce this precondition, but there
	 * are some hubs which need the power to the port to be disabled
	 * before starting the test.
	 */
	if (usb_device_match_id(hub_udev, ehset_hub_list)) {
		ret = usb_control_msg_send(hub_udev, 0, USB_REQ_CLEAR_FEATURE,
					   USB_RT_PORT, USB_PORT_FEAT_ENABLE,
					   portnum, NULL, 0, 1000, GFP_KERNEL);
		/*
		 * Wait for the port to be disabled. The 100ms delay is an
		 * arbitrary value that has worked every time.
		 */
		msleep(100);
	} else {
		/*
		 * For the hubs which are compliant with the spec,
		 * put the port in SUSPEND.
*/ ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_SUSPEND, portnum, NULL, 0, 1000, GFP_KERNEL); } return ret; } static int ehset_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret = -EINVAL; struct usb_device *dev = interface_to_usbdev(intf); struct usb_device *hub_udev = dev->parent; struct usb_device_descriptor buf; u8 portnum = dev->portnum; u16 test_pid = le16_to_cpu(dev->descriptor.idProduct); switch (test_pid) { case TEST_SE0_NAK_PID: ret = ehset_prepare_port_for_testing(hub_udev, portnum); if (ret < 0) break; ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, (USB_TEST_SE0_NAK << 8) | portnum, NULL, 0, 1000, GFP_KERNEL); break; case TEST_J_PID: ret = ehset_prepare_port_for_testing(hub_udev, portnum); if (ret < 0) break; ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, (USB_TEST_J << 8) | portnum, NULL, 0, 1000, GFP_KERNEL); break; case TEST_K_PID: ret = ehset_prepare_port_for_testing(hub_udev, portnum); if (ret < 0) break; ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, (USB_TEST_K << 8) | portnum, NULL, 0, 1000, GFP_KERNEL); break; case TEST_PACKET_PID: ret = ehset_prepare_port_for_testing(hub_udev, portnum); if (ret < 0) break; ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, (USB_TEST_PACKET << 8) | portnum, NULL, 0, 1000, GFP_KERNEL); break; case TEST_HS_HOST_PORT_SUSPEND_RESUME: /* Test: wait for 15secs -> suspend -> 15secs delay -> resume */ msleep(15 * 1000); ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_SUSPEND, portnum, NULL, 0, 1000, GFP_KERNEL); if (ret < 0) break; msleep(15 * 1000); ret = usb_control_msg_send(hub_udev, 0, USB_REQ_CLEAR_FEATURE, USB_RT_PORT, USB_PORT_FEAT_SUSPEND, portnum, NULL, 0, 1000, GFP_KERNEL); break; case TEST_SINGLE_STEP_GET_DEV_DESC: /* Test: wait for 15secs -> GetDescriptor request */ msleep(15 * 1000); ret = usb_control_msg_recv(dev, 0, USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, USB_DT_DEVICE << 8, 0, &buf, USB_DT_DEVICE_SIZE, USB_CTRL_GET_TIMEOUT, GFP_KERNEL); break; case TEST_SINGLE_STEP_SET_FEATURE: /* * GetDescriptor SETUP request -> 15secs delay -> IN & STATUS * * Note, this test is only supported on root hubs since the * SetPortFeature handling can only be done inside the HCD's * hub_control callback function. 
*/ if (hub_udev != dev->bus->root_hub) { dev_err(&intf->dev, "SINGLE_STEP_SET_FEATURE test only supported on root hub\n"); break; } ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE, USB_RT_PORT, USB_PORT_FEAT_TEST, (6 << 8) | portnum, NULL, 0, 60 * 1000, GFP_KERNEL); break; default: dev_err(&intf->dev, "%s: unsupported PID: 0x%x\n", __func__, test_pid); } return ret; } static void ehset_disconnect(struct usb_interface *intf) { } static const struct usb_device_id ehset_id_table[] = { { USB_DEVICE(0x1a0a, TEST_SE0_NAK_PID) }, { USB_DEVICE(0x1a0a, TEST_J_PID) }, { USB_DEVICE(0x1a0a, TEST_K_PID) }, { USB_DEVICE(0x1a0a, TEST_PACKET_PID) }, { USB_DEVICE(0x1a0a, TEST_HS_HOST_PORT_SUSPEND_RESUME) }, { USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_GET_DEV_DESC) }, { USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_SET_FEATURE) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ehset_id_table); static struct usb_driver ehset_driver = { .name = "usb_ehset_test", .probe = ehset_probe, .disconnect = ehset_disconnect, .id_table = ehset_id_table, }; module_usb_driver(ehset_driver); MODULE_DESCRIPTION("USB Driver for EHSET Test Fixture"); MODULE_LICENSE("GPL v2");
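/*
 * Illustrative helper, not part of the original driver: the port-test cases
 * above all issue the same SetPortFeature(PORT_TEST) request and differ only
 * in the test selector, which the USB 2.0 hub class carries in the upper
 * byte of wIndex with the port number in the lower byte - exactly what the
 * "(USB_TEST_* << 8) | portnum" expressions encode. A sketch of how those
 * calls could be factored; the name "ehset_set_port_test" is hypothetical.
 */
static inline int ehset_set_port_test(struct usb_device *hub_udev, u8 portnum,
				      u8 test_selector)
{
	return usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
				    USB_RT_PORT, USB_PORT_FEAT_TEST,
				    (test_selector << 8) | portnum,
				    NULL, 0, 1000, GFP_KERNEL);
}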
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <asm/errno.h>
#include "of_private.h"

/**
 * of_match_device - Tell if a struct device matches an of_device_id list
 * @matches: array of OF device match structures to search in
 * @dev: the OF device structure to match against
 *
 * Used by a driver to check whether a platform_device present in the
 * system is in its list of supported devices.
 */
const struct of_device_id *of_match_device(const struct of_device_id *matches,
					   const struct device *dev)
{
	if (!matches || !dev->of_node || dev->of_node_reused)
		return NULL;
	return of_match_node(matches, dev->of_node);
}
EXPORT_SYMBOL(of_match_device);

static void
of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
{
	struct device_node *node, *of_node = dev->of_node;
	int count, i;

	if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
		return;

	count = of_property_count_elems_of_size(of_node, "memory-region",
						sizeof(u32));
	/*
	 * If dev->of_node doesn't exist or doesn't contain memory-region, try
	 * the OF node having DMA configuration.
	 */
	if (count <= 0) {
		of_node = np;
		count = of_property_count_elems_of_size(
			of_node, "memory-region", sizeof(u32));
	}

	for (i = 0; i < count; i++) {
		node = of_parse_phandle(of_node, "memory-region", i);
		/*
		 * There might be multiple memory regions, but only one
		 * restricted-dma-pool region is allowed.
		 */
		if (of_device_is_compatible(node, "restricted-dma-pool") &&
		    of_device_is_available(node)) {
			of_node_put(node);
			break;
		}
		of_node_put(node);
	}

	/*
	 * Attempt to initialize a restricted-dma-pool region if one was found.
	 * Note that count can hold a negative error code.
*/ if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i)) dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n"); } /** * of_dma_configure_id - Setup DMA configuration * @dev: Device to apply DMA configuration * @np: Pointer to OF node having DMA configuration * @force_dma: Whether device is to be set up by of_dma_configure() even if * DMA capability is not explicitly described by firmware. * @id: Optional const pointer value input id * * Try to get devices's DMA configuration from DT and update it * accordingly. * * If platform code needs to use its own special DMA configuration, it * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events * to fix up DMA configuration. */ int of_dma_configure_id(struct device *dev, struct device_node *np, bool force_dma, const u32 *id) { const struct bus_dma_region *map = NULL; struct device_node *bus_np; u64 mask, end = 0; bool coherent, set_map = false; int ret; if (np == dev->of_node) bus_np = __of_get_dma_parent(np); else bus_np = of_node_get(np); ret = of_dma_get_range(bus_np, &map); of_node_put(bus_np); if (ret < 0) { /* * For legacy reasons, we have to assume some devices need * DMA configuration regardless of whether "dma-ranges" is * correctly specified or not. */ if (!force_dma) return ret == -ENODEV ? 0 : ret; } else { /* Determine the overall bounds of all DMA regions */ end = dma_range_map_max(map); set_map = true; } /* * If @dev is expected to be DMA-capable then the bus code that created * it should have initialised its dma_mask pointer by this point. For * now, we'll continue the legacy behaviour of coercing it to the * coherent mask if not, but we'll no longer do so quietly. */ if (!dev->dma_mask) { dev_warn(dev, "DMA mask not set\n"); dev->dma_mask = &dev->coherent_dma_mask; } if (!end && dev->coherent_dma_mask) end = dev->coherent_dma_mask; else if (!end) end = (1ULL << 32) - 1; /* * Limit coherent and dma mask based on size and default mask * set by the driver. */ mask = DMA_BIT_MASK(ilog2(end) + 1); dev->coherent_dma_mask &= mask; *dev->dma_mask &= mask; /* ...but only set bus limit and range map if we found valid dma-ranges earlier */ if (set_map) { dev->bus_dma_limit = end; dev->dma_range_map = map; } coherent = of_dma_is_coherent(np); dev_dbg(dev, "device is%sdma coherent\n", coherent ? " " : " not "); ret = of_iommu_configure(dev, np, id); if (ret == -EPROBE_DEFER) { /* Don't touch range map if it wasn't set from a valid dma-ranges */ if (set_map) dev->dma_range_map = NULL; kfree(map); return -EPROBE_DEFER; } /* Take all other IOMMU errors to mean we'll just carry on without it */ dev_dbg(dev, "device is%sbehind an iommu\n", !ret ? 
" " : " not "); arch_setup_dma_ops(dev, coherent); if (ret) of_dma_set_restricted_buffer(dev, np); return 0; } EXPORT_SYMBOL_GPL(of_dma_configure_id); const void *of_device_get_match_data(const struct device *dev) { const struct of_device_id *match; match = of_match_device(dev->driver->of_match_table, dev); if (!match) return NULL; return match->data; } EXPORT_SYMBOL(of_device_get_match_data); /** * of_device_modalias - Fill buffer with newline terminated modalias string * @dev: Calling device * @str: Modalias string * @len: Size of @str */ ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len) { ssize_t sl; if (!dev || !dev->of_node || dev->of_node_reused) return -ENODEV; sl = of_modalias(dev->of_node, str, len - 2); if (sl < 0) return sl; if (sl > len - 2) return -ENOMEM; str[sl++] = '\n'; str[sl] = 0; return sl; } EXPORT_SYMBOL_GPL(of_device_modalias); /** * of_device_uevent - Display OF related uevent information * @dev: Device to display the uevent information for * @env: Kernel object's userspace event reference to fill up */ void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env) { const char *compat, *type; struct alias_prop *app; struct property *p; int seen = 0; if ((!dev) || (!dev->of_node)) return; add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node); add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node); type = of_node_get_device_type(dev->of_node); if (type) add_uevent_var(env, "OF_TYPE=%s", type); /* Since the compatible field can contain pretty much anything * it's not really legal to split it out with commas. We split it * up using a number of environment variables instead. */ of_property_for_each_string(dev->of_node, "compatible", p, compat) { add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat); seen++; } add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen); seen = 0; mutex_lock(&of_mutex); list_for_each_entry(app, &aliases_lookup, link) { if (dev->of_node == app->np) { add_uevent_var(env, "OF_ALIAS_%d=%s", seen, app->alias); seen++; } } mutex_unlock(&of_mutex); } EXPORT_SYMBOL_GPL(of_device_uevent); int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env) { int sl; if ((!dev) || (!dev->of_node) || dev->of_node_reused) return -ENODEV; /* Devicetree modalias is tricky, we add it in 2 steps */ if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; sl = of_modalias(dev->of_node, &env->buf[env->buflen-1], sizeof(env->buf) - env->buflen); if (sl < 0) return sl; if (sl >= (sizeof(env->buf) - env->buflen)) return -ENOMEM; env->buflen += sl; return 0; } EXPORT_SYMBOL_GPL(of_device_uevent_modalias); /** * of_device_make_bus_id - Use the device node data to assign a unique name * @dev: pointer to device structure that is linked to a device tree node * * This routine will first try using the translated bus address to * derive a unique name. If it cannot, then it will prepend names from * parent nodes until a unique name can be derived. */ void of_device_make_bus_id(struct device *dev) { struct device_node *node = dev->of_node; const __be32 *reg; u64 addr; u32 mask; /* Construct the name, using parent nodes if necessary to ensure uniqueness */ while (node->parent) { /* * If the address can be translated, then that is as much * uniqueness as we need. Make it the first component and return */ reg = of_get_property(node, "reg", NULL); if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) { if (!of_property_read_u32(node, "mask", &mask)) dev_set_name(dev, dev_name(dev) ? 
"%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn", addr, ffs(mask) - 1, node, dev_name(dev)); else dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn", addr, node, dev_name(dev)); return; } /* format arguments only used if dev_name() resolves to NULL */ dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s", kbasename(node->full_name), dev_name(dev)); node = node->parent; } } EXPORT_SYMBOL_GPL(of_device_make_bus_id);
// SPDX-License-Identifier: GPL-2.0-or-later /* * Advanced Linux Sound Architecture * Copyright (c) by Jaroslav Kysela <perex@perex.cz> */ #include <linux/init.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/device.h> #include <linux/module.h> #include <linux/debugfs.h> #include <sound/core.h> #include <sound/minors.h> #include <sound/info.h> #include <sound/control.h> #include <sound/initval.h> #include <linux/kmod.h> #include <linux/mutex.h> static int major = CONFIG_SND_MAJOR; int snd_major; EXPORT_SYMBOL(snd_major); static int cards_limit = 1; MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Advanced Linux Sound Architecture driver for soundcards."); MODULE_LICENSE("GPL"); module_param(major, int, 0444); MODULE_PARM_DESC(major, "Major # for sound driver."); module_param(cards_limit, int, 0444); MODULE_PARM_DESC(cards_limit, "Count of auto-loadable soundcards."); MODULE_ALIAS_CHARDEV_MAJOR(CONFIG_SND_MAJOR); /* this one holds the actual max. card number currently available. * as default, it's identical with cards_limit option. when more * modules are loaded manually, this limit number increases, too. */ int snd_ecards_limit; EXPORT_SYMBOL(snd_ecards_limit); #ifdef CONFIG_SND_DEBUG struct dentry *sound_debugfs_root; EXPORT_SYMBOL_GPL(sound_debugfs_root); #endif static struct snd_minor *snd_minors[SNDRV_OS_MINORS]; static DEFINE_MUTEX(sound_mutex); #ifdef CONFIG_MODULES /** * snd_request_card - try to load the card module * @card: the card number * * Tries to load the module "snd-card-X" for the given card number * via request_module. Returns immediately if already loaded.
*/ void snd_request_card(int card) { if (snd_card_locked(card)) return; if (card < 0 || card >= cards_limit) return; request_module("snd-card-%i", card); } EXPORT_SYMBOL(snd_request_card); static void snd_request_other(int minor) { char *str; switch (minor) { case SNDRV_MINOR_SEQUENCER: str = "snd-seq"; break; case SNDRV_MINOR_TIMER: str = "snd-timer"; break; default: return; } request_module(str); } #endif /* modular kernel */ /** * snd_lookup_minor_data - get user data of a registered device * @minor: the minor number * @type: device type (SNDRV_DEVICE_TYPE_XXX) * * Checks that a minor device with the specified type is registered, and returns * its user data pointer. * * This function increments the reference counter of the card instance * if an associated instance with the given minor number and type is found. * The caller must call snd_card_unref() appropriately later. * * Return: The user data pointer if the specified device is found. %NULL * otherwise. */ void *snd_lookup_minor_data(unsigned int minor, int type) { struct snd_minor *mreg; void *private_data; if (minor >= ARRAY_SIZE(snd_minors)) return NULL; guard(mutex)(&sound_mutex); mreg = snd_minors[minor]; if (mreg && mreg->type == type) { private_data = mreg->private_data; if (private_data && mreg->card_ptr) get_device(&mreg->card_ptr->card_dev); } else private_data = NULL; return private_data; } EXPORT_SYMBOL(snd_lookup_minor_data); #ifdef CONFIG_MODULES static struct snd_minor *autoload_device(unsigned int minor) { int dev; mutex_unlock(&sound_mutex); /* release lock temporarily */ dev = SNDRV_MINOR_DEVICE(minor); if (dev == SNDRV_MINOR_CONTROL) { /* /dev/aloadC? */ int card = SNDRV_MINOR_CARD(minor); struct snd_card *ref = snd_card_ref(card); if (!ref) snd_request_card(card); else snd_card_unref(ref); } else if (dev == SNDRV_MINOR_GLOBAL) { /* /dev/aloadSEQ */ snd_request_other(minor); } mutex_lock(&sound_mutex); /* reacquire lock */ return snd_minors[minor]; } #else /* !CONFIG_MODULES */ #define autoload_device(minor) NULL #endif /* CONFIG_MODULES */ static int snd_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct snd_minor *mptr = NULL; const struct file_operations *new_fops; int err = 0; if (minor >= ARRAY_SIZE(snd_minors)) return -ENODEV; scoped_guard(mutex, &sound_mutex) { mptr = snd_minors[minor]; if (mptr == NULL) { mptr = autoload_device(minor); if (!mptr) return -ENODEV; } new_fops = fops_get(mptr->f_ops); } if (!new_fops) return -ENODEV; replace_fops(file, new_fops); if (file->f_op->open) err = file->f_op->open(inode, file); return err; } static const struct file_operations snd_fops = { .owner = THIS_MODULE, .open = snd_open, .llseek = noop_llseek, }; #ifdef CONFIG_SND_DYNAMIC_MINORS static int snd_find_free_minor(int type, struct snd_card *card, int dev) { int minor; /* static minors for module auto loading */ if (type == SNDRV_DEVICE_TYPE_SEQUENCER) return SNDRV_MINOR_SEQUENCER; if (type == SNDRV_DEVICE_TYPE_TIMER) return SNDRV_MINOR_TIMER; for (minor = 0; minor < ARRAY_SIZE(snd_minors); ++minor) { /* skip static minors still used for module auto loading */ if (SNDRV_MINOR_DEVICE(minor) == SNDRV_MINOR_CONTROL) continue; if (minor == SNDRV_MINOR_SEQUENCER || minor == SNDRV_MINOR_TIMER) continue; if (!snd_minors[minor]) return minor; } return -EBUSY; } #else static int snd_find_free_minor(int type, struct snd_card *card, int dev) { int minor; switch (type) { case SNDRV_DEVICE_TYPE_SEQUENCER: case SNDRV_DEVICE_TYPE_TIMER: minor = type; break; case SNDRV_DEVICE_TYPE_CONTROL: if
(snd_BUG_ON(!card)) return -EINVAL; minor = SNDRV_MINOR(card->number, type); break; case SNDRV_DEVICE_TYPE_HWDEP: case SNDRV_DEVICE_TYPE_RAWMIDI: case SNDRV_DEVICE_TYPE_PCM_PLAYBACK: case SNDRV_DEVICE_TYPE_PCM_CAPTURE: case SNDRV_DEVICE_TYPE_COMPRESS: if (snd_BUG_ON(!card)) return -EINVAL; minor = SNDRV_MINOR(card->number, type + dev); break; default: return -EINVAL; } if (snd_BUG_ON(minor < 0 || minor >= SNDRV_OS_MINORS)) return -EINVAL; if (snd_minors[minor]) return -EBUSY; return minor; } #endif /** * snd_register_device - Register the ALSA device file for the card * @type: the device type, SNDRV_DEVICE_TYPE_XXX * @card: the card instance * @dev: the device index * @f_ops: the file operations * @private_data: user pointer for f_ops->open() * @device: the device to register * * Registers an ALSA device file for the given card. * The file operations have to be supplied via the @f_ops parameter. * * Return: Zero if successful, or a negative error code on failure. */ int snd_register_device(int type, struct snd_card *card, int dev, const struct file_operations *f_ops, void *private_data, struct device *device) { int minor; int err = 0; struct snd_minor *preg; if (snd_BUG_ON(!device)) return -EINVAL; preg = kmalloc(sizeof *preg, GFP_KERNEL); if (preg == NULL) return -ENOMEM; preg->type = type; preg->card = card ? card->number : -1; preg->device = dev; preg->f_ops = f_ops; preg->private_data = private_data; preg->card_ptr = card; guard(mutex)(&sound_mutex); minor = snd_find_free_minor(type, card, dev); if (minor < 0) { err = minor; goto error; } preg->dev = device; device->devt = MKDEV(major, minor); err = device_add(device); if (err < 0) goto error; snd_minors[minor] = preg; error: if (err < 0) kfree(preg); return err; } EXPORT_SYMBOL(snd_register_device); /** * snd_unregister_device - unregister the device on the given card * @dev: the device instance * * Unregisters the device file already registered via * snd_register_device(). * * Return: Zero if successful, or a negative error code on failure.
*/ int snd_unregister_device(struct device *dev) { int minor; struct snd_minor *preg; guard(mutex)(&sound_mutex); for (minor = 0; minor < ARRAY_SIZE(snd_minors); ++minor) { preg = snd_minors[minor]; if (preg && preg->dev == dev) { snd_minors[minor] = NULL; device_del(dev); kfree(preg); break; } } if (minor >= ARRAY_SIZE(snd_minors)) return -ENOENT; return 0; } EXPORT_SYMBOL(snd_unregister_device); #ifdef CONFIG_SND_PROC_FS /* * INFO PART */ static const char *snd_device_type_name(int type) { switch (type) { case SNDRV_DEVICE_TYPE_CONTROL: return "control"; case SNDRV_DEVICE_TYPE_HWDEP: return "hardware dependent"; case SNDRV_DEVICE_TYPE_RAWMIDI: return "raw midi"; case SNDRV_DEVICE_TYPE_PCM_PLAYBACK: return "digital audio playback"; case SNDRV_DEVICE_TYPE_PCM_CAPTURE: return "digital audio capture"; case SNDRV_DEVICE_TYPE_SEQUENCER: return "sequencer"; case SNDRV_DEVICE_TYPE_TIMER: return "timer"; case SNDRV_DEVICE_TYPE_COMPRESS: return "compress"; default: return "?"; } } static void snd_minor_info_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { int minor; struct snd_minor *mptr; guard(mutex)(&sound_mutex); for (minor = 0; minor < SNDRV_OS_MINORS; ++minor) { mptr = snd_minors[minor]; if (!mptr) continue; if (mptr->card >= 0) { if (mptr->device >= 0) snd_iprintf(buffer, "%3i: [%2i-%2i]: %s\n", minor, mptr->card, mptr->device, snd_device_type_name(mptr->type)); else snd_iprintf(buffer, "%3i: [%2i] : %s\n", minor, mptr->card, snd_device_type_name(mptr->type)); } else snd_iprintf(buffer, "%3i: : %s\n", minor, snd_device_type_name(mptr->type)); } } int __init snd_minor_info_init(void) { struct snd_info_entry *entry; entry = snd_info_create_module_entry(THIS_MODULE, "devices", NULL); if (!entry) return -ENOMEM; entry->c.text.read = snd_minor_info_read; return snd_info_register(entry); /* freed in error path */ } #endif /* CONFIG_SND_PROC_FS */ /* * INIT PART */ static int __init alsa_sound_init(void) { snd_major = major; snd_ecards_limit = cards_limit; if (register_chrdev(major, "alsa", &snd_fops)) { pr_err("ALSA core: unable to register native major device number %d\n", major); return -EIO; } if (snd_info_init() < 0) { unregister_chrdev(major, "alsa"); return -ENOMEM; } #ifdef CONFIG_SND_DEBUG sound_debugfs_root = debugfs_create_dir("sound", NULL); #endif #ifndef MODULE pr_info("Advanced Linux Sound Architecture Driver Initialized.\n"); #endif return 0; } static void __exit alsa_sound_exit(void) { #ifdef CONFIG_SND_DEBUG debugfs_remove(sound_debugfs_root); #endif snd_info_done(); unregister_chrdev(major, "alsa"); } subsys_initcall(alsa_sound_init); module_exit(alsa_sound_exit);
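As a usage illustration for snd_register_device()/snd_unregister_device() above, a card driver could expose one extra device file in the hwdep minor range roughly as sketched below; the my_* helpers and the caller-supplied fops/private pointer are assumptions, not part of the ALSA core:

#include <linux/device.h>
#include <linux/fs.h>
#include <sound/core.h>

/* Hypothetical wrapper: register one custom node in the hwdep minor range.
 * 'dev' must already be device_initialize()d but not device_add()ed, since
 * snd_register_device() fills dev->devt and calls device_add() itself.
 */
static int my_register_node(struct snd_card *card, struct device *dev,
			    const struct file_operations *fops, void *private)
{
	return snd_register_device(SNDRV_DEVICE_TYPE_HWDEP, card, 0,
				   fops, private, dev);
}

static void my_remove_node(struct device *dev)
{
	snd_unregister_device(dev);	/* pairs with the registration above */
}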
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for VRC-2 2-axis Car controller * * Copyright (C) 2022 Marcus Folkesson <marcus.folkesson@gmail.com> */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> /* * VID/PID are probably "borrowed", so keep them locally and * do not populate hid-ids.h with those. */ #define USB_VENDOR_ID_VRC2 (0x07c0) #define USB_DEVICE_ID_VRC2 (0x1125) static __u8 vrc2_rdesc_fixed[] = { 0x05, 0x01, // Usage Page (Generic Desktop Ctrls) 0x09, 0x04, // Usage (Joystick) 0xA1, 0x01, // Collection (Application) 0x09, 0x01, // Usage (Pointer) 0xA1, 0x00, // Collection (Physical) 0x09, 0x30, // Usage (X) 0x09, 0x31, // Usage (Y) 0x15, 0x00, // Logical Minimum (0) 0x26, 0xFF, 0x07, // Logical Maximum (2047) 0x35, 0x00, // Physical Minimum (0) 0x46, 0xFF, 0x00, // Physical Maximum (255) 0x75, 0x10, // Report Size (16) 0x95, 0x02, // Report Count (2) 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) 0xC0, // End Collection 0x75, 0x08, // Report Size (8) 0x95, 0x03, // Report Count (3) 0x81, 0x03, // Input (Cnst,Var,Abs) 0xC0, // End Collection }; static __u8 *vrc2_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { hid_info(hdev, "fixing up VRC-2 report descriptor\n"); *rsize = sizeof(vrc2_rdesc_fixed); return vrc2_rdesc_fixed; } static int vrc2_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; /* * The device gives us 2 separate USB endpoints. * One of those (the one with report descriptor size of 23) is just bogus so ignore it */ if (hdev->dev_rsize == 23) return -ENODEV; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); return ret; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); return ret; } return 0; } static const struct hid_device_id vrc2_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_VRC2, USB_DEVICE_ID_VRC2) }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(hid, vrc2_devices); static struct hid_driver vrc2_driver = { .name = "vrc2", .id_table = vrc2_devices, .report_fixup = vrc2_report_fixup, .probe = vrc2_probe, }; module_hid_driver(vrc2_driver); MODULE_AUTHOR("Marcus Folkesson <marcus.folkesson@gmail.com>"); MODULE_DESCRIPTION("HID driver for VRC-2 2-axis Car controller"); MODULE_LICENSE("GPL");
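For reference, the fixed descriptor above declares two 16-bit axes with logical range 0..2047 followed by three constant padding bytes, giving a 7-byte input report with no report ID. A hypothetical view of that wire layout (not part of the driver):

#include <linux/types.h>

/* Assumed layout implied by vrc2_rdesc_fixed[] above (7 bytes per report). */
struct vrc2_input_report {
	__le16 x;	/* Usage (X), logical 0..2047 */
	__le16 y;	/* Usage (Y), logical 0..2047 */
	__u8 pad[3];	/* Input (Cnst,Var,Abs) filler */
} __packed;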
// SPDX-License-Identifier: GPL-2.0-only /* * BPF JIT compiler * * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
*/ #include <linux/netdevice.h> #include <linux/filter.h> #include <linux/if_vlan.h> #include <linux/bpf.h> #include <linux/memory.h> #include <linux/sort.h> #include <asm/extable.h> #include <asm/ftrace.h> #include <asm/set_memory.h> #include <asm/nospec-branch.h> #include <asm/text-patching.h> #include <asm/unwind.h> #include <asm/cfi.h> static bool all_callee_regs_used[4] = {true, true, true, true}; static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) { if (len == 1) *ptr = bytes; else if (len == 2) *(u16 *)ptr = bytes; else { *(u32 *)ptr = bytes; barrier(); } return ptr + len; } #define EMIT(bytes, len) \ do { prog = emit_code(prog, bytes, len); } while (0) #define EMIT1(b1) EMIT(b1, 1) #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) #define EMIT1_off32(b1, off) \ do { EMIT1(b1); EMIT(off, 4); } while (0) #define EMIT2_off32(b1, b2, off) \ do { EMIT2(b1, b2); EMIT(off, 4); } while (0) #define EMIT3_off32(b1, b2, b3, off) \ do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) #define EMIT4_off32(b1, b2, b3, b4, off) \ do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) #ifdef CONFIG_X86_KERNEL_IBT #define EMIT_ENDBR() EMIT(gen_endbr(), 4) #define EMIT_ENDBR_POISON() EMIT(gen_endbr_poison(), 4) #else #define EMIT_ENDBR() #define EMIT_ENDBR_POISON() #endif static bool is_imm8(int value) { return value <= 127 && value >= -128; } static bool is_simm32(s64 value) { return value == (s64)(s32)value; } static bool is_uimm32(u64 value) { return value == (u64)(u32)value; } /* mov dst, src */ #define EMIT_mov(DST, SRC) \ do { \ if (DST != SRC) \ EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ } while (0) static int bpf_size_to_x86_bytes(int bpf_size) { if (bpf_size == BPF_W) return 4; else if (bpf_size == BPF_H) return 2; else if (bpf_size == BPF_B) return 1; else if (bpf_size == BPF_DW) return 4; /* imm32 */ else return 0; } /* * List of x86 cond jumps opcodes (. + s8) * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32) */ #define X86_JB 0x72 #define X86_JAE 0x73 #define X86_JE 0x74 #define X86_JNE 0x75 #define X86_JBE 0x76 #define X86_JA 0x77 #define X86_JL 0x7C #define X86_JGE 0x7D #define X86_JLE 0x7E #define X86_JG 0x7F /* Pick a register outside of BPF range for JIT internal work */ #define AUX_REG (MAX_BPF_JIT_REG + 1) #define X86_REG_R9 (MAX_BPF_JIT_REG + 2) #define X86_REG_R12 (MAX_BPF_JIT_REG + 3) /* * The following table maps BPF registers to x86-64 registers. * * x86-64 register R12 is unused, since if used as base address * register in load/store instructions, it always needs an * extra byte of encoding and is callee saved. * * x86-64 register R9 is not used by BPF programs, but can be used by BPF * trampoline. x86-64 register R10 is used for blinding (if enabled). 
*/ static const int reg2hex[] = { [BPF_REG_0] = 0, /* RAX */ [BPF_REG_1] = 7, /* RDI */ [BPF_REG_2] = 6, /* RSI */ [BPF_REG_3] = 2, /* RDX */ [BPF_REG_4] = 1, /* RCX */ [BPF_REG_5] = 0, /* R8 */ [BPF_REG_6] = 3, /* RBX callee saved */ [BPF_REG_7] = 5, /* R13 callee saved */ [BPF_REG_8] = 6, /* R14 callee saved */ [BPF_REG_9] = 7, /* R15 callee saved */ [BPF_REG_FP] = 5, /* RBP readonly */ [BPF_REG_AX] = 2, /* R10 temp register */ [AUX_REG] = 3, /* R11 temp register */ [X86_REG_R9] = 1, /* R9 register, 6th function argument */ [X86_REG_R12] = 4, /* R12 callee saved */ }; static const int reg2pt_regs[] = { [BPF_REG_0] = offsetof(struct pt_regs, ax), [BPF_REG_1] = offsetof(struct pt_regs, di), [BPF_REG_2] = offsetof(struct pt_regs, si), [BPF_REG_3] = offsetof(struct pt_regs, dx), [BPF_REG_4] = offsetof(struct pt_regs, cx), [BPF_REG_5] = offsetof(struct pt_regs, r8), [BPF_REG_6] = offsetof(struct pt_regs, bx), [BPF_REG_7] = offsetof(struct pt_regs, r13), [BPF_REG_8] = offsetof(struct pt_regs, r14), [BPF_REG_9] = offsetof(struct pt_regs, r15), }; /* * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15 * which need extra byte of encoding. * rax,rcx,...,rbp have simpler encoding */ static bool is_ereg(u32 reg) { return (1 << reg) & (BIT(BPF_REG_5) | BIT(AUX_REG) | BIT(BPF_REG_7) | BIT(BPF_REG_8) | BIT(BPF_REG_9) | BIT(X86_REG_R9) | BIT(X86_REG_R12) | BIT(BPF_REG_AX)); } /* * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte * of encoding. al,cl,dl,bl have simpler encoding. */ static bool is_ereg_8l(u32 reg) { return is_ereg(reg) || (1 << reg) & (BIT(BPF_REG_1) | BIT(BPF_REG_2) | BIT(BPF_REG_FP)); } static bool is_axreg(u32 reg) { return reg == BPF_REG_0; } /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */ static u8 add_1mod(u8 byte, u32 reg) { if (is_ereg(reg)) byte |= 1; return byte; } static u8 add_2mod(u8 byte, u32 r1, u32 r2) { if (is_ereg(r1)) byte |= 1; if (is_ereg(r2)) byte |= 4; return byte; } static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index) { if (is_ereg(r1)) byte |= 1; if (is_ereg(index)) byte |= 2; if (is_ereg(r2)) byte |= 4; return byte; } /* Encode 'dst_reg' register into x86-64 opcode 'byte' */ static u8 add_1reg(u8 byte, u32 dst_reg) { return byte + reg2hex[dst_reg]; } /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) { return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); } /* Some 1-byte opcodes for binary ALU operations */ static u8 simple_alu_opcodes[] = { [BPF_ADD] = 0x01, [BPF_SUB] = 0x29, [BPF_AND] = 0x21, [BPF_OR] = 0x09, [BPF_XOR] = 0x31, [BPF_LSH] = 0xE0, [BPF_RSH] = 0xE8, [BPF_ARSH] = 0xF8, }; static void jit_fill_hole(void *area, unsigned int size) { /* Fill whole space with INT3 instructions */ memset(area, 0xcc, size); } int bpf_arch_text_invalidate(void *dst, size_t len) { return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len)); } struct jit_context { int cleanup_addr; /* Epilogue code offset */ /* * Program specific offsets of labels in the code; these rely on the * JIT doing at least 2 passes, recording the position on the first * pass, only to generate the correct offset on the second pass. 
*/ int tail_call_direct_label; int tail_call_indirect_label; }; /* Maximum number of bytes emitted while JITing one eBPF insn */ #define BPF_MAX_INSN_SIZE 128 #define BPF_INSN_SAFETY 64 /* Number of bytes emit_patch() needs to generate instructions */ #define X86_PATCH_SIZE 5 /* Number of bytes that will be skipped on tailcall */ #define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE) static void push_r12(u8 **pprog) { u8 *prog = *pprog; EMIT2(0x41, 0x54); /* push r12 */ *pprog = prog; } static void push_callee_regs(u8 **pprog, bool *callee_regs_used) { u8 *prog = *pprog; if (callee_regs_used[0]) EMIT1(0x53); /* push rbx */ if (callee_regs_used[1]) EMIT2(0x41, 0x55); /* push r13 */ if (callee_regs_used[2]) EMIT2(0x41, 0x56); /* push r14 */ if (callee_regs_used[3]) EMIT2(0x41, 0x57); /* push r15 */ *pprog = prog; } static void pop_r12(u8 **pprog) { u8 *prog = *pprog; EMIT2(0x41, 0x5C); /* pop r12 */ *pprog = prog; } static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) { u8 *prog = *pprog; if (callee_regs_used[3]) EMIT2(0x41, 0x5F); /* pop r15 */ if (callee_regs_used[2]) EMIT2(0x41, 0x5E); /* pop r14 */ if (callee_regs_used[1]) EMIT2(0x41, 0x5D); /* pop r13 */ if (callee_regs_used[0]) EMIT1(0x5B); /* pop rbx */ *pprog = prog; } static void emit_nops(u8 **pprog, int len) { u8 *prog = *pprog; int i, noplen; while (len > 0) { noplen = len; if (noplen > ASM_NOP_MAX) noplen = ASM_NOP_MAX; for (i = 0; i < noplen; i++) EMIT1(x86_nops[noplen][i]); len -= noplen; } *pprog = prog; } /* * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT * in arch/x86/kernel/alternative.c */ static void emit_fineibt(u8 **pprog, u32 hash) { u8 *prog = *pprog; EMIT_ENDBR(); EMIT3_off32(0x41, 0x81, 0xea, hash); /* subl $hash, %r10d */ EMIT2(0x74, 0x07); /* jz.d8 +7 */ EMIT2(0x0f, 0x0b); /* ud2 */ EMIT1(0x90); /* nop */ EMIT_ENDBR_POISON(); *pprog = prog; } static void emit_kcfi(u8 **pprog, u32 hash) { u8 *prog = *pprog; EMIT1_off32(0xb8, hash); /* movl $hash, %eax */ #ifdef CONFIG_CALL_PADDING EMIT1(0x90); EMIT1(0x90); EMIT1(0x90); EMIT1(0x90); EMIT1(0x90); EMIT1(0x90); EMIT1(0x90); EMIT1(0x90); EMIT1(0x90); EMIT1(0x90); EMIT1(0x90); #endif EMIT_ENDBR(); *pprog = prog; } static void emit_cfi(u8 **pprog, u32 hash) { u8 *prog = *pprog; switch (cfi_mode) { case CFI_FINEIBT: emit_fineibt(&prog, hash); break; case CFI_KCFI: emit_kcfi(&prog, hash); break; default: EMIT_ENDBR(); break; } *pprog = prog; } /* * Emit x86-64 prologue code for BPF program. * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes * while jumping to another program */ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, bool tail_call_reachable, bool is_subprog, bool is_exception_cb) { u8 *prog = *pprog; emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash); /* BPF trampoline can be made to work without these nops, * but let's waste 5 bytes for now and optimize later */ emit_nops(&prog, X86_PATCH_SIZE); if (!ebpf_from_cbpf) { if (tail_call_reachable && !is_subprog) /* When it's the entry of the whole tailcall context, * zeroing rax means initialising tail_call_cnt. */ EMIT2(0x31, 0xC0); /* xor eax, eax */ else /* Keep the same instruction layout. 
*/ EMIT2(0x66, 0x90); /* nop2 */ } /* Exception callback receives FP as third parameter */ if (is_exception_cb) { EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */ EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */ /* The main frame must have exception_boundary as true, so we * first restore those callee-saved regs from stack, before * reusing the stack frame. */ pop_callee_regs(&prog, all_callee_regs_used); pop_r12(&prog); /* Reset the stack frame. */ EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */ } else { EMIT1(0x55); /* push rbp */ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ } /* X86_TAIL_CALL_OFFSET is here */ EMIT_ENDBR(); /* sub rsp, rounded_stack_depth */ if (stack_depth) EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); if (tail_call_reachable) EMIT1(0x50); /* push rax */ *pprog = prog; } static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode) { u8 *prog = *pprog; s64 offset; offset = func - (ip + X86_PATCH_SIZE); if (!is_simm32(offset)) { pr_err("Target call %p is out of range\n", func); return -ERANGE; } EMIT1_off32(opcode, offset); *pprog = prog; return 0; } static int emit_call(u8 **pprog, void *func, void *ip) { return emit_patch(pprog, func, ip, 0xE8); } static int emit_rsb_call(u8 **pprog, void *func, void *ip) { OPTIMIZER_HIDE_VAR(func); ip += x86_call_depth_emit_accounting(pprog, func, ip); return emit_patch(pprog, func, ip, 0xE8); } static int emit_jump(u8 **pprog, void *func, void *ip) { return emit_patch(pprog, func, ip, 0xE9); } static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, void *old_addr, void *new_addr) { const u8 *nop_insn = x86_nops[5]; u8 old_insn[X86_PATCH_SIZE]; u8 new_insn[X86_PATCH_SIZE]; u8 *prog; int ret; memcpy(old_insn, nop_insn, X86_PATCH_SIZE); if (old_addr) { prog = old_insn; ret = t == BPF_MOD_CALL ? emit_call(&prog, old_addr, ip) : emit_jump(&prog, old_addr, ip); if (ret) return ret; } memcpy(new_insn, nop_insn, X86_PATCH_SIZE); if (new_addr) { prog = new_insn; ret = t == BPF_MOD_CALL ? emit_call(&prog, new_addr, ip) : emit_jump(&prog, new_addr, ip); if (ret) return ret; } ret = -EBUSY; mutex_lock(&text_mutex); if (memcmp(ip, old_insn, X86_PATCH_SIZE)) goto out; ret = 1; if (memcmp(ip, new_insn, X86_PATCH_SIZE)) { text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL); ret = 0; } out: mutex_unlock(&text_mutex); return ret; } int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, void *old_addr, void *new_addr) { if (!is_kernel_text((long)ip) && !is_bpf_text_address((long)ip)) /* BPF poking in modules is not supported */ return -EINVAL; /* * See emit_prologue(), for IBT builds the trampoline hook is preceded * with an ENDBR instruction. 
*/ if (is_endbr(*(u32 *)ip)) ip += ENDBR_INSN_SIZE; return __bpf_arch_text_poke(ip, t, old_addr, new_addr); } #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8) static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip) { u8 *prog = *pprog; if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { EMIT_LFENCE(); EMIT2(0xFF, 0xE0 + reg); } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { OPTIMIZER_HIDE_VAR(reg); if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH)) emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip); else emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip); } else { EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */ if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS)) EMIT1(0xCC); /* int3 */ } *pprog = prog; } static void emit_return(u8 **pprog, u8 *ip) { u8 *prog = *pprog; if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { emit_jump(&prog, x86_return_thunk, ip); } else { EMIT1(0xC3); /* ret */ if (IS_ENABLED(CONFIG_MITIGATION_SLS)) EMIT1(0xCC); /* int3 */ } *pprog = prog; } /* * Generate the following code: * * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... * if (index >= array->map.max_entries) * goto out; * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) * goto out; * prog = array->ptrs[index]; * if (prog == NULL) * goto out; * goto *(prog->bpf_func + prologue_size); * out: */ static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, u8 **pprog, bool *callee_regs_used, u32 stack_depth, u8 *ip, struct jit_context *ctx) { int tcc_off = -4 - round_up(stack_depth, 8); u8 *prog = *pprog, *start = *pprog; int offset; /* * rdi - pointer to ctx * rsi - pointer to bpf_array * rdx - index in bpf_array */ /* * if (index >= array->map.max_entries) * goto out; */ EMIT2(0x89, 0xD2); /* mov edx, edx */ EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ offsetof(struct bpf_array, map.max_entries)); offset = ctx->tail_call_indirect_label - (prog + 2 - start); EMIT2(X86_JBE, offset); /* jbe out */ /* * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) * goto out; */ EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ offset = ctx->tail_call_indirect_label - (prog + 2 - start); EMIT2(X86_JAE, offset); /* jae out */ EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ /* prog = array->ptrs[index]; */ EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */ offsetof(struct bpf_array, ptrs)); /* * if (prog == NULL) * goto out; */ EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */ offset = ctx->tail_call_indirect_label - (prog + 2 - start); EMIT2(X86_JE, offset); /* je out */ if (bpf_prog->aux->exception_boundary) { pop_callee_regs(&prog, all_callee_regs_used); pop_r12(&prog); } else { pop_callee_regs(&prog, callee_regs_used); if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena)) pop_r12(&prog); } EMIT1(0x58); /* pop rax */ if (stack_depth) EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */ round_up(stack_depth, 8)); /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */ EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */ offsetof(struct bpf_prog, bpf_func)); EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */ X86_TAIL_CALL_OFFSET); /* * Now we're ready to jump into next BPF program * rdi == ctx (1st arg) * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET */ emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start)); /* out: */ ctx->tail_call_indirect_label = prog - 
start; *pprog = prog; } static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, struct bpf_jit_poke_descriptor *poke, u8 **pprog, u8 *ip, bool *callee_regs_used, u32 stack_depth, struct jit_context *ctx) { int tcc_off = -4 - round_up(stack_depth, 8); u8 *prog = *pprog, *start = *pprog; int offset; /* * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) * goto out; */ EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ offset = ctx->tail_call_direct_label - (prog + 2 - start); EMIT2(X86_JAE, offset); /* jae out */ EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ poke->tailcall_bypass = ip + (prog - start); poke->adj_off = X86_TAIL_CALL_OFFSET; poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE; poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE; emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, poke->tailcall_bypass); if (bpf_prog->aux->exception_boundary) { pop_callee_regs(&prog, all_callee_regs_used); pop_r12(&prog); } else { pop_callee_regs(&prog, callee_regs_used); if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena)) pop_r12(&prog); } EMIT1(0x58); /* pop rax */ if (stack_depth) EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); emit_nops(&prog, X86_PATCH_SIZE); /* out: */ ctx->tail_call_direct_label = prog - start; *pprog = prog; } static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) { struct bpf_jit_poke_descriptor *poke; struct bpf_array *array; struct bpf_prog *target; int i, ret; for (i = 0; i < prog->aux->size_poke_tab; i++) { poke = &prog->aux->poke_tab[i]; if (poke->aux && poke->aux != prog->aux) continue; WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable)); if (poke->reason != BPF_POKE_REASON_TAIL_CALL) continue; array = container_of(poke->tail_call.map, struct bpf_array, map); mutex_lock(&array->aux->poke_mutex); target = array->ptrs[poke->tail_call.key]; if (target) { ret = __bpf_arch_text_poke(poke->tailcall_target, BPF_MOD_JUMP, NULL, (u8 *)target->bpf_func + poke->adj_off); BUG_ON(ret < 0); ret = __bpf_arch_text_poke(poke->tailcall_bypass, BPF_MOD_JUMP, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, NULL); BUG_ON(ret < 0); } WRITE_ONCE(poke->tailcall_target_stable, true); mutex_unlock(&array->aux->poke_mutex); } } static void emit_mov_imm32(u8 **pprog, bool sign_propagate, u32 dst_reg, const u32 imm32) { u8 *prog = *pprog; u8 b1, b2, b3; /* * Optimization: if imm32 is positive, use 'mov %eax, imm32' * (which zero-extends imm32) to save 2 bytes. */ if (sign_propagate && (s32)imm32 < 0) { /* 'mov %rax, imm32' sign extends imm32 */ b1 = add_1mod(0x48, dst_reg); b2 = 0xC7; b3 = 0xC0; EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32); goto done; } /* * Optimization: if imm32 is zero, use 'xor %eax, %eax' * to save 3 bytes. 
*/ if (imm32 == 0) { if (is_ereg(dst_reg)) EMIT1(add_2mod(0x40, dst_reg, dst_reg)); b2 = 0x31; /* xor */ b3 = 0xC0; EMIT2(b2, add_2reg(b3, dst_reg, dst_reg)); goto done; } /* mov %eax, imm32 */ if (is_ereg(dst_reg)) EMIT1(add_1mod(0x40, dst_reg)); EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); done: *pprog = prog; } static void emit_mov_imm64(u8 **pprog, u32 dst_reg, const u32 imm32_hi, const u32 imm32_lo) { u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo; u8 *prog = *pprog; if (is_uimm32(imm64)) { /* * For emitting plain u32, where sign bit must not be * propagated LLVM tends to load imm64 over mov32 * directly, so save couple of bytes by just doing * 'mov %eax, imm32' instead. */ emit_mov_imm32(&prog, false, dst_reg, imm32_lo); } else if (is_simm32(imm64)) { emit_mov_imm32(&prog, true, dst_reg, imm32_lo); } else { /* movabsq rax, imm64 */ EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg)); EMIT(imm32_lo, 4); EMIT(imm32_hi, 4); } *pprog = prog; } static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg) { u8 *prog = *pprog; if (is64) { /* mov dst, src */ EMIT_mov(dst_reg, src_reg); } else { /* mov32 dst, src */ if (is_ereg(dst_reg) || is_ereg(src_reg)) EMIT1(add_2mod(0x40, dst_reg, src_reg)); EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg)); } *pprog = prog; } static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg, u32 src_reg) { u8 *prog = *pprog; if (is64) { /* movs[b,w,l]q dst, src */ if (num_bits == 8) EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe, add_2reg(0xC0, src_reg, dst_reg)); else if (num_bits == 16) EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf, add_2reg(0xC0, src_reg, dst_reg)); else if (num_bits == 32) EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63, add_2reg(0xC0, src_reg, dst_reg)); } else { /* movs[b,w]l dst, src */ if (num_bits == 8) { EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe, add_2reg(0xC0, src_reg, dst_reg)); } else if (num_bits == 16) { if (is_ereg(dst_reg) || is_ereg(src_reg)) EMIT1(add_2mod(0x40, src_reg, dst_reg)); EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf, add_2reg(0xC0, src_reg, dst_reg)); } } *pprog = prog; } /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */ static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off) { u8 *prog = *pprog; if (is_imm8(off)) { /* 1-byte signed displacement. 
* * If off == 0 we could skip this and save one extra byte, but * special case of x86 R13 which always needs an offset is not * worth the hassle */ EMIT2(add_2reg(0x40, ptr_reg, val_reg), off); } else { /* 4-byte signed displacement */ EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off); } *pprog = prog; } static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off) { u8 *prog = *pprog; if (is_imm8(off)) { EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off); } else { EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off); } *pprog = prog; } /* * Emit a REX byte if it will be necessary to address these registers */ static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64) { u8 *prog = *pprog; if (is64) EMIT1(add_2mod(0x48, dst_reg, src_reg)); else if (is_ereg(dst_reg) || is_ereg(src_reg)) EMIT1(add_2mod(0x40, dst_reg, src_reg)); *pprog = prog; } /* * Similar version of maybe_emit_mod() for a single register */ static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64) { u8 *prog = *pprog; if (is64) EMIT1(add_1mod(0x48, reg)); else if (is_ereg(reg)) EMIT1(add_1mod(0x40, reg)); *pprog = prog; } /* LDX: dst_reg = *(u8*)(src_reg + off) */ static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) { u8 *prog = *pprog; switch (size) { case BPF_B: /* Emit 'movzx rax, byte ptr [rax + off]' */ EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); break; case BPF_H: /* Emit 'movzx rax, word ptr [rax + off]' */ EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); break; case BPF_W: /* Emit 'mov eax, dword ptr [rax+0x14]' */ if (is_ereg(dst_reg) || is_ereg(src_reg)) EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); else EMIT1(0x8B); break; case BPF_DW: /* Emit 'mov rax, qword ptr [rax+0x14]' */ EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); break; } emit_insn_suffix(&prog, src_reg, dst_reg, off); *pprog = prog; } /* LDSX: dst_reg = *(s8*)(src_reg + off) */ static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) { u8 *prog = *pprog; switch (size) { case BPF_B: /* Emit 'movsx rax, byte ptr [rax + off]' */ EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE); break; case BPF_H: /* Emit 'movsx rax, word ptr [rax + off]' */ EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF); break; case BPF_W: /* Emit 'movsx rax, dword ptr [rax+0x14]' */ EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63); break; } emit_insn_suffix(&prog, src_reg, dst_reg, off); *pprog = prog; } static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off) { u8 *prog = *pprog; switch (size) { case BPF_B: /* movzx rax, byte ptr [rax + r12 + off] */ EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6); break; case BPF_H: /* movzx rax, word ptr [rax + r12 + off] */ EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7); break; case BPF_W: /* mov eax, dword ptr [rax + r12 + off] */ EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B); break; case BPF_DW: /* mov rax, qword ptr [rax + r12 + off] */ EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B); break; } emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off); *pprog = prog; } static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) { emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off); } /* STX: *(u8*)(dst_reg + off) = src_reg */ static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) { u8 *prog = 
*pprog; switch (size) { case BPF_B: /* Emit 'mov byte ptr [rax + off], al' */ if (is_ereg(dst_reg) || is_ereg_8l(src_reg)) /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */ EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); else EMIT1(0x88); break; case BPF_H: if (is_ereg(dst_reg) || is_ereg(src_reg)) EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); else EMIT2(0x66, 0x89); break; case BPF_W: if (is_ereg(dst_reg) || is_ereg(src_reg)) EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); else EMIT1(0x89); break; case BPF_DW: EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); break; } emit_insn_suffix(&prog, dst_reg, src_reg, off); *pprog = prog; } /* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */ static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off) { u8 *prog = *pprog; switch (size) { case BPF_B: /* mov byte ptr [rax + r12 + off], al */ EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88); break; case BPF_H: /* mov word ptr [rax + r12 + off], ax */ EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89); break; case BPF_W: /* mov dword ptr [rax + r12 + 1], eax */ EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89); break; case BPF_DW: /* mov qword ptr [rax + r12 + 1], rax */ EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89); break; } emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off); *pprog = prog; } static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) { emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off); } /* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */ static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm) { u8 *prog = *pprog; switch (size) { case BPF_B: /* mov byte ptr [rax + r12 + off], imm8 */ EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6); break; case BPF_H: /* mov word ptr [rax + r12 + off], imm16 */ EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7); break; case BPF_W: /* mov dword ptr [rax + r12 + 1], imm32 */ EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7); break; case BPF_DW: /* mov qword ptr [rax + r12 + 1], imm32 */ EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7); break; } emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off); EMIT(imm, bpf_size_to_x86_bytes(size)); *pprog = prog; } static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm) { emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm); } static int emit_atomic(u8 **pprog, u8 atomic_op, u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size) { u8 *prog = *pprog; EMIT1(0xF0); /* lock prefix */ maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW); /* emit opcode */ switch (atomic_op) { case BPF_ADD: case BPF_AND: case BPF_OR: case BPF_XOR: /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */ EMIT1(simple_alu_opcodes[atomic_op]); break; case BPF_ADD | BPF_FETCH: /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */ EMIT2(0x0F, 0xC1); break; case BPF_XCHG: /* src_reg = atomic_xchg(dst_reg + off, src_reg); */ EMIT1(0x87); break; case BPF_CMPXCHG: /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */ EMIT2(0x0F, 0xB1); break; default: pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); return -EFAULT; } emit_insn_suffix(&prog, dst_reg, src_reg, off); *pprog = prog; return 0; } static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off) { u8 *prog = *pprog; EMIT1(0xF0); /* lock prefix */ switch (size) { case BPF_W: EMIT1(add_3mod(0x40, dst_reg, src_reg, 
index_reg)); break; case BPF_DW: EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg)); break; default: pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n"); return -EFAULT; } /* emit opcode */ switch (atomic_op) { case BPF_ADD: case BPF_AND: case BPF_OR: case BPF_XOR: /* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */ EMIT1(simple_alu_opcodes[atomic_op]); break; case BPF_ADD | BPF_FETCH: /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */ EMIT2(0x0F, 0xC1); break; case BPF_XCHG: /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */ EMIT1(0x87); break; case BPF_CMPXCHG: /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */ EMIT2(0x0F, 0xB1); break; default: pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); return -EFAULT; } emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off); *pprog = prog; return 0; } #define DONT_CLEAR 1 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) { u32 reg = x->fixup >> 8; /* jump over faulting load and clear dest register */ if (reg != DONT_CLEAR) *(unsigned long *)((void *)regs + reg) = 0; regs->ip += x->fixup & 0xff; return true; } static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt, bool *regs_used) { int i; for (i = 1; i <= insn_cnt; i++, insn++) { if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) regs_used[0] = true; if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) regs_used[1] = true; if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8) regs_used[2] = true; if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9) regs_used[3] = true; } } /* emit the 3-byte VEX prefix * * r: same as rex.r, extra bit for ModRM reg field * x: same as rex.x, extra bit for SIB index field * b: same as rex.b, extra bit for ModRM r/m, or SIB base * m: opcode map select, encoding escape bytes e.g. 
0x0f38
 * w: same as rex.w (32 bit or 64 bit) or opcode specific
 * src_reg2: additional source reg (encoded as BPF reg)
 * l: vector length (128 bit or 256 bit) or reserved
 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
 */
static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
		      bool w, u8 src_reg2, bool l, u8 pp)
{
	u8 *prog = *pprog;
	const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
	u8 b1, b2;
	u8 vvvv = reg2hex[src_reg2];

	/* reg2hex gives only the lower 3 bit of vvvv */
	if (is_ereg(src_reg2))
		vvvv |= 1 << 3;

	/*
	 * 2nd byte of 3-byte VEX prefix
	 * ~ means bit inverted encoding
	 *
	 *    7                           0
	 *  +---+---+---+---+---+---+---+---+
	 *  |~R |~X |~B |         m         |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
	/*
	 * 3rd byte of 3-byte VEX prefix
	 *
	 *    7                           0
	 *  +---+---+---+---+---+---+---+---+
	 *  | W |     ~vvvv     | L |  pp   |
	 *  +---+---+---+---+---+---+---+---+
	 */
	b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);

	EMIT3(b0, b1, b2);
	*pprog = prog;
}

/* emit BMI2 shift instruction */
static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
{
	u8 *prog = *pprog;
	bool r = is_ereg(dst_reg);
	u8 m = 2; /* escape code 0f38 */

	emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
	EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
	*pprog = prog;
}

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))

/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
#define RESTORE_TAIL_CALL_CNT(stack)				\
	EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	u64 arena_vm_start, user_vm_start;
	int i, excnt = 0;
	int ilen, proglen = 0;
	u8 *prog = temp;
	int err;

	arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
	user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);

	detect_reg_usage(insn, insn_cnt, callee_regs_used);

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
	/* Exception callback will clobber callee regs for its own use, and
	 * restore the original callee regs from main prog's stack frame.
	 */
	if (bpf_prog->aux->exception_boundary) {
		/* We also need to save r12, which is not mapped to any BPF
		 * register, as we throw after entry into the kernel, which may
		 * overwrite r12.
*/ push_r12(&prog); push_callee_regs(&prog, all_callee_regs_used); } else { if (arena_vm_start) push_r12(&prog); push_callee_regs(&prog, callee_regs_used); } if (arena_vm_start) emit_mov_imm64(&prog, X86_REG_R12, arena_vm_start >> 32, (u32) arena_vm_start); ilen = prog - temp; if (rw_image) memcpy(rw_image + proglen, temp, ilen); proglen += ilen; addrs[0] = proglen; prog = temp; for (i = 1; i <= insn_cnt; i++, insn++) { const s32 imm32 = insn->imm; u32 dst_reg = insn->dst_reg; u32 src_reg = insn->src_reg; u8 b2 = 0, b3 = 0; u8 *start_of_ldx; s64 jmp_offset; s16 insn_off; u8 jmp_cond; u8 *func; int nops; switch (insn->code) { /* ALU */ case BPF_ALU | BPF_ADD | BPF_X: case BPF_ALU | BPF_SUB | BPF_X: case BPF_ALU | BPF_AND | BPF_X: case BPF_ALU | BPF_OR | BPF_X: case BPF_ALU | BPF_XOR | BPF_X: case BPF_ALU64 | BPF_ADD | BPF_X: case BPF_ALU64 | BPF_SUB | BPF_X: case BPF_ALU64 | BPF_AND | BPF_X: case BPF_ALU64 | BPF_OR | BPF_X: case BPF_ALU64 | BPF_XOR | BPF_X: maybe_emit_mod(&prog, dst_reg, src_reg, BPF_CLASS(insn->code) == BPF_ALU64); b2 = simple_alu_opcodes[BPF_OP(insn->code)]; EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg)); break; case BPF_ALU64 | BPF_MOV | BPF_X: if (insn_is_cast_user(insn)) { if (dst_reg != src_reg) /* 32-bit mov */ emit_mov_reg(&prog, false, dst_reg, src_reg); /* shl dst_reg, 32 */ maybe_emit_1mod(&prog, dst_reg, true); EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32); /* or dst_reg, user_vm_start */ maybe_emit_1mod(&prog, dst_reg, true); if (is_axreg(dst_reg)) EMIT1_off32(0x0D, user_vm_start >> 32); else EMIT2_off32(0x81, add_1reg(0xC8, dst_reg), user_vm_start >> 32); /* rol dst_reg, 32 */ maybe_emit_1mod(&prog, dst_reg, true); EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32); /* xor r11, r11 */ EMIT3(0x4D, 0x31, 0xDB); /* test dst_reg32, dst_reg32; check if lower 32-bit are zero */ maybe_emit_mod(&prog, dst_reg, dst_reg, false); EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); /* cmove r11, dst_reg; if so, set dst_reg to zero */ /* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */ maybe_emit_mod(&prog, AUX_REG, dst_reg, true); EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg)); break; } else if (insn_is_mov_percpu_addr(insn)) { /* mov <dst>, <src> (if necessary) */ EMIT_mov(dst_reg, src_reg); #ifdef CONFIG_SMP /* add <dst>, gs:[<off>] */ EMIT2(0x65, add_1mod(0x48, dst_reg)); EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25); EMIT((u32)(unsigned long)&this_cpu_off, 4); #endif break; } fallthrough; case BPF_ALU | BPF_MOV | BPF_X: if (insn->off == 0) emit_mov_reg(&prog, BPF_CLASS(insn->code) == BPF_ALU64, dst_reg, src_reg); else emit_movsx_reg(&prog, insn->off, BPF_CLASS(insn->code) == BPF_ALU64, dst_reg, src_reg); break; /* neg dst */ case BPF_ALU | BPF_NEG: case BPF_ALU64 | BPF_NEG: maybe_emit_1mod(&prog, dst_reg, BPF_CLASS(insn->code) == BPF_ALU64); EMIT2(0xF7, add_1reg(0xD8, dst_reg)); break; case BPF_ALU | BPF_ADD | BPF_K: case BPF_ALU | BPF_SUB | BPF_K: case BPF_ALU | BPF_AND | BPF_K: case BPF_ALU | BPF_OR | BPF_K: case BPF_ALU | BPF_XOR | BPF_K: case BPF_ALU64 | BPF_ADD | BPF_K: case BPF_ALU64 | BPF_SUB | BPF_K: case BPF_ALU64 | BPF_AND | BPF_K: case BPF_ALU64 | BPF_OR | BPF_K: case BPF_ALU64 | BPF_XOR | BPF_K: maybe_emit_1mod(&prog, dst_reg, BPF_CLASS(insn->code) == BPF_ALU64); /* * b3 holds 'normal' opcode, b2 short form only valid * in case dst is eax/rax. 
*/ switch (BPF_OP(insn->code)) { case BPF_ADD: b3 = 0xC0; b2 = 0x05; break; case BPF_SUB: b3 = 0xE8; b2 = 0x2D; break; case BPF_AND: b3 = 0xE0; b2 = 0x25; break; case BPF_OR: b3 = 0xC8; b2 = 0x0D; break; case BPF_XOR: b3 = 0xF0; b2 = 0x35; break; } if (is_imm8(imm32)) EMIT3(0x83, add_1reg(b3, dst_reg), imm32); else if (is_axreg(dst_reg)) EMIT1_off32(b2, imm32); else EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32); break; case BPF_ALU64 | BPF_MOV | BPF_K: case BPF_ALU | BPF_MOV | BPF_K: emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64, dst_reg, imm32); break; case BPF_LD | BPF_IMM | BPF_DW: emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm); insn++; i++; break; /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ case BPF_ALU | BPF_MOD | BPF_X: case BPF_ALU | BPF_DIV | BPF_X: case BPF_ALU | BPF_MOD | BPF_K: case BPF_ALU | BPF_DIV | BPF_K: case BPF_ALU64 | BPF_MOD | BPF_X: case BPF_ALU64 | BPF_DIV | BPF_X: case BPF_ALU64 | BPF_MOD | BPF_K: case BPF_ALU64 | BPF_DIV | BPF_K: { bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; if (dst_reg != BPF_REG_0) EMIT1(0x50); /* push rax */ if (dst_reg != BPF_REG_3) EMIT1(0x52); /* push rdx */ if (BPF_SRC(insn->code) == BPF_X) { if (src_reg == BPF_REG_0 || src_reg == BPF_REG_3) { /* mov r11, src_reg */ EMIT_mov(AUX_REG, src_reg); src_reg = AUX_REG; } } else { /* mov r11, imm32 */ EMIT3_off32(0x49, 0xC7, 0xC3, imm32); src_reg = AUX_REG; } if (dst_reg != BPF_REG_0) /* mov rax, dst_reg */ emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg); if (insn->off == 0) { /* * xor edx, edx * equivalent to 'xor rdx, rdx', but one byte less */ EMIT2(0x31, 0xd2); /* div src_reg */ maybe_emit_1mod(&prog, src_reg, is64); EMIT2(0xF7, add_1reg(0xF0, src_reg)); } else { if (BPF_CLASS(insn->code) == BPF_ALU) EMIT1(0x99); /* cdq */ else EMIT2(0x48, 0x99); /* cqo */ /* idiv src_reg */ maybe_emit_1mod(&prog, src_reg, is64); EMIT2(0xF7, add_1reg(0xF8, src_reg)); } if (BPF_OP(insn->code) == BPF_MOD && dst_reg != BPF_REG_3) /* mov dst_reg, rdx */ emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3); else if (BPF_OP(insn->code) == BPF_DIV && dst_reg != BPF_REG_0) /* mov dst_reg, rax */ emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0); if (dst_reg != BPF_REG_3) EMIT1(0x5A); /* pop rdx */ if (dst_reg != BPF_REG_0) EMIT1(0x58); /* pop rax */ break; } case BPF_ALU | BPF_MUL | BPF_K: case BPF_ALU64 | BPF_MUL | BPF_K: maybe_emit_mod(&prog, dst_reg, dst_reg, BPF_CLASS(insn->code) == BPF_ALU64); if (is_imm8(imm32)) /* imul dst_reg, dst_reg, imm8 */ EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg), imm32); else /* imul dst_reg, dst_reg, imm32 */ EMIT2_off32(0x69, add_2reg(0xC0, dst_reg, dst_reg), imm32); break; case BPF_ALU | BPF_MUL | BPF_X: case BPF_ALU64 | BPF_MUL | BPF_X: maybe_emit_mod(&prog, src_reg, dst_reg, BPF_CLASS(insn->code) == BPF_ALU64); /* imul dst_reg, src_reg */ EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg)); break; /* Shifts */ case BPF_ALU | BPF_LSH | BPF_K: case BPF_ALU | BPF_RSH | BPF_K: case BPF_ALU | BPF_ARSH | BPF_K: case BPF_ALU64 | BPF_LSH | BPF_K: case BPF_ALU64 | BPF_RSH | BPF_K: case BPF_ALU64 | BPF_ARSH | BPF_K: maybe_emit_1mod(&prog, dst_reg, BPF_CLASS(insn->code) == BPF_ALU64); b3 = simple_alu_opcodes[BPF_OP(insn->code)]; if (imm32 == 1) EMIT2(0xD1, add_1reg(b3, dst_reg)); else EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); break; case BPF_ALU | BPF_LSH | BPF_X: case BPF_ALU | BPF_RSH | BPF_X: case BPF_ALU | BPF_ARSH | BPF_X: case BPF_ALU64 | BPF_LSH | BPF_X: case BPF_ALU64 | BPF_RSH | BPF_X: case BPF_ALU64 | BPF_ARSH | BPF_X: /* BMI2 shifts aren't better when 
shift count is already in rcx */ if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) { /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */ bool w = (BPF_CLASS(insn->code) == BPF_ALU64); u8 op; switch (BPF_OP(insn->code)) { case BPF_LSH: op = 1; /* prefix 0x66 */ break; case BPF_RSH: op = 3; /* prefix 0xf2 */ break; case BPF_ARSH: op = 2; /* prefix 0xf3 */ break; } emit_shiftx(&prog, dst_reg, src_reg, w, op); break; } if (src_reg != BPF_REG_4) { /* common case */ /* Check for bad case when dst_reg == rcx */ if (dst_reg == BPF_REG_4) { /* mov r11, dst_reg */ EMIT_mov(AUX_REG, dst_reg); dst_reg = AUX_REG; } else { EMIT1(0x51); /* push rcx */ } /* mov rcx, src_reg */ EMIT_mov(BPF_REG_4, src_reg); } /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ maybe_emit_1mod(&prog, dst_reg, BPF_CLASS(insn->code) == BPF_ALU64); b3 = simple_alu_opcodes[BPF_OP(insn->code)]; EMIT2(0xD3, add_1reg(b3, dst_reg)); if (src_reg != BPF_REG_4) { if (insn->dst_reg == BPF_REG_4) /* mov dst_reg, r11 */ EMIT_mov(insn->dst_reg, AUX_REG); else EMIT1(0x59); /* pop rcx */ } break; case BPF_ALU | BPF_END | BPF_FROM_BE: case BPF_ALU64 | BPF_END | BPF_FROM_LE: switch (imm32) { case 16: /* Emit 'ror %ax, 8' to swap lower 2 bytes */ EMIT1(0x66); if (is_ereg(dst_reg)) EMIT1(0x41); EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); /* Emit 'movzwl eax, ax' */ if (is_ereg(dst_reg)) EMIT3(0x45, 0x0F, 0xB7); else EMIT2(0x0F, 0xB7); EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); break; case 32: /* Emit 'bswap eax' to swap lower 4 bytes */ if (is_ereg(dst_reg)) EMIT2(0x41, 0x0F); else EMIT1(0x0F); EMIT1(add_1reg(0xC8, dst_reg)); break; case 64: /* Emit 'bswap rax' to swap 8 bytes */ EMIT3(add_1mod(0x48, dst_reg), 0x0F, add_1reg(0xC8, dst_reg)); break; } break; case BPF_ALU | BPF_END | BPF_FROM_LE: switch (imm32) { case 16: /* * Emit 'movzwl eax, ax' to zero extend 16-bit * into 64 bit */ if (is_ereg(dst_reg)) EMIT3(0x45, 0x0F, 0xB7); else EMIT2(0x0F, 0xB7); EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); break; case 32: /* Emit 'mov eax, eax' to clear upper 32-bits */ if (is_ereg(dst_reg)) EMIT1(0x45); EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); break; case 64: /* nop */ break; } break; /* speculation barrier */ case BPF_ST | BPF_NOSPEC: EMIT_LFENCE(); break; /* ST: *(u8*)(dst_reg + off) = imm */ case BPF_ST | BPF_MEM | BPF_B: if (is_ereg(dst_reg)) EMIT2(0x41, 0xC6); else EMIT1(0xC6); goto st; case BPF_ST | BPF_MEM | BPF_H: if (is_ereg(dst_reg)) EMIT3(0x66, 0x41, 0xC7); else EMIT2(0x66, 0xC7); goto st; case BPF_ST | BPF_MEM | BPF_W: if (is_ereg(dst_reg)) EMIT2(0x41, 0xC7); else EMIT1(0xC7); goto st; case BPF_ST | BPF_MEM | BPF_DW: EMIT2(add_1mod(0x48, dst_reg), 0xC7); st: if (is_imm8(insn->off)) EMIT2(add_1reg(0x40, dst_reg), insn->off); else EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); break; /* STX: *(u8*)(dst_reg + off) = src_reg */ case BPF_STX | BPF_MEM | BPF_B: case BPF_STX | BPF_MEM | BPF_H: case BPF_STX | BPF_MEM | BPF_W: case BPF_STX | BPF_MEM | BPF_DW: emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); break; case BPF_ST | BPF_PROBE_MEM32 | BPF_B: case BPF_ST | BPF_PROBE_MEM32 | BPF_H: case BPF_ST | BPF_PROBE_MEM32 | BPF_W: case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: start_of_ldx = prog; emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm); goto populate_extable; /* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */ case BPF_LDX | BPF_PROBE_MEM32 | BPF_B: case BPF_LDX | BPF_PROBE_MEM32 | BPF_H: case BPF_LDX | BPF_PROBE_MEM32 | BPF_W: case BPF_LDX | 
BPF_PROBE_MEM32 | BPF_DW: case BPF_STX | BPF_PROBE_MEM32 | BPF_B: case BPF_STX | BPF_PROBE_MEM32 | BPF_H: case BPF_STX | BPF_PROBE_MEM32 | BPF_W: case BPF_STX | BPF_PROBE_MEM32 | BPF_DW: start_of_ldx = prog; if (BPF_CLASS(insn->code) == BPF_LDX) emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); else emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); populate_extable: { struct exception_table_entry *ex; u8 *_insn = image + proglen + (start_of_ldx - temp); s64 delta; if (!bpf_prog->aux->extable) break; if (excnt >= bpf_prog->aux->num_exentries) { pr_err("mem32 extable bug\n"); return -EFAULT; } ex = &bpf_prog->aux->extable[excnt++]; delta = _insn - (u8 *)&ex->insn; /* switch ex to rw buffer for writes */ ex = (void *)rw_image + ((void *)ex - (void *)image); ex->insn = delta; ex->data = EX_TYPE_BPF; ex->fixup = (prog - start_of_ldx) | ((BPF_CLASS(insn->code) == BPF_LDX ? reg2pt_regs[dst_reg] : DONT_CLEAR) << 8); } break; /* LDX: dst_reg = *(u8*)(src_reg + off) */ case BPF_LDX | BPF_MEM | BPF_B: case BPF_LDX | BPF_PROBE_MEM | BPF_B: case BPF_LDX | BPF_MEM | BPF_H: case BPF_LDX | BPF_PROBE_MEM | BPF_H: case BPF_LDX | BPF_MEM | BPF_W: case BPF_LDX | BPF_PROBE_MEM | BPF_W: case BPF_LDX | BPF_MEM | BPF_DW: case BPF_LDX | BPF_PROBE_MEM | BPF_DW: /* LDXS: dst_reg = *(s8*)(src_reg + off) */ case BPF_LDX | BPF_MEMSX | BPF_B: case BPF_LDX | BPF_MEMSX | BPF_H: case BPF_LDX | BPF_MEMSX | BPF_W: case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: insn_off = insn->off; if (BPF_MODE(insn->code) == BPF_PROBE_MEM || BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { /* Conservatively check that src_reg + insn->off is a kernel address: * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE * and * src_reg + insn->off < VSYSCALL_ADDR */ u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR; u8 *end_of_jmp; /* movabsq r10, VSYSCALL_ADDR */ emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32, (u32)(long)VSYSCALL_ADDR); /* mov src_reg, r11 */ EMIT_mov(AUX_REG, src_reg); if (insn->off) { /* add r11, insn->off */ maybe_emit_1mod(&prog, AUX_REG, true); EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off); } /* sub r11, r10 */ maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true); EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX)); /* movabsq r10, limit */ emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32, (u32)(long)limit); /* cmp r10, r11 */ maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true); EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX)); /* if unsigned '>', goto load */ EMIT2(X86_JA, 0); end_of_jmp = prog; /* xor dst_reg, dst_reg */ emit_mov_imm32(&prog, false, dst_reg, 0); /* jmp byte_after_ldx */ EMIT2(0xEB, 0); /* populate jmp_offset for JAE above to jump to start_of_ldx */ start_of_ldx = prog; end_of_jmp[-1] = start_of_ldx - end_of_jmp; } if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX || BPF_MODE(insn->code) == BPF_MEMSX) emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); else emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); if (BPF_MODE(insn->code) == BPF_PROBE_MEM || BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { struct exception_table_entry *ex; u8 *_insn = image + proglen + (start_of_ldx - temp); s64 delta; /* populate jmp_offset for JMP above */ start_of_ldx[-1] = prog - start_of_ldx; if (!bpf_prog->aux->extable) break; if (excnt >= bpf_prog->aux->num_exentries) { pr_err("ex gen bug\n"); return -EFAULT; } ex = &bpf_prog->aux->extable[excnt++]; delta = _insn - (u8 *)&ex->insn; 
if (!is_simm32(delta)) { pr_err("extable->insn doesn't fit into 32-bit\n"); return -EFAULT; } /* switch ex to rw buffer for writes */ ex = (void *)rw_image + ((void *)ex - (void *)image); ex->insn = delta; ex->data = EX_TYPE_BPF; if (dst_reg > BPF_REG_9) { pr_err("verifier error\n"); return -EFAULT; } /* * Compute size of x86 insn and its target dest x86 register. * ex_handler_bpf() will use lower 8 bits to adjust * pt_regs->ip to jump over this x86 instruction * and upper bits to figure out which pt_regs to zero out. * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" * of 4 bytes will be ignored and rbx will be zero inited. */ ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8); } break; case BPF_STX | BPF_ATOMIC | BPF_W: case BPF_STX | BPF_ATOMIC | BPF_DW: if (insn->imm == (BPF_AND | BPF_FETCH) || insn->imm == (BPF_OR | BPF_FETCH) || insn->imm == (BPF_XOR | BPF_FETCH)) { bool is64 = BPF_SIZE(insn->code) == BPF_DW; u32 real_src_reg = src_reg; u32 real_dst_reg = dst_reg; u8 *branch_target; /* * Can't be implemented with a single x86 insn. * Need to do a CMPXCHG loop. */ /* Will need RAX as a CMPXCHG operand so save R0 */ emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); if (src_reg == BPF_REG_0) real_src_reg = BPF_REG_AX; if (dst_reg == BPF_REG_0) real_dst_reg = BPF_REG_AX; branch_target = prog; /* Load old value */ emit_ldx(&prog, BPF_SIZE(insn->code), BPF_REG_0, real_dst_reg, insn->off); /* * Perform the (commutative) operation locally, * put the result in the AUX_REG. */ emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], add_2reg(0xC0, AUX_REG, real_src_reg)); /* Attempt to swap in new value */ err = emit_atomic(&prog, BPF_CMPXCHG, real_dst_reg, AUX_REG, insn->off, BPF_SIZE(insn->code)); if (WARN_ON(err)) return err; /* * ZF tells us whether we won the race. If it's * cleared we need to try again. 
*/ EMIT2(X86_JNE, -(prog - branch_target) - 2); /* Return the pre-modification value */ emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); /* Restore R0 after clobbering RAX */ emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); break; } err = emit_atomic(&prog, insn->imm, dst_reg, src_reg, insn->off, BPF_SIZE(insn->code)); if (err) return err; break; case BPF_STX | BPF_PROBE_ATOMIC | BPF_W: case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW: start_of_ldx = prog; err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code), dst_reg, src_reg, X86_REG_R12, insn->off); if (err) return err; goto populate_extable; /* call */ case BPF_JMP | BPF_CALL: { u8 *ip = image + addrs[i - 1]; func = (u8 *) __bpf_call_base + imm32; if (tail_call_reachable) { RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth); ip += 7; } if (!imm32) return -EINVAL; ip += x86_call_depth_emit_accounting(&prog, func, ip); if (emit_call(&prog, func, ip)) return -EINVAL; break; } case BPF_JMP | BPF_TAIL_CALL: if (imm32) emit_bpf_tail_call_direct(bpf_prog, &bpf_prog->aux->poke_tab[imm32 - 1], &prog, image + addrs[i - 1], callee_regs_used, bpf_prog->aux->stack_depth, ctx); else emit_bpf_tail_call_indirect(bpf_prog, &prog, callee_regs_used, bpf_prog->aux->stack_depth, image + addrs[i - 1], ctx); break; /* cond jump */ case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: case BPF_JMP32 | BPF_JEQ | BPF_X: case BPF_JMP32 | BPF_JNE | BPF_X: case BPF_JMP32 | BPF_JGT | BPF_X: case BPF_JMP32 | BPF_JLT | BPF_X: case BPF_JMP32 | BPF_JGE | BPF_X: case BPF_JMP32 | BPF_JLE | BPF_X: case BPF_JMP32 | BPF_JSGT | BPF_X: case BPF_JMP32 | BPF_JSLT | BPF_X: case BPF_JMP32 | BPF_JSGE | BPF_X: case BPF_JMP32 | BPF_JSLE | BPF_X: /* cmp dst_reg, src_reg */ maybe_emit_mod(&prog, dst_reg, src_reg, BPF_CLASS(insn->code) == BPF_JMP); EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_X: case BPF_JMP32 | BPF_JSET | BPF_X: /* test dst_reg, src_reg */ maybe_emit_mod(&prog, dst_reg, src_reg, BPF_CLASS(insn->code) == BPF_JMP); EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP32 | BPF_JSET | BPF_K: /* test dst_reg, imm32 */ maybe_emit_1mod(&prog, dst_reg, BPF_CLASS(insn->code) == BPF_JMP); EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); goto emit_cond_jmp; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: case BPF_JMP32 | BPF_JEQ | BPF_K: case BPF_JMP32 | BPF_JNE | BPF_K: case BPF_JMP32 | BPF_JGT | BPF_K: case BPF_JMP32 | BPF_JLT | BPF_K: case BPF_JMP32 | BPF_JGE | BPF_K: case BPF_JMP32 | BPF_JLE | BPF_K: case BPF_JMP32 | BPF_JSGT | BPF_K: case BPF_JMP32 | BPF_JSLT | BPF_K: case BPF_JMP32 | BPF_JSGE | BPF_K: case BPF_JMP32 | BPF_JSLE | BPF_K: /* test dst_reg, dst_reg to save one extra byte */ if (imm32 == 0) { maybe_emit_mod(&prog, dst_reg, dst_reg, BPF_CLASS(insn->code) == BPF_JMP); EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); goto emit_cond_jmp; } /* cmp dst_reg, imm8/32 */ maybe_emit_1mod(&prog, dst_reg, 
BPF_CLASS(insn->code) == BPF_JMP); if (is_imm8(imm32)) EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); else EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); emit_cond_jmp: /* Convert BPF opcode to x86 */ switch (BPF_OP(insn->code)) { case BPF_JEQ: jmp_cond = X86_JE; break; case BPF_JSET: case BPF_JNE: jmp_cond = X86_JNE; break; case BPF_JGT: /* GT is unsigned '>', JA in x86 */ jmp_cond = X86_JA; break; case BPF_JLT: /* LT is unsigned '<', JB in x86 */ jmp_cond = X86_JB; break; case BPF_JGE: /* GE is unsigned '>=', JAE in x86 */ jmp_cond = X86_JAE; break; case BPF_JLE: /* LE is unsigned '<=', JBE in x86 */ jmp_cond = X86_JBE; break; case BPF_JSGT: /* Signed '>', GT in x86 */ jmp_cond = X86_JG; break; case BPF_JSLT: /* Signed '<', LT in x86 */ jmp_cond = X86_JL; break; case BPF_JSGE: /* Signed '>=', GE in x86 */ jmp_cond = X86_JGE; break; case BPF_JSLE: /* Signed '<=', LE in x86 */ jmp_cond = X86_JLE; break; default: /* to silence GCC warning */ return -EFAULT; } jmp_offset = addrs[i + insn->off] - addrs[i]; if (is_imm8(jmp_offset)) { if (jmp_padding) { /* To keep the jmp_offset valid, the extra bytes are * padded before the jump insn, so we subtract the * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. * * If the previous pass already emits an imm8 * jmp_cond, then this BPF insn won't shrink, so * "nops" is 0. * * On the other hand, if the previous pass emits an * imm32 jmp_cond, the extra 4 bytes(*) is padded to * keep the image from shrinking further. * * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond * is 2 bytes, so the size difference is 4 bytes. */ nops = INSN_SZ_DIFF - 2; if (nops != 0 && nops != 4) { pr_err("unexpected jmp_cond padding: %d bytes\n", nops); return -EFAULT; } emit_nops(&prog, nops); } EMIT2(jmp_cond, jmp_offset); } else if (is_simm32(jmp_offset)) { EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); } else { pr_err("cond_jmp gen bug %llx\n", jmp_offset); return -EFAULT; } break; case BPF_JMP | BPF_JA: case BPF_JMP32 | BPF_JA: if (BPF_CLASS(insn->code) == BPF_JMP) { if (insn->off == -1) /* -1 jmp instructions will always jump * backwards two bytes. Explicitly handling * this case avoids wasting too many passes * when there are long sequences of replaced * dead code. */ jmp_offset = -2; else jmp_offset = addrs[i + insn->off] - addrs[i]; } else { if (insn->imm == -1) jmp_offset = -2; else jmp_offset = addrs[i + insn->imm] - addrs[i]; } if (!jmp_offset) { /* * If jmp_padding is enabled, the extra nops will * be inserted. Otherwise, optimize out nop jumps. */ if (jmp_padding) { /* There are 3 possible conditions. * (1) This BPF_JA is already optimized out in * the previous run, so there is no need * to pad any extra byte (0 byte). * (2) The previous pass emits an imm8 jmp, * so we pad 2 bytes to match the previous * insn size. * (3) Similarly, the previous pass emits an * imm32 jmp, and 5 bytes is padded. */ nops = INSN_SZ_DIFF; if (nops != 0 && nops != 2 && nops != 5) { pr_err("unexpected nop jump padding: %d bytes\n", nops); return -EFAULT; } emit_nops(&prog, nops); } break; } emit_jmp: if (is_imm8(jmp_offset)) { if (jmp_padding) { /* To avoid breaking jmp_offset, the extra bytes * are padded before the actual jmp insn, so * 2 bytes is subtracted from INSN_SZ_DIFF. * * If the previous pass already emits an imm8 * jmp, there is nothing to pad (0 byte). * * If it emits an imm32 jmp (5 bytes) previously * and now an imm8 jmp (2 bytes), then we pad * (5 - 2 = 3) bytes to stop the image from * shrinking further. 
*/ nops = INSN_SZ_DIFF - 2; if (nops != 0 && nops != 3) { pr_err("unexpected jump padding: %d bytes\n", nops); return -EFAULT; } emit_nops(&prog, INSN_SZ_DIFF - 2); } EMIT2(0xEB, jmp_offset); } else if (is_simm32(jmp_offset)) { EMIT1_off32(0xE9, jmp_offset); } else { pr_err("jmp gen bug %llx\n", jmp_offset); return -EFAULT; } break; case BPF_JMP | BPF_EXIT: if (seen_exit) { jmp_offset = ctx->cleanup_addr - addrs[i]; goto emit_jmp; } seen_exit = true; /* Update cleanup_addr */ ctx->cleanup_addr = proglen; if (bpf_prog->aux->exception_boundary) { pop_callee_regs(&prog, all_callee_regs_used); pop_r12(&prog); } else { pop_callee_regs(&prog, callee_regs_used); if (arena_vm_start) pop_r12(&prog); } EMIT1(0xC9); /* leave */ emit_return(&prog, image + addrs[i - 1] + (prog - temp)); break; default: /* * By design x86-64 JIT should support all BPF instructions. * This error will be seen if new instruction was added * to the interpreter, but not to the JIT, or if there is * junk in bpf_prog. */ pr_err("bpf_jit: unknown opcode %02x\n", insn->code); return -EINVAL; } ilen = prog - temp; if (ilen > BPF_MAX_INSN_SIZE) { pr_err("bpf_jit: fatal insn size error\n"); return -EFAULT; } if (image) { /* * When populating the image, assert that: * * i) We do not write beyond the allocated space, and * ii) addrs[i] did not change from the prior run, in order * to validate assumptions made for computing branch * displacements. */ if (unlikely(proglen + ilen > oldproglen || proglen + ilen != addrs[i])) { pr_err("bpf_jit: fatal error\n"); return -EFAULT; } memcpy(rw_image + proglen, temp, ilen); } proglen += ilen; addrs[i] = proglen; prog = temp; } if (image && excnt != bpf_prog->aux->num_exentries) { pr_err("extable is not populated\n"); return -EFAULT; } return proglen; } static void clean_stack_garbage(const struct btf_func_model *m, u8 **pprog, int nr_stack_slots, int stack_size) { int arg_size, off; u8 *prog; /* Generally speaking, the compiler will pass the arguments * on-stack with "push" instruction, which will take 8-byte * on the stack. In this case, there won't be garbage values * while we copy the arguments from origin stack frame to current * in BPF_DW. * * However, sometimes the compiler will only allocate 4-byte on * the stack for the arguments. For now, this case will only * happen if there is only one argument on-stack and its size * not more than 4 byte. In this case, there will be garbage * values on the upper 4-byte where we store the argument on * current stack frame. * * arguments on origin stack: * * stack_arg_1(4-byte) xxx(4-byte) * * what we copy: * * stack_arg_1(8-byte): stack_arg_1(origin) xxx * * and the xxx is the garbage values which we should clean here. 
*/ if (nr_stack_slots != 1) return; /* the size of the last argument */ arg_size = m->arg_size[m->nr_args - 1]; if (arg_size <= 4) { off = -(stack_size - 4); prog = *pprog; /* mov DWORD PTR [rbp + off], 0 */ if (!is_imm8(off)) EMIT2_off32(0xC7, 0x85, off); else EMIT3(0xC7, 0x45, off); EMIT(0, 4); *pprog = prog; } } /* get the count of the regs that are used to pass arguments */ static int get_nr_used_regs(const struct btf_func_model *m) { int i, arg_regs, nr_used_regs = 0; for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { arg_regs = (m->arg_size[i] + 7) / 8; if (nr_used_regs + arg_regs <= 6) nr_used_regs += arg_regs; if (nr_used_regs >= 6) break; } return nr_used_regs; } static void save_args(const struct btf_func_model *m, u8 **prog, int stack_size, bool for_call_origin) { int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0; int i, j; /* Store function arguments to stack. * For a function that accepts two pointers the sequence will be: * mov QWORD PTR [rbp-0x10],rdi * mov QWORD PTR [rbp-0x8],rsi */ for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { arg_regs = (m->arg_size[i] + 7) / 8; /* According to the research of Yonghong, struct members * should be all in register or all on the stack. * Meanwhile, the compiler will pass the argument on regs * if the remaining regs can hold the argument. * * Disorder of the args can happen. For example: * * struct foo_struct { * long a; * int b; * }; * int foo(char, char, char, char, char, struct foo_struct, * char); * * the arg1-5,arg7 will be passed by regs, and arg6 will * by stack. */ if (nr_regs + arg_regs > 6) { /* copy function arguments from origin stack frame * into current stack frame. * * The starting address of the arguments on-stack * is: * rbp + 8(push rbp) + * 8(return addr of origin call) + * 8(return addr of the caller) * which means: rbp + 24 */ for (j = 0; j < arg_regs; j++) { emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP, nr_stack_slots * 8 + 0x18); emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -stack_size); if (!nr_stack_slots) first_off = stack_size; stack_size -= 8; nr_stack_slots++; } } else { /* Only copy the arguments on-stack to current * 'stack_size' and ignore the regs, used to * prepare the arguments on-stack for origin call. */ if (for_call_origin) { nr_regs += arg_regs; continue; } /* copy the arguments from regs into stack */ for (j = 0; j < arg_regs; j++) { emit_stx(prog, BPF_DW, BPF_REG_FP, nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs, -stack_size); stack_size -= 8; nr_regs++; } } } clean_stack_garbage(m, prog, nr_stack_slots, first_off); } static void restore_regs(const struct btf_func_model *m, u8 **prog, int stack_size) { int i, j, arg_regs, nr_regs = 0; /* Restore function arguments from stack. * For a function that accepts two pointers the sequence will be: * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] * * The logic here is similar to what we do in save_args() */ for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { arg_regs = (m->arg_size[i] + 7) / 8; if (nr_regs + arg_regs <= 6) { for (j = 0; j < arg_regs; j++) { emit_ldx(prog, BPF_DW, nr_regs == 5 ? 
X86_REG_R9 : BPF_REG_1 + nr_regs, BPF_REG_FP, -stack_size); stack_size -= 8; nr_regs++; } } else { stack_size -= 8 * arg_regs; } if (nr_regs >= 6) break; } } static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, struct bpf_tramp_link *l, int stack_size, int run_ctx_off, bool save_ret, void *image, void *rw_image) { u8 *prog = *pprog; u8 *jmp_insn; int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); struct bpf_prog *p = l->link.prog; u64 cookie = l->cookie; /* mov rdi, cookie */ emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie); /* Prepare struct bpf_tramp_run_ctx. * * bpf_tramp_run_ctx is already preserved by * arch_prepare_bpf_trampoline(). * * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi */ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off); /* arg1: mov rdi, progs[i] */ emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); /* arg2: lea rsi, [rbp - ctx_cookie_off] */ if (!is_imm8(-run_ctx_off)) EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off); else EMIT4(0x48, 0x8D, 0x75, -run_ctx_off); if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image))) return -EINVAL; /* remember prog start time returned by __bpf_prog_enter */ emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); /* if (__bpf_prog_enter*(prog) == 0) * goto skip_exec_of_prog; */ EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ /* emit 2 nops that will be replaced with JE insn */ jmp_insn = prog; emit_nops(&prog, 2); /* arg1: lea rdi, [rbp - stack_size] */ if (!is_imm8(-stack_size)) EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size); else EMIT4(0x48, 0x8D, 0x7D, -stack_size); /* arg2: progs[i]->insnsi for interpreter */ if (!p->jited) emit_mov_imm64(&prog, BPF_REG_2, (long) p->insnsi >> 32, (u32) (long) p->insnsi); /* call JITed bpf program or interpreter */ if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image))) return -EINVAL; /* * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return * of the previous call which is then passed on the stack to * the next BPF program. * * BPF_TRAMP_FENTRY trampoline may need to return the return * value of BPF_PROG_TYPE_STRUCT_OPS prog. 
*/ if (save_ret) emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); /* replace 2 nops with JE insn, since jmp target is known */ jmp_insn[0] = X86_JE; jmp_insn[1] = prog - jmp_insn - 2; /* arg1: mov rdi, progs[i] */ emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); /* arg2: mov rsi, rbx <- start time in nsec */ emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); /* arg3: lea rdx, [rbp - run_ctx_off] */ if (!is_imm8(-run_ctx_off)) EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off); else EMIT4(0x48, 0x8D, 0x55, -run_ctx_off); if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image))) return -EINVAL; *pprog = prog; return 0; } static void emit_align(u8 **pprog, u32 align) { u8 *target, *prog = *pprog; target = PTR_ALIGN(prog, align); if (target != prog) emit_nops(&prog, target - prog); *pprog = prog; } static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) { u8 *prog = *pprog; s64 offset; offset = func - (ip + 2 + 4); if (!is_simm32(offset)) { pr_err("Target %p is out of range\n", func); return -EINVAL; } EMIT2_off32(0x0F, jmp_cond + 0x10, offset); *pprog = prog; return 0; } static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, struct bpf_tramp_links *tl, int stack_size, int run_ctx_off, bool save_ret, void *image, void *rw_image) { int i; u8 *prog = *pprog; for (i = 0; i < tl->nr_links; i++) { if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, save_ret, image, rw_image)) return -EINVAL; } *pprog = prog; return 0; } static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, struct bpf_tramp_links *tl, int stack_size, int run_ctx_off, u8 **branches, void *image, void *rw_image) { u8 *prog = *pprog; int i; /* The first fmod_ret program will receive a garbage return value. * Set this to 0 to avoid confusing the program. */ emit_mov_imm32(&prog, false, BPF_REG_0, 0); emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); for (i = 0; i < tl->nr_links; i++) { if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true, image, rw_image)) return -EINVAL; /* mod_ret prog stored return value into [rbp - 8]. Emit: * if (*(u64 *)(rbp - 8) != 0) * goto do_fexit; */ /* cmp QWORD PTR [rbp - 0x8], 0x0 */ EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00); /* Save the location of the branch and Generate 6 nops * (4 bytes for an offset and 2 bytes for the jump) These nops * are replaced with a conditional jump once do_fexit (i.e. the * start of the fexit invocation) is finalized. 
		 */
		branches[i] = prog;
		emit_nops(&prog, 4 + 2);
	}

	*pprog = prog;
	return 0;
}

/* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will be nr_args=2
 * The assembly code when eth_type_trans is executing after trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 16                     // space for skb and dev
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
 * pop rbx
 * leave
 * ret
 *
 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns
 * eth_type_trans will continue executing with original skb and dev pointers.
 *
 * The assembly code when eth_type_trans is called from trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 24                     // space for skb, dev, return value
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
 * call eth_type_trans+5           // execute body of eth_type_trans
 * mov qword ptr [rbp - 8], rax    // save return value
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
 * pop rbx
 * leave
 * add rsp, 8                      // skip eth_type_trans's frame
 * ret                             // return to its caller
 */
static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
					 void *rw_image, void *rw_image_end,
					 void *image,
					 const struct btf_func_model *m, u32 flags,
					 struct bpf_tramp_links *tlinks,
					 void *func_addr)
{
	int i, ret, nr_regs = m->nr_args, stack_size = 0;
	int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
	void *orig_call = func_addr;
	u8 **branches = NULL;
	u8 *prog;
	bool save_ret;

	/*
	 * F_INDIRECT is only compatible with F_RET_FENTRY_RET, it is
	 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME |
F_IP_ARG
	 * because @func_addr.
	 */
	WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
		     (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));

	/* extra registers for struct arguments */
	for (i = 0; i < m->nr_args; i++) {
		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
			nr_regs += (m->arg_size[i] + 7) / 8 - 1;
	}

	/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. 1-6
	 * are passed through regs, and the rest are passed on the stack.
	 */
	if (nr_regs > MAX_BPF_FUNC_ARGS)
		return -ENOTSUPP;

	/* Generated trampoline stack layout:
	 *
	 * RBP + 8            [ return address   ]
	 * RBP + 0            [ RBP              ]
	 *
	 * RBP - 8            [ return value     ] BPF_TRAMP_F_CALL_ORIG or
	 *                                         BPF_TRAMP_F_RET_FENTRY_RET flags
	 *
	 *                    [ reg_argN         ] always
	 *                    [ ...              ]
	 * RBP - regs_off     [ reg_arg1         ] program's ctx pointer
	 *
	 * RBP - nregs_off    [ regs count       ] always
	 *
	 * RBP - ip_off       [ traced function  ] BPF_TRAMP_F_IP_ARG flag
	 *
	 * RBP - rbx_off      [ rbx value        ] always
	 *
	 * RBP - run_ctx_off  [ bpf_tramp_run_ctx ]
	 *
	 *                      [ stack_argN ]      BPF_TRAMP_F_CALL_ORIG
	 *                      [ ...        ]
	 *                      [ stack_arg2 ]
	 * RBP - arg_stack_off  [ stack_arg1 ]
	 * RSP                  [ tail_call_cnt ]   BPF_TRAMP_F_TAIL_CALL_CTX
	 */

	/* room for return value of orig_call or fentry prog */
	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
	if (save_ret)
		stack_size += 8;

	stack_size += nr_regs * 8;
	regs_off = stack_size;

	/* regs count */
	stack_size += 8;
	nregs_off = stack_size;

	if (flags & BPF_TRAMP_F_IP_ARG)
		stack_size += 8; /* room for IP address argument */

	ip_off = stack_size;

	stack_size += 8;
	rbx_off = stack_size;

	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
	run_ctx_off = stack_size;

	if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
		/* the space used to pass arguments on the stack */
		stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
		/* make sure the stack pointer is 16-byte aligned if we
		 * need to pass arguments on the stack, which means
		 * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
		 * should be 16-byte aligned. The following code depends on
		 * stack_size already being 8-byte aligned.
		 */
		stack_size += (stack_size % 16) ? 0 : 8;
	}

	arg_stack_off = stack_size;

	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		if (is_endbr(*(u32 *)orig_call))
			orig_call += ENDBR_INSN_SIZE;
		orig_call += X86_PATCH_SIZE;
	}

	prog = rw_image;

	if (flags & BPF_TRAMP_F_INDIRECT) {
		/*
		 * Indirect call for bpf_struct_ops
		 */
		emit_cfi(&prog, cfi_get_func_hash(func_addr));
	} else {
		/*
		 * Direct-call fentry stub, as such it needs accounting for the
		 * __fentry__ call.
*/ x86_call_depth_emit_accounting(&prog, NULL, image); } EMIT1(0x55); /* push rbp */ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ if (!is_imm8(stack_size)) { /* sub rsp, stack_size */ EMIT3_off32(0x48, 0x81, 0xEC, stack_size); } else { /* sub rsp, stack_size */ EMIT4(0x48, 0x83, 0xEC, stack_size); } if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) EMIT1(0x50); /* push rax */ /* mov QWORD PTR [rbp - rbx_off], rbx */ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off); /* Store number of argument registers of the traced function: * mov rax, nr_regs * mov QWORD PTR [rbp - nregs_off], rax */ emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs); emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off); if (flags & BPF_TRAMP_F_IP_ARG) { /* Store IP address of the traced function: * movabsq rax, func_addr * mov QWORD PTR [rbp - ip_off], rax */ emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr); emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off); } save_args(m, &prog, regs_off, false); if (flags & BPF_TRAMP_F_CALL_ORIG) { /* arg1: mov rdi, im */ emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); if (emit_rsb_call(&prog, __bpf_tramp_enter, image + (prog - (u8 *)rw_image))) { ret = -EINVAL; goto cleanup; } } if (fentry->nr_links) { if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image)) return -EINVAL; } if (fmod_ret->nr_links) { branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *), GFP_KERNEL); if (!branches) return -ENOMEM; if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off, run_ctx_off, branches, image, rw_image)) { ret = -EINVAL; goto cleanup; } } if (flags & BPF_TRAMP_F_CALL_ORIG) { restore_regs(m, &prog, regs_off); save_args(m, &prog, arg_stack_off, true); if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { /* Before calling the original function, restore the * tail_call_cnt from stack to rax. */ RESTORE_TAIL_CALL_CNT(stack_size); } if (flags & BPF_TRAMP_F_ORIG_STACK) { emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8); EMIT2(0xff, 0xd3); /* call *rbx */ } else { /* call original function */ if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) { ret = -EINVAL; goto cleanup; } } /* remember return value in a stack for bpf prog to access */ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); im->ip_after_call = image + (prog - (u8 *)rw_image); emit_nops(&prog, X86_PATCH_SIZE); } if (fmod_ret->nr_links) { /* From Intel 64 and IA-32 Architectures Optimization * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler * Coding Rule 11: All branch targets should be 16-byte * aligned. */ emit_align(&prog, 16); /* Update the branches saved in invoke_bpf_mod_ret with the * aligned address of do_fexit. */ for (i = 0; i < fmod_ret->nr_links; i++) { emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image), image + (branches[i] - (u8 *)rw_image), X86_JNE); } } if (fexit->nr_links) { if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false, image, rw_image)) { ret = -EINVAL; goto cleanup; } } if (flags & BPF_TRAMP_F_RESTORE_REGS) restore_regs(m, &prog, regs_off); /* This needs to be done regardless. If there were fmod_ret programs, * the return value is only updated on the stack and still needs to be * restored to R0. 
*/ if (flags & BPF_TRAMP_F_CALL_ORIG) { im->ip_epilogue = image + (prog - (u8 *)rw_image); /* arg1: mov rdi, im */ emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) { ret = -EINVAL; goto cleanup; } } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { /* Before running the original function, restore the * tail_call_cnt from stack to rax. */ RESTORE_TAIL_CALL_CNT(stack_size); } /* restore return value of orig_call or fentry prog back into RAX */ if (save_ret) emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off); EMIT1(0xC9); /* leave */ if (flags & BPF_TRAMP_F_SKIP_FRAME) { /* skip our return address and return to parent */ EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ } emit_return(&prog, image + (prog - (u8 *)rw_image)); /* Make sure the trampoline generation logic doesn't overflow */ if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) { ret = -EFAULT; goto cleanup; } ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY; cleanup: kfree(branches); return ret; } void *arch_alloc_bpf_trampoline(unsigned int size) { return bpf_prog_pack_alloc(size, jit_fill_hole); } void arch_free_bpf_trampoline(void *image, unsigned int size) { bpf_prog_pack_free(image, size); } int arch_protect_bpf_trampoline(void *image, unsigned int size) { return 0; } int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, void *func_addr) { void *rw_image, *tmp; int ret; u32 size = image_end - image; /* rw_image doesn't need to be in module memory range, so we can * use kvmalloc. */ rw_image = kvmalloc(size, GFP_KERNEL); if (!rw_image) return -ENOMEM; ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m, flags, tlinks, func_addr); if (ret < 0) goto out; tmp = bpf_arch_text_copy(image, rw_image, size); if (IS_ERR(tmp)) ret = PTR_ERR(tmp); out: kvfree(rw_image); return ret; } int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, void *func_addr) { struct bpf_tramp_image im; void *image; int ret; /* Allocate a temporary buffer for __arch_prepare_bpf_trampoline(). * This will NOT cause fragmentation in direct map, as we do not * call set_memory_*() on this buffer. * * We cannot use kvmalloc here, because we need image to be in * module memory range. */ image = bpf_jit_alloc_exec(PAGE_SIZE); if (!image) return -ENOMEM; ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image, m, flags, tlinks, func_addr); bpf_jit_free_exec(image); return ret; } static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf) { u8 *jg_reloc, *prog = *pprog; int pivot, err, jg_bytes = 1; s64 jg_offset; if (a == b) { /* Leaf node of recursion, i.e. not a range of indices * anymore. */ EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ if (!is_simm32(progs[a])) return -1; EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a]); err = emit_cond_near_jump(&prog, /* je func */ (void *)progs[a], image + (prog - buf), X86_JE); if (err) return err; emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf)); *pprog = prog; return 0; } /* Not a leaf node, so we pivot, and recursively descend into * the lower and upper ranges. 
*/ pivot = (b - a) / 2; EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ if (!is_simm32(progs[a + pivot])) return -1; EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); if (pivot > 2) { /* jg upper_part */ /* Require near jump. */ jg_bytes = 4; EMIT2_off32(0x0F, X86_JG + 0x10, 0); } else { EMIT2(X86_JG, 0); } jg_reloc = prog; err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ progs, image, buf); if (err) return err; /* From Intel 64 and IA-32 Architectures Optimization * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler * Coding Rule 11: All branch targets should be 16-byte * aligned. */ emit_align(&prog, 16); jg_offset = prog - jg_reloc; emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes); err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */ b, progs, image, buf); if (err) return err; *pprog = prog; return 0; } static int cmp_ips(const void *a, const void *b) { const s64 *ipa = a; const s64 *ipb = b; if (*ipa > *ipb) return 1; if (*ipa < *ipb) return -1; return 0; } int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs) { u8 *prog = buf; sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL); return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf); } struct x64_jit_data { struct bpf_binary_header *rw_header; struct bpf_binary_header *header; int *addrs; u8 *image; int proglen; struct jit_context ctx; }; #define MAX_PASSES 20 #define PADDING_PASSES (MAX_PASSES - 5) struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { struct bpf_binary_header *rw_header = NULL; struct bpf_binary_header *header = NULL; struct bpf_prog *tmp, *orig_prog = prog; struct x64_jit_data *jit_data; int proglen, oldproglen = 0; struct jit_context ctx = {}; bool tmp_blinded = false; bool extra_pass = false; bool padding = false; u8 *rw_image = NULL; u8 *image = NULL; int *addrs; int pass; int i; if (!prog->jit_requested) return orig_prog; tmp = bpf_jit_blind_constants(prog); /* * If blinding was requested and we failed during blinding, * we must fall back to the interpreter. */ if (IS_ERR(tmp)) return orig_prog; if (tmp != prog) { tmp_blinded = true; prog = tmp; } jit_data = prog->aux->jit_data; if (!jit_data) { jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); if (!jit_data) { prog = orig_prog; goto out; } prog->aux->jit_data = jit_data; } addrs = jit_data->addrs; if (addrs) { ctx = jit_data->ctx; oldproglen = jit_data->proglen; image = jit_data->image; header = jit_data->header; rw_header = jit_data->rw_header; rw_image = (void *)rw_header + ((void *)image - (void *)header); extra_pass = true; padding = true; goto skip_init_addrs; } addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); if (!addrs) { prog = orig_prog; goto out_addrs; } /* * Before first pass, make a rough estimation of addrs[] * each BPF instruction is translated to less than 64 bytes */ for (proglen = 0, i = 0; i <= prog->len; i++) { proglen += 64; addrs[i] = proglen; } ctx.cleanup_addr = proglen; skip_init_addrs: /* * JITed image shrinks with every pass and the loop iterates * until the image stops shrinking. Very large BPF programs * may converge on the last pass. In such case do one more * pass to emit the final image. 
*/ for (pass = 0; pass < MAX_PASSES || image; pass++) { if (!padding && pass >= PADDING_PASSES) padding = true; proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding); if (proglen <= 0) { out_image: image = NULL; if (header) { bpf_arch_text_copy(&header->size, &rw_header->size, sizeof(rw_header->size)); bpf_jit_binary_pack_free(header, rw_header); } /* Fall back to interpreter mode */ prog = orig_prog; if (extra_pass) { prog->bpf_func = NULL; prog->jited = 0; prog->jited_len = 0; } goto out_addrs; } if (image) { if (proglen != oldproglen) { pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", proglen, oldproglen); goto out_image; } break; } if (proglen == oldproglen) { /* * The number of entries in extable is the number of BPF_LDX * insns that access kernel memory via "pointer to BTF type". * The verifier changed their opcode from LDX|MEM|size * to LDX|PROBE_MEM|size to make JITing easier. */ u32 align = __alignof__(struct exception_table_entry); u32 extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry); /* allocate module memory for x86 insns and extable */ header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size, &image, align, &rw_header, &rw_image, jit_fill_hole); if (!header) { prog = orig_prog; goto out_addrs; } prog->aux->extable = (void *) image + roundup(proglen, align); } oldproglen = proglen; cond_resched(); } if (bpf_jit_enable > 1) bpf_jit_dump(prog->len, proglen, pass + 1, rw_image); if (image) { if (!prog->is_func || extra_pass) { /* * bpf_jit_binary_pack_finalize fails in two scenarios: * 1) header is not pointing to proper module memory; * 2) the arch doesn't support bpf_arch_text_copy(). * * Both cases are serious bugs and justify WARN_ON. */ if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) { /* header has been freed */ header = NULL; goto out_image; } bpf_tail_call_direct_fixup(prog); } else { jit_data->addrs = addrs; jit_data->ctx = ctx; jit_data->proglen = proglen; jit_data->image = image; jit_data->header = header; jit_data->rw_header = rw_header; } /* * ctx.prog_offset is used when CFI preambles put code *before* * the function. See emit_cfi(). For FineIBT specifically this code * can also be executed and bpf_prog_kallsyms_add() will * generate an additional symbol to cover this, hence also * decrement proglen. */ prog->bpf_func = (void *)image + cfi_get_offset(); prog->jited = 1; prog->jited_len = proglen - cfi_get_offset(); } else { prog = orig_prog; } if (!image || !prog->is_func || extra_pass) { if (image) bpf_prog_fill_jited_linfo(prog, addrs + 1); out_addrs: kvfree(addrs); kfree(jit_data); prog->aux->jit_data = NULL; } out: if (tmp_blinded) bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog); return prog; } bool bpf_jit_supports_kfunc_call(void) { return true; } void *bpf_arch_text_copy(void *dst, void *src, size_t len) { if (text_poke_copy(dst, src, len) == NULL) return ERR_PTR(-EINVAL); return dst; } /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */ bool bpf_jit_supports_subprog_tailcalls(void) { return true; } bool bpf_jit_supports_percpu_insn(void) { return true; } void bpf_jit_free(struct bpf_prog *prog) { if (prog->jited) { struct x64_jit_data *jit_data = prog->aux->jit_data; struct bpf_binary_header *hdr; /* * If we fail the final pass of JIT (from jit_subprogs), * the program may not be finalized yet. Call finalize here * before freeing it. 
*/ if (jit_data) { bpf_jit_binary_pack_finalize(jit_data->header, jit_data->rw_header); kvfree(jit_data->addrs); kfree(jit_data); } prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset(); hdr = bpf_jit_binary_pack_hdr(prog); bpf_jit_binary_pack_free(hdr, NULL); WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog)); } bpf_prog_unlock_free(prog); } bool bpf_jit_supports_exceptions(void) { /* We unwind through both kernel frames (starting from within bpf_throw * call) and BPF frames. Therefore we require ORC unwinder to be enabled * to walk kernel frames and reach BPF frames in the stack trace. */ return IS_ENABLED(CONFIG_UNWINDER_ORC); } void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie) { #if defined(CONFIG_UNWINDER_ORC) struct unwind_state state; unsigned long addr; for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state); unwind_next_frame(&state)) { addr = unwind_get_return_address(&state); if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp)) break; } return; #endif WARN(1, "verification of programs using bpf_throw should have failed\n"); } void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke, struct bpf_prog *new, struct bpf_prog *old) { u8 *old_addr, *new_addr, *old_bypass_addr; int ret; old_bypass_addr = old ? NULL : poke->bypass_addr; old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL; new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL; /* * On program loading or teardown, the program's kallsym entry * might not be in place, so we use __bpf_arch_text_poke to skip * the kallsyms check. */ if (new) { ret = __bpf_arch_text_poke(poke->tailcall_target, BPF_MOD_JUMP, old_addr, new_addr); BUG_ON(ret < 0); if (!old) { ret = __bpf_arch_text_poke(poke->tailcall_bypass, BPF_MOD_JUMP, poke->bypass_addr, NULL); BUG_ON(ret < 0); } } else { ret = __bpf_arch_text_poke(poke->tailcall_bypass, BPF_MOD_JUMP, old_bypass_addr, poke->bypass_addr); BUG_ON(ret < 0); /* let other CPUs finish the execution of program * so that it will not possible to expose them * to invalid nop, stack unwind, nop state */ if (!ret) synchronize_rcu(); ret = __bpf_arch_text_poke(poke->tailcall_target, BPF_MOD_JUMP, old_addr, NULL); BUG_ON(ret < 0); } } bool bpf_jit_supports_arena(void) { return true; } bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) { if (!in_arena) return true; switch (insn->code) { case BPF_STX | BPF_ATOMIC | BPF_W: case BPF_STX | BPF_ATOMIC | BPF_DW: if (insn->imm == (BPF_AND | BPF_FETCH) || insn->imm == (BPF_OR | BPF_FETCH) || insn->imm == (BPF_XOR | BPF_FETCH)) return false; } return true; } bool bpf_jit_supports_ptr_xchg(void) { return true; } /* x86-64 JIT emits its own code to filter user addresses so return 0 here */ u64 bpf_arch_uaddress_limit(void) { return 0; }
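/*
 * Illustrative userspace sketch (not kernel code): the C-level equivalent of
 * the binary search that emit_bpf_dispatcher() above encodes as x86 compare
 * and conditional-jump instructions over a sorted table of program addresses
 * (the value being dispatched on lives in rdx in the emitted code, and
 * arch_prepare_bpf_dispatcher() sorts the table with cmp_ips() first).
 * dispatch_select() and cmp_addrs() are names invented for this example.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int cmp_addrs(const void *a, const void *b)
{
	const int64_t *pa = a, *pb = b;

	if (*pa > *pb)
		return 1;
	if (*pa < *pb)
		return -1;
	return 0;
}

/* Return the matching slot, or -1 when the address is not in the table. */
static int dispatch_select(const int64_t *progs, int lo, int hi, int64_t target)
{
	while (lo <= hi) {
		int pivot = lo + (hi - lo) / 2;

		if (progs[pivot] == target)
			return pivot;		/* direct jump in the emitted code */
		if (target > progs[pivot])
			lo = pivot + 1;		/* "jg upper_part" */
		else
			hi = pivot - 1;		/* fall through to the lower part */
	}
	return -1;				/* unknown program: take the fallback */
}

int main(void)
{
	int64_t progs[] = { 0x4400, 0x4000, 0x4200, 0x4600 };
	int n = sizeof(progs) / sizeof(progs[0]);

	qsort(progs, n, sizeof(progs[0]), cmp_addrs);

	printf("slot for 0x4200: %d\n", dispatch_select(progs, 0, n - 1, 0x4200));
	printf("slot for 0x5000: %d\n", dispatch_select(progs, 0, n - 1, 0x5000));
	return 0;
}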
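/*
 * Illustrative userspace sketch (not kernel code): the fixed-point iteration
 * behind the pass loop of bpf_int_jit_compile() above.  Jump encodings can
 * shrink once instruction offsets settle, so the generator is re-run until
 * the image length stops changing, and only then is the final image emitted.
 * NINSNS and jit_pass() are toy stand-ins for prog->len and do_jit().
 */
#include <stdio.h>
#include <string.h>

#define NINSNS 30

static int jit_pass(const int *addrs, int *new_addrs)
{
	int proglen = 0;

	for (int i = 0; i < NINSNS; i++) {
		int ilen = 4;	/* every toy insn is 4 bytes... */

		/*
		 * ...except insn 0, which "jumps" to the last one: 2 bytes if
		 * the previously estimated offset fits in a signed byte,
		 * otherwise 5 bytes.
		 */
		if (i == 0)
			ilen = (addrs[NINSNS - 1] - addrs[0]) < 128 ? 2 : 5;
		proglen += ilen;
		new_addrs[i] = proglen;
	}
	return proglen;
}

int main(void)
{
	int addrs[NINSNS], new_addrs[NINSNS];
	int oldproglen = 0, proglen = 0;

	/* Rough first estimate, as in bpf_int_jit_compile(): 64 bytes/insn. */
	for (int i = 0; i < NINSNS; i++)
		addrs[i] = 64 * (i + 1);

	for (int pass = 0; pass < 20; pass++) {
		proglen = jit_pass(addrs, new_addrs);
		printf("pass %d: %d bytes\n", pass, proglen);
		if (proglen == oldproglen)
			break;	/* image stopped shrinking: converged */
		memcpy(addrs, new_addrs, sizeof(addrs));
		oldproglen = proglen;
	}
	return 0;
}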
// SPDX-License-Identifier: GPL-2.0-only
/*
 * File: pep.c
 *
 * Phonet pipe protocol end point socket
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Author: Rémi Denis-Courmont
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/ioctls.h>
#include <linux/phonet.h>
#include <linux/module.h>
#include <net/phonet/phonet.h>
#include <net/phonet/pep.h>
#include <net/phonet/gprs.h>

/* sk_state values:
 * TCP_CLOSE		sock not in use yet
 * TCP_CLOSE_WAIT	disconnected pipe
 * TCP_LISTEN		listening pipe endpoint
 * TCP_SYN_RECV		connected pipe in disabled state
 * TCP_ESTABLISHED	connected pipe in enabled state
 *
 * pep_sock locking:
 *  - sk_state, hlist: sock lock needed
 *  - listener: read only
 *  - pipe_handle: read only
 */

#define CREDITS_MAX	10
#define CREDITS_THR	7

#define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */

/* Get the next TLV sub-block.
*/ static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen, void *buf) { void *data = NULL; struct { u8 sb_type; u8 sb_len; } *ph, h; int buflen = *plen; ph = skb_header_pointer(skb, 0, 2, &h); if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len)) return NULL; ph->sb_len -= 2; *ptype = ph->sb_type; *plen = ph->sb_len; if (buflen > ph->sb_len) buflen = ph->sb_len; data = skb_header_pointer(skb, 2, buflen, buf); __skb_pull(skb, 2 + ph->sb_len); return data; } static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload, int len, gfp_t priority) { struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority); if (!skb) return NULL; skb_set_owner_w(skb, sk); skb_reserve(skb, MAX_PNPIPE_HEADER); __skb_put(skb, len); skb_copy_to_linear_data(skb, payload, len); __skb_push(skb, sizeof(struct pnpipehdr)); skb_reset_transport_header(skb); return skb; } static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code, const void *data, int len, gfp_t priority) { const struct pnpipehdr *oph = pnp_hdr(oskb); struct pnpipehdr *ph; struct sk_buff *skb; struct sockaddr_pn peer; skb = pep_alloc_skb(sk, data, len, priority); if (!skb) return -ENOMEM; ph = pnp_hdr(skb); ph->utid = oph->utid; ph->message_id = oph->message_id + 1; /* REQ -> RESP */ ph->pipe_handle = oph->pipe_handle; ph->error_code = code; pn_skb_get_src_sockaddr(oskb, &peer); return pn_skb_send(sk, skb, &peer); } static int pep_indicate(struct sock *sk, u8 id, u8 code, const void *data, int len, gfp_t priority) { struct pep_sock *pn = pep_sk(sk); struct pnpipehdr *ph; struct sk_buff *skb; skb = pep_alloc_skb(sk, data, len, priority); if (!skb) return -ENOMEM; ph = pnp_hdr(skb); ph->utid = 0; ph->message_id = id; ph->pipe_handle = pn->pipe_handle; ph->error_code = code; return pn_skb_send(sk, skb, NULL); } #define PAD 0x00 static int pipe_handler_request(struct sock *sk, u8 id, u8 code, const void *data, int len) { struct pep_sock *pn = pep_sk(sk); struct pnpipehdr *ph; struct sk_buff *skb; skb = pep_alloc_skb(sk, data, len, GFP_KERNEL); if (!skb) return -ENOMEM; ph = pnp_hdr(skb); ph->utid = id; /* whatever */ ph->message_id = id; ph->pipe_handle = pn->pipe_handle; ph->error_code = code; return pn_skb_send(sk, skb, NULL); } static int pipe_handler_send_created_ind(struct sock *sk) { struct pep_sock *pn = pep_sk(sk); u8 data[4] = { PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2), pn->tx_fc, pn->rx_fc, }; return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */, data, 4, GFP_ATOMIC); } static int pep_accept_conn(struct sock *sk, struct sk_buff *skb) { static const u8 data[20] = { PAD, PAD, PAD, 2 /* sub-blocks */, PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD, PN_MULTI_CREDIT_FLOW_CONTROL, PN_ONE_CREDIT_FLOW_CONTROL, PN_LEGACY_FLOW_CONTROL, PAD, PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD, PN_MULTI_CREDIT_FLOW_CONTROL, PN_ONE_CREDIT_FLOW_CONTROL, PN_LEGACY_FLOW_CONTROL, PAD, }; might_sleep(); return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data), GFP_KERNEL); } static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code, gfp_t priority) { static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ }; WARN_ON(code == PN_PIPE_NO_ERROR); return pep_reply(sk, skb, code, data, sizeof(data), priority); } /* Control requests are not sent by the pipe service and have a specific * message format. 
*/ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code, gfp_t priority) { const struct pnpipehdr *oph = pnp_hdr(oskb); struct sk_buff *skb; struct pnpipehdr *ph; struct sockaddr_pn dst; u8 data[4] = { oph->pep_type, /* PEP type */ code, /* error code, at an unusual offset */ PAD, PAD, }; skb = pep_alloc_skb(sk, data, 4, priority); if (!skb) return -ENOMEM; ph = pnp_hdr(skb); ph->utid = oph->utid; ph->message_id = PNS_PEP_CTRL_RESP; ph->pipe_handle = oph->pipe_handle; ph->data0 = oph->data[0]; /* CTRL id */ pn_skb_get_src_sockaddr(oskb, &dst); return pn_skb_send(sk, skb, &dst); } static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority) { u8 data[4] = { type, PAD, PAD, status }; return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON, data, 4, priority); } /* Send our RX flow control information to the sender. * Socket must be locked. */ static void pipe_grant_credits(struct sock *sk, gfp_t priority) { struct pep_sock *pn = pep_sk(sk); BUG_ON(sk->sk_state != TCP_ESTABLISHED); switch (pn->rx_fc) { case PN_LEGACY_FLOW_CONTROL: /* TODO */ break; case PN_ONE_CREDIT_FLOW_CONTROL: if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL, PEP_IND_READY, priority) == 0) pn->rx_credits = 1; break; case PN_MULTI_CREDIT_FLOW_CONTROL: if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX) break; if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS, CREDITS_MAX - pn->rx_credits, priority) == 0) pn->rx_credits = CREDITS_MAX; break; } } static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) { struct pep_sock *pn = pep_sk(sk); struct pnpipehdr *hdr; int wake = 0; if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) return -EINVAL; hdr = pnp_hdr(skb); if (hdr->pep_type != PN_PEP_TYPE_COMMON) { net_dbg_ratelimited("Phonet unknown PEP type: %u\n", (unsigned int)hdr->pep_type); return -EOPNOTSUPP; } switch (hdr->data[0]) { case PN_PEP_IND_FLOW_CONTROL: switch (pn->tx_fc) { case PN_LEGACY_FLOW_CONTROL: switch (hdr->data[3]) { case PEP_IND_BUSY: atomic_set(&pn->tx_credits, 0); break; case PEP_IND_READY: atomic_set(&pn->tx_credits, wake = 1); break; } break; case PN_ONE_CREDIT_FLOW_CONTROL: if (hdr->data[3] == PEP_IND_READY) atomic_set(&pn->tx_credits, wake = 1); break; } break; case PN_PEP_IND_ID_MCFC_GRANT_CREDITS: if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL) break; atomic_add(wake = hdr->data[3], &pn->tx_credits); break; default: net_dbg_ratelimited("Phonet unknown PEP indication: %u\n", (unsigned int)hdr->data[0]); return -EOPNOTSUPP; } if (wake) sk->sk_write_space(sk); return 0; } static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb) { struct pep_sock *pn = pep_sk(sk); struct pnpipehdr *hdr = pnp_hdr(skb); u8 n_sb = hdr->data0; pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL; __skb_pull(skb, sizeof(*hdr)); while (n_sb > 0) { u8 type, buf[2], len = sizeof(buf); u8 *data = pep_get_sb(skb, &type, &len, buf); if (data == NULL) return -EINVAL; switch (type) { case PN_PIPE_SB_NEGOTIATED_FC: if (len < 2 || (data[0] | data[1]) > 3) break; pn->tx_fc = data[0] & 3; pn->rx_fc = data[1] & 3; break; } n_sb--; } return 0; } /* Queue an skb to a connected sock. * Socket lock must be held. 
*/ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) { struct pep_sock *pn = pep_sk(sk); struct pnpipehdr *hdr = pnp_hdr(skb); struct sk_buff_head *queue; int err = 0; BUG_ON(sk->sk_state == TCP_CLOSE_WAIT); switch (hdr->message_id) { case PNS_PEP_CONNECT_REQ: pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC); break; case PNS_PEP_DISCONNECT_REQ: pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); sk->sk_state = TCP_CLOSE_WAIT; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_state_change(sk); break; case PNS_PEP_ENABLE_REQ: /* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */ pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); break; case PNS_PEP_RESET_REQ: switch (hdr->state_after_reset) { case PN_PIPE_DISABLE: pn->init_enable = 0; break; case PN_PIPE_ENABLE: pn->init_enable = 1; break; default: /* not allowed to send an error here!? */ err = -EINVAL; goto out; } fallthrough; case PNS_PEP_DISABLE_REQ: atomic_set(&pn->tx_credits, 0); pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); break; case PNS_PEP_CTRL_REQ: if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) { atomic_inc(&sk->sk_drops); break; } __skb_pull(skb, 4); queue = &pn->ctrlreq_queue; goto queue; case PNS_PIPE_ALIGNED_DATA: __skb_pull(skb, 1); fallthrough; case PNS_PIPE_DATA: __skb_pull(skb, 3); /* Pipe data header */ if (!pn_flow_safe(pn->rx_fc)) { err = sock_queue_rcv_skb(sk, skb); if (!err) return NET_RX_SUCCESS; err = -ENOBUFS; break; } if (pn->rx_credits == 0) { atomic_inc(&sk->sk_drops); err = -ENOBUFS; break; } pn->rx_credits--; queue = &sk->sk_receive_queue; goto queue; case PNS_PEP_STATUS_IND: pipe_rcv_status(sk, skb); break; case PNS_PIPE_REDIRECTED_IND: err = pipe_rcv_created(sk, skb); break; case PNS_PIPE_CREATED_IND: err = pipe_rcv_created(sk, skb); if (err) break; fallthrough; case PNS_PIPE_RESET_IND: if (!pn->init_enable) break; fallthrough; case PNS_PIPE_ENABLED_IND: if (!pn_flow_safe(pn->tx_fc)) { atomic_set(&pn->tx_credits, 1); sk->sk_write_space(sk); } if (sk->sk_state == TCP_ESTABLISHED) break; /* Nothing to do */ sk->sk_state = TCP_ESTABLISHED; pipe_grant_credits(sk, GFP_ATOMIC); break; case PNS_PIPE_DISABLED_IND: sk->sk_state = TCP_SYN_RECV; pn->rx_credits = 0; break; default: net_dbg_ratelimited("Phonet unknown PEP message: %u\n", hdr->message_id); err = -EINVAL; } out: kfree_skb(skb); return (err == -ENOBUFS) ? NET_RX_DROP : NET_RX_SUCCESS; queue: skb->dev = NULL; skb_set_owner_r(skb, sk); skb_queue_tail(queue, skb); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk); return NET_RX_SUCCESS; } /* Destroy connected sock. 
*/ static void pipe_destruct(struct sock *sk) { struct pep_sock *pn = pep_sk(sk); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&pn->ctrlreq_queue); } static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n) { unsigned int i; u8 final_fc = PN_NO_FLOW_CONTROL; for (i = 0; i < n; i++) { u8 fc = fcs[i]; if (fc > final_fc && fc < PN_MAX_FLOW_CONTROL) final_fc = fc; } return final_fc; } static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb) { struct pep_sock *pn = pep_sk(sk); struct pnpipehdr *hdr; u8 n_sb; if (!pskb_pull(skb, sizeof(*hdr) + 4)) return -EINVAL; hdr = pnp_hdr(skb); if (hdr->error_code != PN_PIPE_NO_ERROR) return -ECONNREFUSED; /* Parse sub-blocks */ n_sb = hdr->data[3]; while (n_sb > 0) { u8 type, buf[6], len = sizeof(buf); const u8 *data = pep_get_sb(skb, &type, &len, buf); if (data == NULL) return -EINVAL; switch (type) { case PN_PIPE_SB_REQUIRED_FC_TX: if (len < 2 || len < data[0]) break; pn->tx_fc = pipe_negotiate_fc(data + 2, len - 2); break; case PN_PIPE_SB_PREFERRED_FC_RX: if (len < 2 || len < data[0]) break; pn->rx_fc = pipe_negotiate_fc(data + 2, len - 2); break; } n_sb--; } return pipe_handler_send_created_ind(sk); } static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb) { struct pnpipehdr *hdr = pnp_hdr(skb); if (hdr->error_code != PN_PIPE_NO_ERROR) return -ECONNREFUSED; return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */, NULL, 0, GFP_ATOMIC); } static void pipe_start_flow_control(struct sock *sk) { struct pep_sock *pn = pep_sk(sk); if (!pn_flow_safe(pn->tx_fc)) { atomic_set(&pn->tx_credits, 1); sk->sk_write_space(sk); } pipe_grant_credits(sk, GFP_ATOMIC); } /* Queue an skb to an actively connected sock. * Socket lock must be held. */ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb) { struct pep_sock *pn = pep_sk(sk); struct pnpipehdr *hdr = pnp_hdr(skb); int err = NET_RX_SUCCESS; switch (hdr->message_id) { case PNS_PIPE_ALIGNED_DATA: __skb_pull(skb, 1); fallthrough; case PNS_PIPE_DATA: __skb_pull(skb, 3); /* Pipe data header */ if (!pn_flow_safe(pn->rx_fc)) { err = sock_queue_rcv_skb(sk, skb); if (!err) return NET_RX_SUCCESS; err = NET_RX_DROP; break; } if (pn->rx_credits == 0) { atomic_inc(&sk->sk_drops); err = NET_RX_DROP; break; } pn->rx_credits--; skb->dev = NULL; skb_set_owner_r(skb, sk); skb_queue_tail(&sk->sk_receive_queue, skb); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk); return NET_RX_SUCCESS; case PNS_PEP_CONNECT_RESP: if (sk->sk_state != TCP_SYN_SENT) break; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_state_change(sk); if (pep_connresp_rcv(sk, skb)) { sk->sk_state = TCP_CLOSE_WAIT; break; } if (pn->init_enable == PN_PIPE_DISABLE) sk->sk_state = TCP_SYN_RECV; else { sk->sk_state = TCP_ESTABLISHED; pipe_start_flow_control(sk); } break; case PNS_PEP_ENABLE_RESP: if (sk->sk_state != TCP_SYN_SENT) break; if (pep_enableresp_rcv(sk, skb)) { sk->sk_state = TCP_CLOSE_WAIT; break; } sk->sk_state = TCP_ESTABLISHED; pipe_start_flow_control(sk); break; case PNS_PEP_DISCONNECT_RESP: /* sock should already be dead, nothing to do */ break; case PNS_PEP_STATUS_IND: pipe_rcv_status(sk, skb); break; } kfree_skb(skb); return err; } /* Listening sock must be locked */ static struct sock *pep_find_pipe(const struct hlist_head *hlist, const struct sockaddr_pn *dst, u8 pipe_handle) { struct sock *sknode; u16 dobj = pn_sockaddr_get_object(dst); sk_for_each(sknode, hlist) { struct pep_sock *pnnode = pep_sk(sknode); /* Ports match, but addresses might not: */ if (pnnode->pn_sk.sobject != dobj) continue; if 
(pnnode->pipe_handle != pipe_handle) continue; if (sknode->sk_state == TCP_CLOSE_WAIT) continue; sock_hold(sknode); return sknode; } return NULL; } /* * Deliver an skb to a listening sock. * Socket lock must be held. * We then queue the skb to the right connected sock (if any). */ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb) { struct pep_sock *pn = pep_sk(sk); struct sock *sknode; struct pnpipehdr *hdr; struct sockaddr_pn dst; u8 pipe_handle; if (!pskb_may_pull(skb, sizeof(*hdr))) goto drop; hdr = pnp_hdr(skb); pipe_handle = hdr->pipe_handle; if (pipe_handle == PN_PIPE_INVALID_HANDLE) goto drop; pn_skb_get_dst_sockaddr(skb, &dst); /* Look for an existing pipe handle */ sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle); if (sknode) return sk_receive_skb(sknode, skb, 1); switch (hdr->message_id) { case PNS_PEP_CONNECT_REQ: if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) { pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC); break; } skb_queue_head(&sk->sk_receive_queue, skb); sk_acceptq_added(sk); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk); return NET_RX_SUCCESS; case PNS_PEP_DISCONNECT_REQ: pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC); break; case PNS_PEP_CTRL_REQ: pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC); break; case PNS_PEP_RESET_REQ: case PNS_PEP_ENABLE_REQ: case PNS_PEP_DISABLE_REQ: /* invalid handle is not even allowed here! */ break; default: if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN|TCPF_CLOSE_WAIT)) /* actively connected socket */ return pipe_handler_do_rcv(sk, skb); } drop: kfree_skb(skb); return NET_RX_SUCCESS; } static int pipe_do_remove(struct sock *sk) { struct pep_sock *pn = pep_sk(sk); struct pnpipehdr *ph; struct sk_buff *skb; skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL); if (!skb) return -ENOMEM; ph = pnp_hdr(skb); ph->utid = 0; ph->message_id = PNS_PIPE_REMOVE_REQ; ph->pipe_handle = pn->pipe_handle; ph->data0 = PAD; return pn_skb_send(sk, skb, NULL); } /* associated socket ceases to exist */ static void pep_sock_close(struct sock *sk, long timeout) { struct pep_sock *pn = pep_sk(sk); int ifindex = 0; sock_hold(sk); /* keep a reference after sk_common_release() */ sk_common_release(sk); lock_sock(sk); if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) { if (sk->sk_backlog_rcv == pipe_do_rcv) /* Forcefully remove dangling Phonet pipe */ pipe_do_remove(sk); else pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD, NULL, 0); } sk->sk_state = TCP_CLOSE; ifindex = pn->ifindex; pn->ifindex = 0; release_sock(sk); if (ifindex) gprs_detach(sk); sock_put(sk); } static struct sock *pep_sock_accept(struct sock *sk, struct proto_accept_arg *arg) { struct pep_sock *pn = pep_sk(sk), *newpn; struct sock *newsk = NULL; struct sk_buff *skb; struct pnpipehdr *hdr; struct sockaddr_pn dst, src; int err; u16 peer_type; u8 pipe_handle, enabled, n_sb; u8 aligned = 0; skb = skb_recv_datagram(sk, (arg->flags & O_NONBLOCK) ? 
MSG_DONTWAIT : 0, &arg->err); if (!skb) return NULL; lock_sock(sk); if (sk->sk_state != TCP_LISTEN) { err = -EINVAL; goto drop; } sk_acceptq_removed(sk); err = -EPROTO; if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) goto drop; hdr = pnp_hdr(skb); pipe_handle = hdr->pipe_handle; switch (hdr->state_after_connect) { case PN_PIPE_DISABLE: enabled = 0; break; case PN_PIPE_ENABLE: enabled = 1; break; default: pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM, GFP_KERNEL); goto drop; } peer_type = hdr->other_pep_type << 8; /* Parse sub-blocks (options) */ n_sb = hdr->data[3]; while (n_sb > 0) { u8 type, buf[1], len = sizeof(buf); const u8 *data = pep_get_sb(skb, &type, &len, buf); if (data == NULL) goto drop; switch (type) { case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE: if (len < 1) goto drop; peer_type = (peer_type & 0xff00) | data[0]; break; case PN_PIPE_SB_ALIGNED_DATA: aligned = data[0] != 0; break; } n_sb--; } /* Check for duplicate pipe handle */ newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle); if (unlikely(newsk)) { __sock_put(newsk); newsk = NULL; pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL); goto drop; } /* Create a new to-be-accepted sock */ newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot, arg->kern); if (!newsk) { pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL); err = -ENOBUFS; goto drop; } sock_init_data(NULL, newsk); newsk->sk_state = TCP_SYN_RECV; newsk->sk_backlog_rcv = pipe_do_rcv; newsk->sk_protocol = sk->sk_protocol; newsk->sk_destruct = pipe_destruct; newpn = pep_sk(newsk); pn_skb_get_dst_sockaddr(skb, &dst); pn_skb_get_src_sockaddr(skb, &src); newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst); newpn->pn_sk.dobject = pn_sockaddr_get_object(&src); newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst); sock_hold(sk); newpn->listener = sk; skb_queue_head_init(&newpn->ctrlreq_queue); newpn->pipe_handle = pipe_handle; atomic_set(&newpn->tx_credits, 0); newpn->ifindex = 0; newpn->peer_type = peer_type; newpn->rx_credits = 0; newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL; newpn->init_enable = enabled; newpn->aligned = aligned; err = pep_accept_conn(newsk, skb); if (err) { __sock_put(sk); sock_put(newsk); newsk = NULL; goto drop; } sk_add_node(newsk, &pn->hlist); drop: release_sock(sk); kfree_skb(skb); arg->err = err; return newsk; } static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len) { struct pep_sock *pn = pep_sk(sk); int err; u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD }; if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE) pn->pipe_handle = 1; /* anything but INVALID_HANDLE */ err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ, pn->init_enable, data, 4); if (err) { pn->pipe_handle = PN_PIPE_INVALID_HANDLE; return err; } sk->sk_state = TCP_SYN_SENT; return 0; } static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len) { int err; err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD, NULL, 0); if (err) return err; sk->sk_state = TCP_SYN_SENT; return 0; } static unsigned int pep_first_packet_length(struct sock *sk) { struct pep_sock *pn = pep_sk(sk); struct sk_buff_head *q; struct sk_buff *skb; unsigned int len = 0; bool found = false; if (sock_flag(sk, SOCK_URGINLINE)) { q = &pn->ctrlreq_queue; spin_lock_bh(&q->lock); skb = skb_peek(q); if (skb) { len = skb->len; found = true; } spin_unlock_bh(&q->lock); } if (likely(!found)) { q = &sk->sk_receive_queue; spin_lock_bh(&q->lock); skb = skb_peek(q); if (skb) len = skb->len; spin_unlock_bh(&q->lock); } return len; } static int pep_ioctl(struct 
sock *sk, int cmd, int *karg) { struct pep_sock *pn = pep_sk(sk); int ret = -ENOIOCTLCMD; switch (cmd) { case SIOCINQ: if (sk->sk_state == TCP_LISTEN) { ret = -EINVAL; break; } *karg = pep_first_packet_length(sk); ret = 0; break; case SIOCPNENABLEPIPE: lock_sock(sk); if (sk->sk_state == TCP_SYN_SENT) ret = -EBUSY; else if (sk->sk_state == TCP_ESTABLISHED) ret = -EISCONN; else if (!pn->pn_sk.sobject) ret = -EADDRNOTAVAIL; else ret = pep_sock_enable(sk, NULL, 0); release_sock(sk); break; } return ret; } static int pep_init(struct sock *sk) { struct pep_sock *pn = pep_sk(sk); sk->sk_destruct = pipe_destruct; INIT_HLIST_HEAD(&pn->hlist); pn->listener = NULL; skb_queue_head_init(&pn->ctrlreq_queue); atomic_set(&pn->tx_credits, 0); pn->ifindex = 0; pn->peer_type = 0; pn->pipe_handle = PN_PIPE_INVALID_HANDLE; pn->rx_credits = 0; pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL; pn->init_enable = 1; pn->aligned = 0; return 0; } static int pep_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen) { struct pep_sock *pn = pep_sk(sk); int val = 0, err = 0; if (level != SOL_PNPIPE) return -ENOPROTOOPT; if (optlen >= sizeof(int)) { if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; } lock_sock(sk); switch (optname) { case PNPIPE_ENCAP: if (val && val != PNPIPE_ENCAP_IP) { err = -EINVAL; break; } if (!pn->ifindex == !val) break; /* Nothing to do! */ if (!capable(CAP_NET_ADMIN)) { err = -EPERM; break; } if (val) { release_sock(sk); err = gprs_attach(sk); if (err > 0) { pn->ifindex = err; err = 0; } } else { pn->ifindex = 0; release_sock(sk); gprs_detach(sk); err = 0; } goto out_norel; case PNPIPE_HANDLE: if ((sk->sk_state == TCP_CLOSE) && (val >= 0) && (val < PN_PIPE_INVALID_HANDLE)) pn->pipe_handle = val; else err = -EINVAL; break; case PNPIPE_INITSTATE: pn->init_enable = !!val; break; default: err = -ENOPROTOOPT; } release_sock(sk); out_norel: return err; } static int pep_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { struct pep_sock *pn = pep_sk(sk); int len, val; if (level != SOL_PNPIPE) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; switch (optname) { case PNPIPE_ENCAP: val = pn->ifindex ? 
PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE; break; case PNPIPE_IFINDEX: val = pn->ifindex; break; case PNPIPE_HANDLE: val = pn->pipe_handle; if (val == PN_PIPE_INVALID_HANDLE) return -EINVAL; break; case PNPIPE_INITSTATE: val = pn->init_enable; break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, sizeof(int), len); if (put_user(len, optlen)) return -EFAULT; if (put_user(val, (int __user *) optval)) return -EFAULT; return 0; } static int pipe_skb_send(struct sock *sk, struct sk_buff *skb) { struct pep_sock *pn = pep_sk(sk); struct pnpipehdr *ph; int err; if (pn_flow_safe(pn->tx_fc) && !atomic_add_unless(&pn->tx_credits, -1, 0)) { kfree_skb(skb); return -ENOBUFS; } skb_push(skb, 3 + pn->aligned); skb_reset_transport_header(skb); ph = pnp_hdr(skb); ph->utid = 0; if (pn->aligned) { ph->message_id = PNS_PIPE_ALIGNED_DATA; ph->data0 = 0; /* padding */ } else ph->message_id = PNS_PIPE_DATA; ph->pipe_handle = pn->pipe_handle; err = pn_skb_send(sk, skb, NULL); if (err && pn_flow_safe(pn->tx_fc)) atomic_inc(&pn->tx_credits); return err; } static int pep_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct pep_sock *pn = pep_sk(sk); struct sk_buff *skb; long timeo; int flags = msg->msg_flags; int err, done; if (len > USHRT_MAX) return -EMSGSIZE; if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL| MSG_CMSG_COMPAT)) || !(msg->msg_flags & MSG_EOR)) return -EOPNOTSUPP; skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len, flags & MSG_DONTWAIT, &err); if (!skb) return err; skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned); err = memcpy_from_msg(skb_put(skb, len), msg, len); if (err < 0) goto outfree; lock_sock(sk); timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) { err = -ENOTCONN; goto out; } if (sk->sk_state != TCP_ESTABLISHED) { /* Wait until the pipe gets to enabled state */ disabled: err = sk_stream_wait_connect(sk, &timeo); if (err) goto out; if (sk->sk_state == TCP_CLOSE_WAIT) { err = -ECONNRESET; goto out; } } BUG_ON(sk->sk_state != TCP_ESTABLISHED); /* Wait until flow control allows TX */ done = atomic_read(&pn->tx_credits); while (!done) { DEFINE_WAIT_FUNC(wait, woken_wake_function); if (!timeo) { err = -EAGAIN; goto out; } if (signal_pending(current)) { err = sock_intr_errno(timeo); goto out; } add_wait_queue(sk_sleep(sk), &wait); done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits), &wait); remove_wait_queue(sk_sleep(sk), &wait); if (sk->sk_state != TCP_ESTABLISHED) goto disabled; } err = pipe_skb_send(sk, skb); if (err >= 0) err = len; /* success! 
*/ skb = NULL; out: release_sock(sk); outfree: kfree_skb(skb); return err; } int pep_writeable(struct sock *sk) { struct pep_sock *pn = pep_sk(sk); return atomic_read(&pn->tx_credits); } int pep_write(struct sock *sk, struct sk_buff *skb) { struct sk_buff *rskb, *fs; int flen = 0; if (pep_sk(sk)->aligned) return pipe_skb_send(sk, skb); rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC); if (!rskb) { kfree_skb(skb); return -ENOMEM; } skb_shinfo(rskb)->frag_list = skb; rskb->len += skb->len; rskb->data_len += rskb->len; rskb->truesize += rskb->len; /* Avoid nested fragments */ skb_walk_frags(skb, fs) flen += fs->len; skb->next = skb_shinfo(skb)->frag_list; skb_frag_list_init(skb); skb->len -= flen; skb->data_len -= flen; skb->truesize -= flen; skb_reserve(rskb, MAX_PHONET_HEADER + 3); return pipe_skb_send(sk, rskb); } struct sk_buff *pep_read(struct sock *sk) { struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue); if (sk->sk_state == TCP_ESTABLISHED) pipe_grant_credits(sk, GFP_ATOMIC); return skb; } static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len) { struct sk_buff *skb; int err; if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL| MSG_NOSIGNAL|MSG_CMSG_COMPAT)) return -EOPNOTSUPP; if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE))) return -ENOTCONN; if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) { /* Dequeue and acknowledge control request */ struct pep_sock *pn = pep_sk(sk); if (flags & MSG_PEEK) return -EOPNOTSUPP; skb = skb_dequeue(&pn->ctrlreq_queue); if (skb) { pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR, GFP_KERNEL); msg->msg_flags |= MSG_OOB; goto copy; } if (flags & MSG_OOB) return -EINVAL; } skb = skb_recv_datagram(sk, flags, &err); lock_sock(sk); if (skb == NULL) { if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT) err = -ECONNRESET; release_sock(sk); return err; } if (sk->sk_state == TCP_ESTABLISHED) pipe_grant_credits(sk, GFP_KERNEL); release_sock(sk); copy: msg->msg_flags |= MSG_EOR; if (skb->len > len) msg->msg_flags |= MSG_TRUNC; else len = skb->len; err = skb_copy_datagram_msg(skb, 0, msg, len); if (!err) err = (flags & MSG_TRUNC) ? skb->len : len; skb_free_datagram(sk, skb); return err; } static void pep_sock_unhash(struct sock *sk) { struct pep_sock *pn = pep_sk(sk); struct sock *skparent = NULL; lock_sock(sk); if (pn->listener != NULL) { skparent = pn->listener; pn->listener = NULL; release_sock(sk); pn = pep_sk(skparent); lock_sock(skparent); sk_del_node_init(sk); sk = skparent; } /* Unhash a listening sock only when it is closed * and all of its active connected pipes are closed. 
*/ if (hlist_empty(&pn->hlist)) pn_sock_unhash(&pn->pn_sk.sk); release_sock(sk); if (skparent) sock_put(skparent); } static struct proto pep_proto = { .close = pep_sock_close, .accept = pep_sock_accept, .connect = pep_sock_connect, .ioctl = pep_ioctl, .init = pep_init, .setsockopt = pep_setsockopt, .getsockopt = pep_getsockopt, .sendmsg = pep_sendmsg, .recvmsg = pep_recvmsg, .backlog_rcv = pep_do_rcv, .hash = pn_sock_hash, .unhash = pep_sock_unhash, .get_port = pn_sock_get_port, .obj_size = sizeof(struct pep_sock), .owner = THIS_MODULE, .name = "PNPIPE", }; static const struct phonet_protocol pep_pn_proto = { .ops = &phonet_stream_ops, .prot = &pep_proto, .sock_type = SOCK_SEQPACKET, }; static int __init pep_register(void) { return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto); } static void __exit pep_unregister(void) { phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto); } module_init(pep_register); module_exit(pep_unregister); MODULE_AUTHOR("Remi Denis-Courmont, Nokia"); MODULE_DESCRIPTION("Phonet pipe protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE);
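/*
 * Illustrative userspace sketch (not kernel code): the multi-credit flow
 * control used by the pipe code above, with both ends of the pipe modelled
 * in one process.  The receiving end tops the pool back up to CREDITS_MAX
 * only after at least CREDITS_THR of the granted credits have been consumed
 * (see pipe_grant_credits()), and the sending end spends one credit per data
 * packet and refuses to transmit at zero (see pipe_skb_send()).
 * maybe_grant_credits() and send_packet() are names invented for this sketch.
 */
#include <stdio.h>

#define CREDITS_MAX	10
#define CREDITS_THR	7

static int rx_credits;	/* credits granted by the receiver, not yet used up */
static int tx_credits;	/* credits the sender may still spend */

static void maybe_grant_credits(void)
{
	/* Same test as the PN_MULTI_CREDIT_FLOW_CONTROL case above. */
	if (rx_credits + CREDITS_THR > CREDITS_MAX)
		return;
	tx_credits += CREDITS_MAX - rx_credits;
	rx_credits = CREDITS_MAX;
}

static int send_packet(int seq)
{
	if (tx_credits == 0)
		return -1;	/* maps to -ENOBUFS in pipe_skb_send() */
	tx_credits--;		/* sender side */
	rx_credits--;		/* receiver side, on delivery */
	printf("sent packet %d, %d credit(s) left\n", seq, tx_credits);
	return 0;
}

int main(void)
{
	maybe_grant_credits();
	for (int i = 0; i < 12; i++) {
		if (send_packet(i) < 0) {
			printf("packet %d blocked, re-granting credits\n", i);
			maybe_grant_credits();
			i--;	/* retry the same packet after the refill */
		}
	}
	return 0;
}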
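/*
 * Illustrative userspace sketch (not kernel code): walking the TLV sub-blocks
 * that pep_get_sb() above parses.  Each sub-block carries a one-byte type and
 * a one-byte length that includes the two header bytes, and pep_sb_size()
 * (copied from pep.c above) pads a payload to that framing, rounded up to a
 * 32-bit boundary.  parse_sub_blocks() is a name invented for this example.
 */
#include <stdint.h>
#include <stdio.h>

#define pep_sb_size(s) (((s) + 5) & ~3)	/* 2-bytes head, 32-bits aligned */

static void parse_sub_blocks(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + 2 <= len) {
		int type = buf[off];
		int sb_len = buf[off + 1];	/* includes the 2 header bytes */

		if (sb_len < 2 || off + sb_len > len)
			break;			/* malformed, as in pep_get_sb() */
		printf("sub-block type %d with %d payload byte(s)\n",
		       type, sb_len - 2);
		off += sb_len;
	}
}

int main(void)
{
	/* An arbitrary sub-block: type 3, length 4, two payload bytes. */
	uint8_t buf[] = { 0x03, 0x04, 0x02, 0x02 };

	printf("pep_sb_size(2) = %d\n", pep_sb_size(2));	/* prints 4 */
	parse_sub_blocks(buf, sizeof(buf));
	return 0;
}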
2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 /* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. Copyright 2023-2024 NXP Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #ifndef __HCI_CORE_H #define __HCI_CORE_H #include <linux/idr.h> #include <linux/leds.h> #include <linux/rculist.h> #include <net/bluetooth/hci.h> #include <net/bluetooth/hci_sync.h> #include <net/bluetooth/hci_sock.h> #include <net/bluetooth/coredump.h> /* HCI priority */ #define HCI_PRIO_MAX 7 /* HCI maximum id value */ #define HCI_MAX_ID 10000 /* HCI Core structures */ struct inquiry_data { bdaddr_t bdaddr; __u8 pscan_rep_mode; __u8 pscan_period_mode; __u8 pscan_mode; __u8 dev_class[3]; __le16 clock_offset; __s8 rssi; __u8 ssp_mode; }; struct inquiry_entry { struct list_head all; /* inq_cache.all */ struct list_head list; /* unknown or resolve */ enum { NAME_NOT_KNOWN, NAME_NEEDED, NAME_PENDING, NAME_KNOWN, } name_state; __u32 timestamp; struct inquiry_data data; }; struct discovery_state { int type; enum { DISCOVERY_STOPPED, DISCOVERY_STARTING, DISCOVERY_FINDING, DISCOVERY_RESOLVING, DISCOVERY_STOPPING, } state; struct list_head all; /* All devices found during inquiry */ struct list_head unknown; /* Name state not known */ struct list_head resolve; /* Name needs to be resolved */ __u32 timestamp; bdaddr_t last_adv_addr; u8 last_adv_addr_type; s8 last_adv_rssi; u32 last_adv_flags; u8 last_adv_data[HCI_MAX_EXT_AD_LENGTH]; u8 last_adv_data_len; bool report_invalid_rssi; bool result_filtering; bool limited; s8 rssi; u16 uuid_count; u8 (*uuids)[16]; unsigned long name_resolve_timeout; }; #define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ enum suspend_tasks { SUSPEND_PAUSE_DISCOVERY, SUSPEND_UNPAUSE_DISCOVERY, SUSPEND_PAUSE_ADVERTISING, SUSPEND_UNPAUSE_ADVERTISING, SUSPEND_SCAN_DISABLE, SUSPEND_SCAN_ENABLE, SUSPEND_DISCONNECTING, SUSPEND_POWERING_DOWN, SUSPEND_PREPARE_NOTIFIER, SUSPEND_SET_ADV_FILTER, __SUSPEND_NUM_TASKS }; enum suspended_state { BT_RUNNING = 0, BT_SUSPEND_DISCONNECT, BT_SUSPEND_CONFIGURE_WAKE, }; struct hci_conn_hash { struct list_head list; unsigned int acl_num; unsigned int sco_num; unsigned int iso_num; unsigned int le_num; unsigned int le_num_peripheral; }; struct bdaddr_list { struct list_head list; bdaddr_t bdaddr; u8 bdaddr_type; }; struct codec_list { struct list_head list; u8 id; __u16 cid; __u16 vid; u8 transport; u8 num_caps; u32 len; struct hci_codec_caps caps[]; }; struct bdaddr_list_with_irk { struct 
list_head list; bdaddr_t bdaddr; u8 bdaddr_type; u8 peer_irk[16]; u8 local_irk[16]; }; /* Bitmask of connection flags */ enum hci_conn_flags { HCI_CONN_FLAG_REMOTE_WAKEUP = 1, HCI_CONN_FLAG_DEVICE_PRIVACY = 2, }; typedef u8 hci_conn_flags_t; struct bdaddr_list_with_flags { struct list_head list; bdaddr_t bdaddr; u8 bdaddr_type; hci_conn_flags_t flags; }; struct bt_uuid { struct list_head list; u8 uuid[16]; u8 size; u8 svc_hint; }; struct blocked_key { struct list_head list; struct rcu_head rcu; u8 type; u8 val[16]; }; struct smp_csrk { bdaddr_t bdaddr; u8 bdaddr_type; u8 type; u8 val[16]; }; struct smp_ltk { struct list_head list; struct rcu_head rcu; bdaddr_t bdaddr; u8 bdaddr_type; u8 authenticated; u8 type; u8 enc_size; __le16 ediv; __le64 rand; u8 val[16]; }; struct smp_irk { struct list_head list; struct rcu_head rcu; bdaddr_t rpa; bdaddr_t bdaddr; u8 addr_type; u8 val[16]; }; struct link_key { struct list_head list; struct rcu_head rcu; bdaddr_t bdaddr; u8 type; u8 val[HCI_LINK_KEY_SIZE]; u8 pin_len; }; struct oob_data { struct list_head list; bdaddr_t bdaddr; u8 bdaddr_type; u8 present; u8 hash192[16]; u8 rand192[16]; u8 hash256[16]; u8 rand256[16]; }; struct adv_info { struct list_head list; bool enabled; bool pending; bool periodic; __u8 mesh; __u8 instance; __u8 handle; __u32 flags; __u16 timeout; __u16 remaining_time; __u16 duration; __u16 adv_data_len; __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; bool adv_data_changed; __u16 scan_rsp_len; __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; bool scan_rsp_changed; __u16 per_adv_data_len; __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH]; __s8 tx_power; __u32 min_interval; __u32 max_interval; bdaddr_t random_addr; bool rpa_expired; struct delayed_work rpa_expired_cb; }; #define HCI_MAX_ADV_INSTANCES 5 #define HCI_DEFAULT_ADV_DURATION 2 #define HCI_ADV_TX_POWER_NO_PREFERENCE 0x7F #define DATA_CMP(_d1, _l1, _d2, _l2) \ (_l1 == _l2 ? 
memcmp(_d1, _d2, _l1) : _l1 - _l2) #define ADV_DATA_CMP(_adv, _data, _len) \ DATA_CMP((_adv)->adv_data, (_adv)->adv_data_len, _data, _len) #define SCAN_RSP_CMP(_adv, _data, _len) \ DATA_CMP((_adv)->scan_rsp_data, (_adv)->scan_rsp_len, _data, _len) struct monitored_device { struct list_head list; bdaddr_t bdaddr; __u8 addr_type; __u16 handle; bool notified; }; struct adv_pattern { struct list_head list; __u8 ad_type; __u8 offset; __u8 length; __u8 value[HCI_MAX_EXT_AD_LENGTH]; }; struct adv_rssi_thresholds { __s8 low_threshold; __s8 high_threshold; __u16 low_threshold_timeout; __u16 high_threshold_timeout; __u8 sampling_period; }; struct adv_monitor { struct list_head patterns; struct adv_rssi_thresholds rssi; __u16 handle; enum { ADV_MONITOR_STATE_NOT_REGISTERED, ADV_MONITOR_STATE_REGISTERED, ADV_MONITOR_STATE_OFFLOADED } state; }; #define HCI_MIN_ADV_MONITOR_HANDLE 1 #define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32 #define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16 #define HCI_ADV_MONITOR_EXT_NONE 1 #define HCI_ADV_MONITOR_EXT_MSFT 2 #define HCI_MAX_SHORT_NAME_LENGTH 10 #define HCI_CONN_HANDLE_MAX 0x0eff #define HCI_CONN_HANDLE_UNSET(_handle) (_handle > HCI_CONN_HANDLE_MAX) /* Min encryption key size to match with SMP */ #define HCI_MIN_ENC_KEY_SIZE 7 /* Default LE RPA expiry time, 15 minutes */ #define HCI_DEFAULT_RPA_TIMEOUT (15 * 60) /* Default min/max age of connection information (1s/3s) */ #define DEFAULT_CONN_INFO_MIN_AGE 1000 #define DEFAULT_CONN_INFO_MAX_AGE 3000 /* Default authenticated payload timeout 30s */ #define DEFAULT_AUTH_PAYLOAD_TIMEOUT 0x0bb8 #define HCI_MAX_PAGES 3 struct hci_dev { struct list_head list; struct mutex lock; struct ida unset_handle_ida; const char *name; unsigned long flags; __u16 id; __u8 bus; bdaddr_t bdaddr; bdaddr_t setup_addr; bdaddr_t public_addr; bdaddr_t random_addr; bdaddr_t static_addr; __u8 adv_addr_type; __u8 dev_name[HCI_MAX_NAME_LENGTH]; __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH]; __u8 eir[HCI_MAX_EIR_LENGTH]; __u16 appearance; __u8 dev_class[3]; __u8 major_class; __u8 minor_class; __u8 max_page; __u8 features[HCI_MAX_PAGES][8]; __u8 le_features[8]; __u8 le_accept_list_size; __u8 le_resolv_list_size; __u8 le_num_of_adv_sets; __u8 le_states[8]; __u8 mesh_ad_types[16]; __u8 mesh_send_ref; __u8 commands[64]; __u8 hci_ver; __u16 hci_rev; __u8 lmp_ver; __u16 manufacturer; __u16 lmp_subver; __u16 voice_setting; __u8 num_iac; __u16 stored_max_keys; __u16 stored_num_keys; __u8 io_capability; __s8 inq_tx_power; __u8 err_data_reporting; __u16 page_scan_interval; __u16 page_scan_window; __u8 page_scan_type; __u8 le_adv_channel_map; __u16 le_adv_min_interval; __u16 le_adv_max_interval; __u8 le_scan_type; __u16 le_scan_interval; __u16 le_scan_window; __u16 le_scan_int_suspend; __u16 le_scan_window_suspend; __u16 le_scan_int_discovery; __u16 le_scan_window_discovery; __u16 le_scan_int_adv_monitor; __u16 le_scan_window_adv_monitor; __u16 le_scan_int_connect; __u16 le_scan_window_connect; __u16 le_conn_min_interval; __u16 le_conn_max_interval; __u16 le_conn_latency; __u16 le_supv_timeout; __u16 le_def_tx_len; __u16 le_def_tx_time; __u16 le_max_tx_len; __u16 le_max_tx_time; __u16 le_max_rx_len; __u16 le_max_rx_time; __u8 le_max_key_size; __u8 le_min_key_size; __u16 discov_interleaved_timeout; __u16 conn_info_min_age; __u16 conn_info_max_age; __u16 auth_payload_timeout; __u8 min_enc_key_size; __u8 max_enc_key_size; __u8 pairing_opts; __u8 ssp_debug_mode; __u8 hw_error_code; __u32 clock; __u16 advmon_allowlist_duration; __u16 advmon_no_filter_duration; __u8 
enable_advmon_interleave_scan; __u16 devid_source; __u16 devid_vendor; __u16 devid_product; __u16 devid_version; __u8 def_page_scan_type; __u16 def_page_scan_int; __u16 def_page_scan_window; __u8 def_inq_scan_type; __u16 def_inq_scan_int; __u16 def_inq_scan_window; __u16 def_br_lsto; __u16 def_page_timeout; __u16 def_multi_adv_rotation_duration; __u16 def_le_autoconnect_timeout; __s8 min_le_tx_power; __s8 max_le_tx_power; __u16 pkt_type; __u16 esco_type; __u16 link_policy; __u16 link_mode; __u32 idle_timeout; __u16 sniff_min_interval; __u16 sniff_max_interval; unsigned int auto_accept_delay; unsigned long quirks; atomic_t cmd_cnt; unsigned int acl_cnt; unsigned int sco_cnt; unsigned int le_cnt; unsigned int iso_cnt; unsigned int acl_mtu; unsigned int sco_mtu; unsigned int le_mtu; unsigned int iso_mtu; unsigned int acl_pkts; unsigned int sco_pkts; unsigned int le_pkts; unsigned int iso_pkts; unsigned long acl_last_tx; unsigned long le_last_tx; __u8 le_tx_def_phys; __u8 le_rx_def_phys; struct workqueue_struct *workqueue; struct workqueue_struct *req_workqueue; struct work_struct power_on; struct delayed_work power_off; struct work_struct error_reset; struct work_struct cmd_sync_work; struct list_head cmd_sync_work_list; struct mutex cmd_sync_work_lock; struct mutex unregister_lock; struct work_struct cmd_sync_cancel_work; struct work_struct reenable_adv_work; __u16 discov_timeout; struct delayed_work discov_off; struct delayed_work service_cache; struct delayed_work cmd_timer; struct delayed_work ncmd_timer; struct work_struct rx_work; struct work_struct cmd_work; struct work_struct tx_work; struct delayed_work le_scan_disable; struct sk_buff_head rx_q; struct sk_buff_head raw_q; struct sk_buff_head cmd_q; struct sk_buff *sent_cmd; struct sk_buff *recv_event; struct mutex req_lock; wait_queue_head_t req_wait_q; __u32 req_status; __u32 req_result; struct sk_buff *req_skb; struct sk_buff *req_rsp; void *smp_data; void *smp_bredr_data; struct discovery_state discovery; bool discovery_paused; int advertising_old_state; bool advertising_paused; struct notifier_block suspend_notifier; enum suspended_state suspend_state_next; enum suspended_state suspend_state; bool scanning_paused; bool suspended; u8 wake_reason; bdaddr_t wake_addr; u8 wake_addr_type; struct hci_conn_hash conn_hash; struct list_head mesh_pending; struct list_head mgmt_pending; struct list_head reject_list; struct list_head accept_list; struct list_head uuids; struct list_head link_keys; struct list_head long_term_keys; struct list_head identity_resolving_keys; struct list_head remote_oob_data; struct list_head le_accept_list; struct list_head le_resolv_list; struct list_head le_conn_params; struct list_head pend_le_conns; struct list_head pend_le_reports; struct list_head blocked_keys; struct list_head local_codecs; struct hci_dev_stats stat; atomic_t promisc; const char *hw_info; const char *fw_info; struct dentry *debugfs; struct hci_devcoredump dump; struct device dev; struct rfkill *rfkill; DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS); hci_conn_flags_t conn_flags; __s8 adv_tx_power; __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; __u8 adv_data_len; __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; __u8 scan_rsp_data_len; __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH]; __u8 per_adv_data_len; struct list_head adv_instances; unsigned int adv_instance_cnt; __u8 cur_adv_instance; __u16 adv_instance_timeout; struct delayed_work adv_instance_expire; struct idr adv_monitors_idr; unsigned int adv_monitors_cnt; __u8 irk[16]; __u32 rpa_timeout; struct 
delayed_work rpa_expired; bdaddr_t rpa; struct delayed_work mesh_send_done; enum { INTERLEAVE_SCAN_NONE, INTERLEAVE_SCAN_NO_FILTER, INTERLEAVE_SCAN_ALLOWLIST } interleave_scan_state; struct delayed_work interleave_scan; struct list_head monitored_devices; bool advmon_pend_notify; #if IS_ENABLED(CONFIG_BT_LEDS) struct led_trigger *power_led; #endif #if IS_ENABLED(CONFIG_BT_MSFTEXT) __u16 msft_opcode; void *msft_data; bool msft_curve_validity; #endif #if IS_ENABLED(CONFIG_BT_AOSPEXT) bool aosp_capable; bool aosp_quality_report; #endif int (*open)(struct hci_dev *hdev); int (*close)(struct hci_dev *hdev); int (*flush)(struct hci_dev *hdev); int (*setup)(struct hci_dev *hdev); int (*shutdown)(struct hci_dev *hdev); int (*send)(struct hci_dev *hdev, struct sk_buff *skb); void (*notify)(struct hci_dev *hdev, unsigned int evt); void (*hw_error)(struct hci_dev *hdev, u8 code); int (*post_init)(struct hci_dev *hdev); int (*set_diag)(struct hci_dev *hdev, bool enable); int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr); void (*cmd_timeout)(struct hci_dev *hdev); void (*reset)(struct hci_dev *hdev); bool (*wakeup)(struct hci_dev *hdev); int (*set_quality_report)(struct hci_dev *hdev, bool enable); int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path); int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type, struct bt_codec *codec, __u8 *vnd_len, __u8 **vnd_data); u8 (*classify_pkt_type)(struct hci_dev *hdev, struct sk_buff *skb); }; #define HCI_PHY_HANDLE(handle) (handle & 0xff) enum conn_reasons { CONN_REASON_PAIR_DEVICE, CONN_REASON_L2CAP_CHAN, CONN_REASON_SCO_CONNECT, CONN_REASON_ISO_CONNECT, }; struct hci_conn { struct list_head list; atomic_t refcnt; bdaddr_t dst; __u8 dst_type; bdaddr_t src; __u8 src_type; bdaddr_t init_addr; __u8 init_addr_type; bdaddr_t resp_addr; __u8 resp_addr_type; __u8 adv_instance; __u16 handle; __u16 sync_handle; __u16 state; __u16 mtu; __u8 mode; __u8 type; __u8 role; bool out; __u8 attempt; __u8 dev_class[3]; __u8 features[HCI_MAX_PAGES][8]; __u16 pkt_type; __u16 link_policy; __u8 key_type; __u8 auth_type; __u8 sec_level; __u8 pending_sec_level; __u8 pin_length; __u8 enc_key_size; __u8 io_capability; __u32 passkey_notify; __u8 passkey_entered; __u16 disc_timeout; __u16 conn_timeout; __u16 setting; __u16 auth_payload_timeout; __u16 le_conn_min_interval; __u16 le_conn_max_interval; __u16 le_conn_interval; __u16 le_conn_latency; __u16 le_supv_timeout; __u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH]; __u8 le_adv_data_len; __u8 le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN]; __u16 le_per_adv_data_len; __u16 le_per_adv_data_offset; __u8 le_adv_phy; __u8 le_adv_sec_phy; __u8 le_tx_phy; __u8 le_rx_phy; __s8 rssi; __s8 tx_power; __s8 max_tx_power; struct bt_iso_qos iso_qos; unsigned long flags; enum conn_reasons conn_reason; __u8 abort_reason; __u32 clock; __u16 clock_accuracy; unsigned long conn_info_timestamp; __u8 remote_cap; __u8 remote_auth; __u8 remote_id; unsigned int sent; struct sk_buff_head data_q; struct list_head chan_list; struct delayed_work disc_work; struct delayed_work auto_accept_work; struct delayed_work idle_work; struct delayed_work le_conn_timeout; struct device dev; struct dentry *debugfs; struct hci_dev *hdev; void *l2cap_data; void *sco_data; void *iso_data; struct list_head link_list; struct hci_conn *parent; struct hci_link *link; struct bt_codec codec; void (*connect_cfm_cb) (struct hci_conn *conn, u8 status); void (*security_cfm_cb) (struct hci_conn *conn, u8 status); void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason); void 
(*cleanup)(struct hci_conn *conn); }; struct hci_link { struct list_head list; struct hci_conn *conn; }; struct hci_chan { struct list_head list; __u16 handle; struct hci_conn *conn; struct sk_buff_head data_q; unsigned int sent; __u8 state; }; struct hci_conn_params { struct list_head list; struct list_head action; bdaddr_t addr; u8 addr_type; u16 conn_min_interval; u16 conn_max_interval; u16 conn_latency; u16 supervision_timeout; enum { HCI_AUTO_CONN_DISABLED, HCI_AUTO_CONN_REPORT, HCI_AUTO_CONN_DIRECT, HCI_AUTO_CONN_ALWAYS, HCI_AUTO_CONN_LINK_LOSS, HCI_AUTO_CONN_EXPLICIT, } auto_connect; struct hci_conn *conn; bool explicit_connect; /* Accessed without hdev->lock: */ hci_conn_flags_t flags; u8 privacy_mode; }; extern struct list_head hci_dev_list; extern struct list_head hci_cb_list; extern rwlock_t hci_dev_list_lock; extern struct mutex hci_cb_list_lock; #define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags) #define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags) #define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags) #define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags) #define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags) #define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags) #define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags) #define hci_dev_clear_volatile_flags(hdev) \ do { \ hci_dev_clear_flag(hdev, HCI_LE_SCAN); \ hci_dev_clear_flag(hdev, HCI_LE_ADV); \ hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\ hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \ } while (0) #define hci_dev_le_state_simultaneous(hdev) \ (!test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) && \ (hdev->le_states[4] & 0x08) && /* Central */ \ (hdev->le_states[4] & 0x40) && /* Peripheral */ \ (hdev->le_states[3] & 0x10)) /* Simultaneous */ /* ----- HCI interface to upper protocols ----- */ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); int l2cap_disconn_ind(struct hci_conn *hcon); void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); #if IS_ENABLED(CONFIG_BT_BREDR) int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb); #else static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) { return 0; } static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) { } #endif #if IS_ENABLED(CONFIG_BT_LE) int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); #else static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) { return 0; } static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) { } #endif /* ----- Inquiry cache ----- */ #define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */ #define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */ static inline void discovery_init(struct hci_dev *hdev) { hdev->discovery.state = DISCOVERY_STOPPED; INIT_LIST_HEAD(&hdev->discovery.all); INIT_LIST_HEAD(&hdev->discovery.unknown); INIT_LIST_HEAD(&hdev->discovery.resolve); hdev->discovery.report_invalid_rssi = true; hdev->discovery.rssi = HCI_RSSI_INVALID; } static inline void hci_discovery_filter_clear(struct hci_dev *hdev) { hdev->discovery.result_filtering = false; hdev->discovery.report_invalid_rssi = true; 
hdev->discovery.rssi = HCI_RSSI_INVALID; hdev->discovery.uuid_count = 0; kfree(hdev->discovery.uuids); hdev->discovery.uuids = NULL; } bool hci_discovery_active(struct hci_dev *hdev); void hci_discovery_set_state(struct hci_dev *hdev, int state); static inline int inquiry_cache_empty(struct hci_dev *hdev) { return list_empty(&hdev->discovery.all); } static inline long inquiry_cache_age(struct hci_dev *hdev) { struct discovery_state *c = &hdev->discovery; return jiffies - c->timestamp; } static inline long inquiry_entry_age(struct inquiry_entry *e) { return jiffies - e->timestamp; } struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, bdaddr_t *bdaddr); struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, bdaddr_t *bdaddr, int state); void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, struct inquiry_entry *ie); u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, bool name_known); void hci_inquiry_cache_flush(struct hci_dev *hdev); /* ----- HCI Connections ----- */ enum { HCI_CONN_AUTH_PEND, HCI_CONN_ENCRYPT_PEND, HCI_CONN_RSWITCH_PEND, HCI_CONN_MODE_CHANGE_PEND, HCI_CONN_SCO_SETUP_PEND, HCI_CONN_MGMT_CONNECTED, HCI_CONN_SSP_ENABLED, HCI_CONN_SC_ENABLED, HCI_CONN_AES_CCM, HCI_CONN_POWER_SAVE, HCI_CONN_FLUSH_KEY, HCI_CONN_ENCRYPT, HCI_CONN_AUTH, HCI_CONN_SECURE, HCI_CONN_FIPS, HCI_CONN_STK_ENCRYPT, HCI_CONN_AUTH_INITIATOR, HCI_CONN_DROP, HCI_CONN_CANCEL, HCI_CONN_PARAM_REMOVAL_PEND, HCI_CONN_NEW_LINK_KEY, HCI_CONN_SCANNING, HCI_CONN_AUTH_FAILURE, HCI_CONN_PER_ADV, HCI_CONN_BIG_CREATED, HCI_CONN_CREATE_CIS, HCI_CONN_BIG_SYNC, HCI_CONN_BIG_SYNC_FAILED, HCI_CONN_PA_SYNC, HCI_CONN_PA_SYNC_FAILED, }; static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && test_bit(HCI_CONN_SSP_ENABLED, &conn->flags); } static inline bool hci_conn_sc_enabled(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; return hci_dev_test_flag(hdev, HCI_SC_ENABLED) && test_bit(HCI_CONN_SC_ENABLED, &conn->flags); } static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) { struct hci_conn_hash *h = &hdev->conn_hash; list_add_tail_rcu(&c->list, &h->list); switch (c->type) { case ACL_LINK: h->acl_num++; break; case LE_LINK: h->le_num++; if (c->role == HCI_ROLE_SLAVE) h->le_num_peripheral++; break; case SCO_LINK: case ESCO_LINK: h->sco_num++; break; case ISO_LINK: h->iso_num++; break; } } static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) { struct hci_conn_hash *h = &hdev->conn_hash; list_del_rcu(&c->list); synchronize_rcu(); switch (c->type) { case ACL_LINK: h->acl_num--; break; case LE_LINK: h->le_num--; if (c->role == HCI_ROLE_SLAVE) h->le_num_peripheral--; break; case SCO_LINK: case ESCO_LINK: h->sco_num--; break; case ISO_LINK: h->iso_num--; break; } } static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type) { struct hci_conn_hash *h = &hdev->conn_hash; switch (type) { case ACL_LINK: return h->acl_num; case LE_LINK: return h->le_num; case SCO_LINK: case ESCO_LINK: return h->sco_num; case ISO_LINK: return h->iso_num; default: return 0; } } static inline unsigned int hci_conn_count(struct hci_dev *hdev) { struct hci_conn_hash *c = &hdev->conn_hash; return c->acl_num + c->sco_num + c->le_num + c->iso_num; } static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn) { struct 
hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c == conn) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; } static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; __u8 type = INVALID_LINK; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->handle == handle) { type = c->type; break; } } rcu_read_unlock(); return type; } static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev, bdaddr_t *ba, __u8 bis) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (bacmp(&c->dst, ba) || c->type != ISO_LINK) continue; if (c->iso_qos.bcast.bis == bis) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev, bdaddr_t *ba, __u8 big, __u8 bis) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (bacmp(&c->dst, ba) || c->type != ISO_LINK || !test_bit(HCI_CONN_PER_ADV, &c->flags)) continue; if (c->iso_qos.bcast.big == big && c->iso_qos.bcast.bis == bis) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, __u16 handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->handle == handle) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev, __u8 type, bdaddr_t *ba) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && !bacmp(&c->dst, ba)) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev, bdaddr_t *ba, __u8 ba_type) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != LE_LINK) continue; if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev, bdaddr_t *ba, __u8 ba_type, __u8 cig, __u8 id) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY)) continue; /* Match CIG ID if set */ if (cig != c->iso_qos.ucast.cig) continue; /* Match CIS ID if set */ if (id != c->iso_qos.ucast.cis) continue; /* Match destination address if set */ if (!ba || (ba_type == c->dst_type && !bacmp(&c->dst, ba))) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev, __u8 handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY)) continue; if (handle == c->iso_qos.ucast.cig) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, __u8 handle) { 
struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK) continue; if (handle == c->iso_qos.bcast.big) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK || c->state != state) continue; if (handle == c->iso_qos.bcast.big) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK || !test_bit(HCI_CONN_PA_SYNC, &c->flags)) continue; if (c->iso_qos.bcast.big == big) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn * hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != ISO_LINK) continue; if (c->sync_handle == sync_handle) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, __u8 type, __u16 state) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && c->state == state) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } typedef void (*hci_conn_func_t)(struct hci_conn *conn, void *data); static inline void hci_conn_hash_list_state(struct hci_dev *hdev, hci_conn_func_t func, __u8 type, __u16 state, void *data) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; if (!func) return; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && c->state == state) func(c, data); } rcu_read_unlock(); } static inline void hci_conn_hash_list_flag(struct hci_dev *hdev, hci_conn_func_t func, __u8 type, __u8 flag, void *data) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; if (!func) return; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && test_bit(flag, &c->flags)) func(c, data); } rcu_read_unlock(); } static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == LE_LINK && c->state == BT_CONNECT && !test_bit(HCI_CONN_SCANNING, &c->flags)) { rcu_read_unlock(); return c; } } rcu_read_unlock(); return NULL; } /* Returns true if an le connection is in the scanning state */ static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == LE_LINK && c->state == BT_CONNECT && test_bit(HCI_CONN_SCANNING, &c->flags)) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; } int hci_disconnect(struct hci_conn *conn, __u8 reason); bool hci_setup_sync(struct hci_conn *conn, __u16 handle); void 
hci_sco_setup(struct hci_conn *conn, __u8 status); bool hci_iso_setup_path(struct hci_conn *conn); int hci_le_create_cis_pending(struct hci_dev *hdev); int hci_conn_check_create_cis(struct hci_conn *conn); struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, u8 role, u16 handle); struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type, bdaddr_t *dst, u8 role); void hci_conn_del(struct hci_conn *conn); void hci_conn_hash_flush(struct hci_dev *hdev); struct hci_chan *hci_chan_create(struct hci_conn *conn); void hci_chan_del(struct hci_chan *chan); void hci_chan_list_flush(struct hci_conn *conn); struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, u16 conn_timeout, enum conn_reasons conn_reason); struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, bool dst_resolved, u8 sec_level, u16 conn_timeout, u8 role, u8 phy, u8 sec_phy); void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status); struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, u8 sec_level, u8 auth_type, enum conn_reasons conn_reason, u16 timeout); struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, __u16 setting, struct bt_codec *codec, u16 timeout); struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos); struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, struct bt_iso_qos *qos, __u8 base_len, __u8 *base); struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos); struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos, __u8 data_len, __u8 *data); struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, __u8 sid, struct bt_iso_qos *qos); int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, struct bt_iso_qos *qos, __u16 sync_handle, __u8 num_bis, __u8 bis[]); int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, bool initiator); int hci_conn_switch_role(struct hci_conn *conn, __u8 role); void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active); void hci_conn_failed(struct hci_conn *conn, u8 status); u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle); /* * hci_conn_get() and hci_conn_put() are used to control the life-time of an * "hci_conn" object. They do not guarantee that the hci_conn object is running, * working or anything else. They just guarantee that the object is available * and can be dereferenced. So you can use its locks, local variables and any * other constant data. * Before accessing runtime data, you _must_ lock the object and then check that * it is still running. As soon as you release the locks, the connection might * get dropped, though. * * On the other hand, hci_conn_hold() and hci_conn_drop() are used to control * how long the underlying connection is held. So every channel that runs on the * hci_conn object calls this to prevent the connection from disappearing. As * long as you hold a device, you must also guarantee that you have a valid * reference to the device via hci_conn_get() (or the initial reference from * hci_conn_add()). 
* The hold()/drop() ref-count is known to drop below 0 sometimes, which doesn't * break because nobody cares for that. But this means, we cannot use * _get()/_drop() in it, but require the caller to have a valid ref (FIXME). */ static inline struct hci_conn *hci_conn_get(struct hci_conn *conn) { get_device(&conn->dev); return conn; } static inline void hci_conn_put(struct hci_conn *conn) { put_device(&conn->dev); } static inline struct hci_conn *hci_conn_hold(struct hci_conn *conn) { BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); atomic_inc(&conn->refcnt); cancel_delayed_work(&conn->disc_work); return conn; } static inline void hci_conn_drop(struct hci_conn *conn) { BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); if (atomic_dec_and_test(&conn->refcnt)) { unsigned long timeo; switch (conn->type) { case ACL_LINK: case LE_LINK: cancel_delayed_work(&conn->idle_work); if (conn->state == BT_CONNECTED) { timeo = conn->disc_timeout; if (!conn->out) timeo *= 2; } else { timeo = 0; } break; default: timeo = 0; break; } cancel_delayed_work(&conn->disc_work); queue_delayed_work(conn->hdev->workqueue, &conn->disc_work, timeo); } } /* ----- HCI Devices ----- */ static inline void hci_dev_put(struct hci_dev *d) { BT_DBG("%s orig refcnt %d", d->name, kref_read(&d->dev.kobj.kref)); put_device(&d->dev); } static inline struct hci_dev *hci_dev_hold(struct hci_dev *d) { BT_DBG("%s orig refcnt %d", d->name, kref_read(&d->dev.kobj.kref)); get_device(&d->dev); return d; } #define hci_dev_lock(d) mutex_lock(&d->lock) #define hci_dev_unlock(d) mutex_unlock(&d->lock) #define to_hci_dev(d) container_of(d, struct hci_dev, dev) #define to_hci_conn(c) container_of(c, struct hci_conn, dev) static inline void *hci_get_drvdata(struct hci_dev *hdev) { return dev_get_drvdata(&hdev->dev); } static inline void hci_set_drvdata(struct hci_dev *hdev, void *data) { dev_set_drvdata(&hdev->dev, data); } static inline void *hci_get_priv(struct hci_dev *hdev) { return (char *)hdev + sizeof(*hdev); } struct hci_dev *hci_dev_get(int index); struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type); struct hci_dev *hci_alloc_dev_priv(int sizeof_priv); static inline struct hci_dev *hci_alloc_dev(void) { return hci_alloc_dev_priv(0); } void hci_free_dev(struct hci_dev *hdev); int hci_register_dev(struct hci_dev *hdev); void hci_unregister_dev(struct hci_dev *hdev); void hci_release_dev(struct hci_dev *hdev); int hci_register_suspend_notifier(struct hci_dev *hdev); int hci_unregister_suspend_notifier(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev); int hci_reset_dev(struct hci_dev *hdev); int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb); __printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...); __printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...); static inline void hci_set_msft_opcode(struct hci_dev *hdev, __u16 opcode) { #if IS_ENABLED(CONFIG_BT_MSFTEXT) hdev->msft_opcode = opcode; #endif } static inline void hci_set_aosp_capable(struct hci_dev *hdev) { #if IS_ENABLED(CONFIG_BT_AOSPEXT) hdev->aosp_capable = true; #endif } static inline void hci_devcd_setup(struct hci_dev *hdev) { #ifdef CONFIG_DEV_COREDUMP INIT_WORK(&hdev->dump.dump_rx, hci_devcd_rx); INIT_DELAYED_WORK(&hdev->dump.dump_timeout, hci_devcd_timeout); skb_queue_head_init(&hdev->dump.dump_q); #endif } int hci_dev_open(__u16 dev); int 
hci_dev_close(__u16 dev); int hci_dev_do_close(struct hci_dev *hdev); int hci_dev_reset(__u16 dev); int hci_dev_reset_stat(__u16 dev); int hci_dev_cmd(unsigned int cmd, void __user *arg); int hci_get_dev_list(void __user *arg); int hci_get_dev_info(void __user *arg); int hci_get_conn_list(void __user *arg); int hci_get_conn_info(struct hci_dev *hdev, void __user *arg); int hci_get_auth_info(struct hci_dev *hdev, void __user *arg); int hci_inquiry(void __user *arg); struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list, bdaddr_t *bdaddr, u8 type); struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( struct list_head *list, bdaddr_t *bdaddr, u8 type); struct bdaddr_list_with_flags * hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, u8 type, u8 *peer_irk, u8 *local_irk); int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, u8 type, u32 flags); int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, u8 type); void hci_bdaddr_list_clear(struct list_head *list); struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); void hci_conn_params_clear_disabled(struct hci_dev *hdev); void hci_conn_params_free(struct hci_conn_params *param); void hci_pend_le_list_del_init(struct hci_conn_params *param); void hci_pend_le_list_add(struct hci_conn_params *param, struct list_head *list); struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, bdaddr_t *addr, u8 addr_type); void hci_uuids_clear(struct hci_dev *hdev); void hci_link_keys_clear(struct hci_dev *hdev); struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len, bool *persistent); struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, u8 authenticated, u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand); struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 role); int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); void hci_smp_ltks_clear(struct hci_dev *hdev); int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa); struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type); struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 val[16], bdaddr_t *rpa); void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type); bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]); void hci_blocked_keys_clear(struct hci_dev *hdev); void hci_smp_irks_clear(struct hci_dev *hdev); bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); void hci_remote_oob_data_clear(struct hci_dev *hdev); struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); int 
hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u8 *hash192, u8 *rand192, u8 *hash256, u8 *rand256); int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); void hci_adv_instances_clear(struct hci_dev *hdev); struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance); struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance); struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data, u16 timeout, u16 duration, s8 tx_power, u32 min_interval, u32 max_interval, u8 mesh_handle); struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u32 flags, u8 data_len, u8 *data, u32 min_interval, u32 max_interval); int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data); int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance); bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance); void hci_adv_monitors_clear(struct hci_dev *hdev); void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle); int hci_remove_all_adv_monitor(struct hci_dev *hdev); bool hci_is_adv_monitoring(struct hci_dev *hdev); int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); void hci_init_sysfs(struct hci_dev *hdev); void hci_conn_init_sysfs(struct hci_conn *conn); void hci_conn_add_sysfs(struct hci_conn *conn); void hci_conn_del_sysfs(struct hci_conn *conn); #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev)) #define GET_HCIDEV_DEV(hdev) ((hdev)->dev.parent) /* ----- LMP capabilities ----- */ #define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT) #define lmp_rswitch_capable(dev) ((dev)->features[0][0] & LMP_RSWITCH) #define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD) #define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF) #define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK) #define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ) #define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO) #define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR)) #define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE) #define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR) #define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC) #define lmp_esco_2m_capable(dev) ((dev)->features[0][5] & LMP_EDR_ESCO_2M) #define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ) #define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR)) #define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR) #define lmp_no_flush_capable(dev) ((dev)->features[0][6] & LMP_NO_FLUSH) #define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO) #define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR) #define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES) #define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT) #define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & 
LMP_EDR_2M) #define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M) #define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT) #define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT) /* ----- Extended LMP capabilities ----- */ #define lmp_cpb_central_capable(dev) ((dev)->features[2][0] & LMP_CPB_CENTRAL) #define lmp_cpb_peripheral_capable(dev) ((dev)->features[2][0] & LMP_CPB_PERIPHERAL) #define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN) #define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN) #define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC) #define lmp_ping_capable(dev) ((dev)->features[2][1] & LMP_PING) /* ----- Host capabilities ----- */ #define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP) #define lmp_host_sc_capable(dev) ((dev)->features[1][0] & LMP_HOST_SC) #define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE)) #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR)) #define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \ !hci_dev_test_flag(dev, HCI_AUTO_OFF)) #define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ hci_dev_test_flag(dev, HCI_SC_ENABLED)) #define rpa_valid(dev) (bacmp(&dev->rpa, BDADDR_ANY) && \ !hci_dev_test_flag(dev, HCI_RPA_EXPIRED)) #define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \ !adv->rpa_expired) #define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M)) #define le_2m_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_2M)) #define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M)) #define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED) && \ !test_bit(HCI_QUIRK_BROKEN_LE_CODED, \ &(dev)->quirks)) #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) #define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY) /* Use LL Privacy based address resolution if supported */ #define use_ll_privacy(dev) (ll_privacy_capable(dev) && \ hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY)) #define privacy_mode_capable(dev) (use_ll_privacy(dev) && \ (hdev->commands[39] & 0x04)) #define read_key_size_capable(dev) \ ((dev)->commands[20] & 0x10 && \ !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks)) /* Use enhanced synchronous connection if command is supported and its quirk * has not been set. */ #define enhanced_sync_conn_capable(dev) \ (((dev)->commands[29] & 0x08) && \ !test_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &(dev)->quirks)) /* Use ext scanning if set ext scan param and ext scan enable is supported */ #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \ ((dev)->commands[37] & 0x40) && \ !test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks)) /* Use ext create connection if command is supported */ #define use_ext_conn(dev) ((dev)->commands[37] & 0x80) /* Extended advertising support */ #define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV)) /* Maximum advertising length */ #define max_adv_len(dev) \ (ext_adv_capable(dev) ? 
HCI_MAX_EXT_AD_LENGTH : HCI_MAX_AD_LENGTH) /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789: * * C24: Mandatory if the LE Controller supports Connection State and either * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported */ #define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \ ext_adv_capable(dev)) /* Periodic advertising support */ #define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV)) /* CIS Master/Slave and BIS support */ #define iso_capable(dev) (cis_capable(dev) || bis_capable(dev)) #define cis_capable(dev) \ (cis_central_capable(dev) || cis_peripheral_capable(dev)) #define cis_central_capable(dev) \ ((dev)->le_features[3] & HCI_LE_CIS_CENTRAL) #define cis_peripheral_capable(dev) \ ((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL) #define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER) #define sync_recv_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER) #define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \ (!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks))) /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type, __u8 *flags) { switch (type) { case ACL_LINK: return l2cap_connect_ind(hdev, bdaddr); case SCO_LINK: case ESCO_LINK: return sco_connect_ind(hdev, bdaddr, flags); case ISO_LINK: return iso_connect_ind(hdev, bdaddr, flags); default: BT_ERR("unknown link type %d", type); return -EINVAL; } } static inline int hci_proto_disconn_ind(struct hci_conn *conn) { if (conn->type != ACL_LINK && conn->type != LE_LINK) return HCI_ERROR_REMOTE_USER_TERM; return l2cap_disconn_ind(conn); } /* ----- HCI callbacks ----- */ struct hci_cb { struct list_head list; char *name; void (*connect_cfm) (struct hci_conn *conn, __u8 status); void (*disconn_cfm) (struct hci_conn *conn, __u8 status); void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); void (*key_change_cfm) (struct hci_conn *conn, __u8 status); void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); }; static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status) { struct hci_cb *cb; mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->connect_cfm) cb->connect_cfm(conn, status); } mutex_unlock(&hci_cb_list_lock); if (conn->connect_cfm_cb) conn->connect_cfm_cb(conn, status); } static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason) { struct hci_cb *cb; mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->disconn_cfm) cb->disconn_cfm(conn, reason); } mutex_unlock(&hci_cb_list_lock); if (conn->disconn_cfm_cb) conn->disconn_cfm_cb(conn, reason); } static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) { struct hci_cb *cb; __u8 encrypt; if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) return; encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 
0x01 : 0x00; mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->security_cfm) cb->security_cfm(conn, status, encrypt); } mutex_unlock(&hci_cb_list_lock); if (conn->security_cfm_cb) conn->security_cfm_cb(conn, status); } static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) { struct hci_cb *cb; __u8 encrypt; if (conn->state == BT_CONFIG) { if (!status) conn->state = BT_CONNECTED; hci_connect_cfm(conn, status); hci_conn_drop(conn); return; } if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) encrypt = 0x00; else if (test_bit(HCI_CONN_AES_CCM, &conn->flags)) encrypt = 0x02; else encrypt = 0x01; if (!status) { if (conn->sec_level == BT_SECURITY_SDP) conn->sec_level = BT_SECURITY_LOW; if (conn->pending_sec_level > conn->sec_level) conn->sec_level = conn->pending_sec_level; } mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->security_cfm) cb->security_cfm(conn, status, encrypt); } mutex_unlock(&hci_cb_list_lock); if (conn->security_cfm_cb) conn->security_cfm_cb(conn, status); } static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) { struct hci_cb *cb; mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->key_change_cfm) cb->key_change_cfm(conn, status); } mutex_unlock(&hci_cb_list_lock); } static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role) { struct hci_cb *cb; mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { if (cb->role_switch_cfm) cb->role_switch_cfm(conn, status, role); } mutex_unlock(&hci_cb_list_lock); } static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type) { if (addr_type != ADDR_LE_DEV_RANDOM) return false; if ((bdaddr->b[5] & 0xc0) == 0x40) return true; return false; } static inline bool hci_is_identity_address(bdaddr_t *addr, u8 addr_type) { if (addr_type == ADDR_LE_DEV_PUBLIC) return true; /* Check for Random Static address type */ if ((addr->b[5] & 0xc0) == 0xc0) return true; return false; } static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { if (!hci_bdaddr_is_rpa(bdaddr, addr_type)) return NULL; return hci_find_irk_by_rpa(hdev, bdaddr); } static inline int hci_check_conn_params(u16 min, u16 max, u16 latency, u16 to_multiplier) { u16 max_latency; if (min > max) { BT_WARN("min %d > max %d", min, max); return -EINVAL; } if (min < 6) { BT_WARN("min %d < 6", min); return -EINVAL; } if (max > 3200) { BT_WARN("max %d > 3200", max); return -EINVAL; } if (to_multiplier < 10) { BT_WARN("to_multiplier %d < 10", to_multiplier); return -EINVAL; } if (to_multiplier > 3200) { BT_WARN("to_multiplier %d > 3200", to_multiplier); return -EINVAL; } if (max >= to_multiplier * 8) { BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier); return -EINVAL; } max_latency = (to_multiplier * 4 / max) - 1; if (latency > 499) { BT_WARN("latency %d > 499", latency); return -EINVAL; } if (latency > max_latency) { BT_WARN("latency %d > max_latency %d", latency, max_latency); return -EINVAL; } return 0; } int hci_register_cb(struct hci_cb *hcb); int hci_unregister_cb(struct hci_cb *hcb); int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param); int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, const void *param); void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags); void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb); void 
*hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); void *hci_recv_event_data(struct hci_dev *hdev, __u8 event); u32 hci_conn_get_phy(struct hci_conn *conn); /* ----- HCI Sockets ----- */ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, int flag, struct sock *skip_sk); void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb); void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event, void *data, u16 data_len, ktime_t tstamp, int flag, struct sock *skip_sk); void hci_sock_dev_event(struct hci_dev *hdev, int event); #define HCI_MGMT_VAR_LEN BIT(0) #define HCI_MGMT_NO_HDEV BIT(1) #define HCI_MGMT_UNTRUSTED BIT(2) #define HCI_MGMT_UNCONFIGURED BIT(3) #define HCI_MGMT_HDEV_OPTIONAL BIT(4) struct hci_mgmt_handler { int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len); size_t data_len; unsigned long flags; }; struct hci_mgmt_chan { struct list_head list; unsigned short channel; size_t handler_count; const struct hci_mgmt_handler *handlers; void (*hdev_init) (struct sock *sk, struct hci_dev *hdev); }; int hci_mgmt_chan_register(struct hci_mgmt_chan *c); void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c); /* Management interface */ #define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR)) #define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \ BIT(BDADDR_LE_RANDOM)) #define DISCOV_TYPE_INTERLEAVED (BIT(BDADDR_BREDR) | \ BIT(BDADDR_LE_PUBLIC) | \ BIT(BDADDR_LE_RANDOM)) /* These LE scan and inquiry parameters were chosen according to LE General * Discovery Procedure specification. */ #define DISCOV_LE_SCAN_WIN 0x0012 /* 11.25 msec */ #define DISCOV_LE_SCAN_INT 0x0012 /* 11.25 msec */ #define DISCOV_LE_SCAN_INT_FAST 0x0060 /* 60 msec */ #define DISCOV_LE_SCAN_WIN_FAST 0x0030 /* 30 msec */ #define DISCOV_LE_SCAN_INT_CONN 0x0060 /* 60 msec */ #define DISCOV_LE_SCAN_WIN_CONN 0x0060 /* 60 msec */ #define DISCOV_LE_SCAN_INT_SLOW1 0x0800 /* 1.28 sec */ #define DISCOV_LE_SCAN_WIN_SLOW1 0x0012 /* 11.25 msec */ #define DISCOV_LE_SCAN_INT_SLOW2 0x1000 /* 2.56 sec */ #define DISCOV_LE_SCAN_WIN_SLOW2 0x0024 /* 22.5 msec */ #define DISCOV_CODED_SCAN_INT_FAST 0x0120 /* 180 msec */ #define DISCOV_CODED_SCAN_WIN_FAST 0x0090 /* 90 msec */ #define DISCOV_CODED_SCAN_INT_SLOW1 0x1800 /* 3.84 sec */ #define DISCOV_CODED_SCAN_WIN_SLOW1 0x0036 /* 33.75 msec */ #define DISCOV_CODED_SCAN_INT_SLOW2 0x3000 /* 7.68 sec */ #define DISCOV_CODED_SCAN_WIN_SLOW2 0x006c /* 67.5 msec */ #define DISCOV_LE_TIMEOUT 10240 /* msec */ #define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */ #define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04 #define DISCOV_BREDR_INQUIRY_LEN 0x08 #define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */ #define DISCOV_LE_FAST_ADV_INT_MIN 0x00A0 /* 100 msec */ #define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */ #define DISCOV_LE_PER_ADV_INT_MIN 0x00A0 /* 200 msec */ #define DISCOV_LE_PER_ADV_INT_MAX 0x00A0 /* 200 msec */ #define DISCOV_LE_ADV_MESH_MIN 0x00A0 /* 100 msec */ #define DISCOV_LE_ADV_MESH_MAX 0x00A0 /* 100 msec */ #define INTERVAL_TO_MS(x) (((x) * 10) / 0x10) #define NAME_RESOLVE_DURATION msecs_to_jiffies(10240) /* 10.24 sec */ void mgmt_fill_version_info(void *ver); int mgmt_new_settings(struct hci_dev *hdev); void mgmt_index_added(struct hci_dev *hdev); void mgmt_index_removed(struct hci_dev *hdev); void mgmt_set_powered_failed(struct hci_dev *hdev, int err); void mgmt_power_on(struct hci_dev *hdev, int err); void __mgmt_power_off(struct hci_dev *hdev); void 
mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent); void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, u8 *name, u8 name_len); void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 reason, bool mgmt_connected); void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure); void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status); int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 value, u8 confirm_hint); int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type); int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status); int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 passkey, u8 entered); void mgmt_auth_failed(struct hci_conn *conn, u8 status); void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status); void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, u8 status); void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status); void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status); void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u64 instant); void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, s8 rssi, u8 *name, u8 name_len); void mgmt_discovering(struct hci_dev *hdev, u8 discovering); void mgmt_suspending(struct hci_dev *hdev, u8 state); void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr, u8 addr_type); bool mgmt_powering_down(struct hci_dev *hdev); void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent); void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent); void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, bool persistent); void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u8 store_hint, u16 min_interval, u16 max_interval, u16 latency, u16 timeout); void mgmt_smp_complete(struct hci_conn *conn, bool complete); bool mgmt_get_connectable(struct hci_dev *hdev); u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev); void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance); void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, u8 instance); void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle); int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip); void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle, bdaddr_t *bdaddr, u8 addr_type); 
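/*
 * Illustrative, standalone sketch (assumptions: plain userspace C; all names
 * prefixed ex_ are local to this example only). The DISCOV_LE_* scan values
 * and INTERVAL_TO_MS() above express LE scan timing in 0.625 ms units; the
 * macro's x * 10 / 16 integer arithmetic is re-applied here so the millisecond
 * figures given in the comments can be double-checked.
 */
#include <stdio.h>

/* Same arithmetic as INTERVAL_TO_MS() above: x * 10 / 16, i.e. x * 0.625 ms. */
#define EX_INTERVAL_TO_MS(x) (((x) * 10) / 0x10)

int main(void)
{
	static const struct { const char *name; unsigned int val; } ex[] = {
		{ "DISCOV_LE_SCAN_WIN",       0x0012 },	/* 11.25 ms (truncates to 11) */
		{ "DISCOV_LE_SCAN_INT_FAST",  0x0060 },	/* 60 ms */
		{ "DISCOV_LE_SCAN_INT_SLOW1", 0x0800 },	/* 1.28 s */
		{ "DISCOV_LE_SCAN_WIN_SLOW2", 0x0024 },	/* 22.5 ms (truncates to 22) */
	};
	unsigned int i;

	for (i = 0; i < sizeof(ex) / sizeof(ex[0]); i++)
		printf("%-25s 0x%04x -> %u ms\n", ex[i].name, ex[i].val,
		       EX_INTERVAL_TO_MS(ex[i].val));
	return 0;
}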
int hci_abort_conn(struct hci_conn *conn, u8 reason);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size);
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type);

#define SCO_AIRMODE_MASK	0x0003
#define SCO_AIRMODE_CVSD	0x0000
#define SCO_AIRMODE_TRANSP	0x0003

#define LOCAL_CODEC_ACL_MASK	BIT(0)
#define LOCAL_CODEC_SCO_MASK	BIT(1)

#define TRANSPORT_TYPE_MAX	0x04

#endif /* __HCI_CORE_H */
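/*
 * Illustrative, standalone sketch of the bit test used by hci_bdaddr_is_rpa()
 * and hci_is_identity_address() above; the ex_* type and helper below are
 * hypothetical and exist only for this example. bdaddr_t stores the address
 * little-endian, so b[5] is the most significant octet: for a random LE
 * address, top bits 01 mark a resolvable private address (RPA) and 11 a
 * static random (identity) address.
 */
#include <stdint.h>
#include <stdio.h>

/* Little-endian 48-bit address, mirroring the layout assumed by bdaddr_t. */
struct ex_bdaddr { uint8_t b[6]; };

/* Classify a *random* LE address by the two most significant bits of b[5]. */
static const char *ex_random_addr_kind(const struct ex_bdaddr *a)
{
	switch (a->b[5] & 0xc0) {
	case 0x40: return "resolvable private (RPA)";
	case 0xc0: return "static random (identity)";
	case 0x00: return "non-resolvable private";
	default:   return "reserved";
	}
}

int main(void)
{
	struct ex_bdaddr rpa      = { .b = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x4a } };
	struct ex_bdaddr identity = { .b = { 0x01, 0x02, 0x03, 0x04, 0x05, 0xc3 } };

	printf("%02x:xx:xx:xx:xx:xx is %s\n", rpa.b[5], ex_random_addr_kind(&rpa));
	printf("%02x:xx:xx:xx:xx:xx is %s\n", identity.b[5],
	       ex_random_addr_kind(&identity));
	return 0;
}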
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
* * Lock order: * context.ldt_usr_sem * mmap_lock * context.lock */ #include <linux/errno.h> #include <linux/gfp.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/syscalls.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/uaccess.h> #include <asm/ldt.h> #include <asm/tlb.h> #include <asm/desc.h> #include <asm/mmu_context.h> #include <asm/pgtable_areas.h> #include <xen/xen.h> /* This is a multiple of PAGE_SIZE. */ #define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE) static inline void *ldt_slot_va(int slot) { return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot); } void load_mm_ldt(struct mm_struct *mm) { struct ldt_struct *ldt; /* READ_ONCE synchronizes with smp_store_release */ ldt = READ_ONCE(mm->context.ldt); /* * Any change to mm->context.ldt is followed by an IPI to all * CPUs with the mm active. The LDT will not be freed until * after the IPI is handled by all such CPUs. This means that * if the ldt_struct changes before we return, the values we see * will be safe, and the new values will be loaded before we run * any user code. * * NB: don't try to convert this to use RCU without extreme care. * We would still need IRQs off, because we don't want to change * the local LDT after an IPI loaded a newer value than the one * that we can see. */ if (unlikely(ldt)) { if (static_cpu_has(X86_FEATURE_PTI)) { if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) { /* * Whoops -- either the new LDT isn't mapped * (if slot == -1) or is mapped into a bogus * slot (if slot > 1). */ clear_LDT(); return; } /* * If page table isolation is enabled, ldt->entries * will not be mapped in the userspace pagetables. * Tell the CPU to access the LDT through the alias * at ldt_slot_va(ldt->slot). */ set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries); } else { set_ldt(ldt->entries, ldt->nr_entries); } } else { clear_LDT(); } } void switch_ldt(struct mm_struct *prev, struct mm_struct *next) { /* * Load the LDT if either the old or new mm had an LDT. * * An mm will never go from having an LDT to not having an LDT. Two * mms never share an LDT, so we don't gain anything by checking to * see whether the LDT changed. There's also no guarantee that * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL, * then prev->context.ldt will also be non-NULL. * * If we really cared, we could optimize the case where prev == next * and we're exiting lazy mode. Most of the time, if this happens, * we don't actually need to reload LDTR, but modify_ldt() is mostly * used by legacy code and emulators where we don't need this level of * performance. * * This uses | instead of || because it generates better code. */ if (unlikely((unsigned long)prev->context.ldt | (unsigned long)next->context.ldt)) load_mm_ldt(next); DEBUG_LOCKS_WARN_ON(preemptible()); } static void refresh_ldt_segments(void) { #ifdef CONFIG_X86_64 unsigned short sel; /* * Make sure that the cached DS and ES descriptors match the updated * LDT. */ savesegment(ds, sel); if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) loadsegment(ds, sel); savesegment(es, sel); if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) loadsegment(es, sel); #endif } /* context.lock is held by the task which issued the smp function call */ static void flush_ldt(void *__mm) { struct mm_struct *mm = __mm; if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm) return; load_mm_ldt(mm); refresh_ldt_segments(); } /* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. 
*/ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries) { struct ldt_struct *new_ldt; unsigned int alloc_size; if (num_entries > LDT_ENTRIES) return NULL; new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL_ACCOUNT); if (!new_ldt) return NULL; BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct)); alloc_size = num_entries * LDT_ENTRY_SIZE; /* * Xen is very picky: it requires a page-aligned LDT that has no * trailing nonzero bytes in any page that contains LDT descriptors. * Keep it simple: zero the whole allocation and never allocate less * than PAGE_SIZE. */ if (alloc_size > PAGE_SIZE) new_ldt->entries = __vmalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO); else new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); if (!new_ldt->entries) { kfree(new_ldt); return NULL; } /* The new LDT isn't aliased for PTI yet. */ new_ldt->slot = -1; new_ldt->nr_entries = num_entries; return new_ldt; } #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION static void do_sanity_check(struct mm_struct *mm, bool had_kernel_mapping, bool had_user_mapping) { if (mm->context.ldt) { /* * We already had an LDT. The top-level entry should already * have been allocated and synchronized with the usermode * tables. */ WARN_ON(!had_kernel_mapping); if (boot_cpu_has(X86_FEATURE_PTI)) WARN_ON(!had_user_mapping); } else { /* * This is the first time we're mapping an LDT for this process. * Sync the pgd to the usermode tables. */ WARN_ON(had_kernel_mapping); if (boot_cpu_has(X86_FEATURE_PTI)) WARN_ON(had_user_mapping); } } #ifdef CONFIG_X86_PAE static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va) { p4d_t *p4d; pud_t *pud; if (pgd->pgd == 0) return NULL; p4d = p4d_offset(pgd, va); if (p4d_none(*p4d)) return NULL; pud = pud_offset(p4d, va); if (pud_none(*pud)) return NULL; return pmd_offset(pud, va); } static void map_ldt_struct_to_user(struct mm_struct *mm) { pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR); pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd); pmd_t *k_pmd, *u_pmd; k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR); u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR); if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt) set_pmd(u_pmd, *k_pmd); } static void sanity_check_ldt_mapping(struct mm_struct *mm) { pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR); pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd); bool had_kernel, had_user; pmd_t *k_pmd, *u_pmd; k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR); u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR); had_kernel = (k_pmd->pmd != 0); had_user = (u_pmd->pmd != 0); do_sanity_check(mm, had_kernel, had_user); } #else /* !CONFIG_X86_PAE */ static void map_ldt_struct_to_user(struct mm_struct *mm) { pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR); if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt) set_pgd(kernel_to_user_pgdp(pgd), *pgd); } static void sanity_check_ldt_mapping(struct mm_struct *mm) { pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR); bool had_kernel = (pgd->pgd != 0); bool had_user = (kernel_to_user_pgdp(pgd)->pgd != 0); do_sanity_check(mm, had_kernel, had_user); } #endif /* CONFIG_X86_PAE */ /* * If PTI is enabled, this maps the LDT into the kernelmode and * usermode tables for the given mm. */ static int map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) { unsigned long va; bool is_vmalloc; spinlock_t *ptl; int i, nr_pages; if (!boot_cpu_has(X86_FEATURE_PTI)) return 0; /* * Any given ldt_struct should have map_ldt_struct() called at most * once. 
*/ WARN_ON(ldt->slot != -1); /* Check if the current mappings are sane */ sanity_check_ldt_mapping(mm); is_vmalloc = is_vmalloc_addr(ldt->entries); nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); for (i = 0; i < nr_pages; i++) { unsigned long offset = i << PAGE_SHIFT; const void *src = (char *)ldt->entries + offset; unsigned long pfn; pgprot_t pte_prot; pte_t pte, *ptep; va = (unsigned long)ldt_slot_va(slot) + offset; pfn = is_vmalloc ? vmalloc_to_pfn(src) : page_to_pfn(virt_to_page(src)); /* * Treat the PTI LDT range as a *userspace* range. * get_locked_pte() will allocate all needed pagetables * and account for them in this mm. */ ptep = get_locked_pte(mm, va, &ptl); if (!ptep) return -ENOMEM; /* * Map it RO so the easy to find address is not a primary * target via some kernel interface which misses a * permission check. */ pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL); /* Filter out unsuppored __PAGE_KERNEL* bits: */ pgprot_val(pte_prot) &= __supported_pte_mask; pte = pfn_pte(pfn, pte_prot); set_pte_at(mm, va, ptep, pte); pte_unmap_unlock(ptep, ptl); } /* Propagate LDT mapping to the user page-table */ map_ldt_struct_to_user(mm); ldt->slot = slot; return 0; } static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt) { unsigned long va; int i, nr_pages; if (!ldt) return; /* LDT map/unmap is only required for PTI */ if (!boot_cpu_has(X86_FEATURE_PTI)) return; nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); for (i = 0; i < nr_pages; i++) { unsigned long offset = i << PAGE_SHIFT; spinlock_t *ptl; pte_t *ptep; va = (unsigned long)ldt_slot_va(ldt->slot) + offset; ptep = get_locked_pte(mm, va, &ptl); if (!WARN_ON_ONCE(!ptep)) { pte_clear(mm, va, ptep); pte_unmap_unlock(ptep, ptl); } } va = (unsigned long)ldt_slot_va(ldt->slot); flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false); } #else /* !CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */ static int map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) { return 0; } static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt) { } #endif /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */ static void free_ldt_pgtables(struct mm_struct *mm) { #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION struct mmu_gather tlb; unsigned long start = LDT_BASE_ADDR; unsigned long end = LDT_END_ADDR; if (!boot_cpu_has(X86_FEATURE_PTI)) return; /* * Although free_pgd_range() is intended for freeing user * page-tables, it also works out for kernel mappings on x86. * We use tlb_gather_mmu_fullmm() to avoid confusing the * range-tracking logic in __tlb_adjust_range(). */ tlb_gather_mmu_fullmm(&tlb, mm); free_pgd_range(&tlb, start, end, start, end); tlb_finish_mmu(&tlb); #endif } /* After calling this, the LDT is immutable. */ static void finalize_ldt_struct(struct ldt_struct *ldt) { paravirt_alloc_ldt(ldt->entries, ldt->nr_entries); } static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt) { mutex_lock(&mm->context.lock); /* Synchronizes with READ_ONCE in load_mm_ldt. */ smp_store_release(&mm->context.ldt, ldt); /* Activate the LDT for all CPUs using currents mm. 
*/ on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true); mutex_unlock(&mm->context.lock); } static void free_ldt_struct(struct ldt_struct *ldt) { if (likely(!ldt)) return; paravirt_free_ldt(ldt->entries, ldt->nr_entries); if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE) vfree_atomic(ldt->entries); else free_page((unsigned long)ldt->entries); kfree(ldt); } /* * Called on fork from arch_dup_mmap(). Just copy the current LDT state, * the new task is not running, so nothing can be installed. */ int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm) { struct ldt_struct *new_ldt; int retval = 0; if (!old_mm) return 0; mutex_lock(&old_mm->context.lock); if (!old_mm->context.ldt) goto out_unlock; new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries); if (!new_ldt) { retval = -ENOMEM; goto out_unlock; } memcpy(new_ldt->entries, old_mm->context.ldt->entries, new_ldt->nr_entries * LDT_ENTRY_SIZE); finalize_ldt_struct(new_ldt); retval = map_ldt_struct(mm, new_ldt, 0); if (retval) { free_ldt_pgtables(mm); free_ldt_struct(new_ldt); goto out_unlock; } mm->context.ldt = new_ldt; out_unlock: mutex_unlock(&old_mm->context.lock); return retval; } /* * No need to lock the MM as we are the last user * * 64bit: Don't touch the LDT register - we're already in the next thread. */ void destroy_context_ldt(struct mm_struct *mm) { free_ldt_struct(mm->context.ldt); mm->context.ldt = NULL; } void ldt_arch_exit_mmap(struct mm_struct *mm) { free_ldt_pgtables(mm); } static int read_ldt(void __user *ptr, unsigned long bytecount) { struct mm_struct *mm = current->mm; unsigned long entries_size; int retval; down_read(&mm->context.ldt_usr_sem); if (!mm->context.ldt) { retval = 0; goto out_unlock; } if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE; if (entries_size > bytecount) entries_size = bytecount; if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) { retval = -EFAULT; goto out_unlock; } if (entries_size != bytecount) { /* Zero-fill the rest and pretend we read bytecount bytes. */ if (clear_user(ptr + entries_size, bytecount - entries_size)) { retval = -EFAULT; goto out_unlock; } } retval = bytecount; out_unlock: up_read(&mm->context.ldt_usr_sem); return retval; } static int read_default_ldt(void __user *ptr, unsigned long bytecount) { /* CHECKME: Can we use _one_ random number ? */ #ifdef CONFIG_X86_32 unsigned long size = 5 * sizeof(struct desc_struct); #else unsigned long size = 128; #endif if (bytecount > size) bytecount = size; if (clear_user(ptr, bytecount)) return -EFAULT; return bytecount; } static bool allow_16bit_segments(void) { if (!IS_ENABLED(CONFIG_X86_16BIT)) return false; #ifdef CONFIG_XEN_PV /* * Xen PV does not implement ESPFIX64, which means that 16-bit * segments will not work correctly. Until either Xen PV implements * ESPFIX64 and can signal this fact to the guest or unless someone * provides compelling evidence that allowing broken 16-bit segments * is worthwhile, disallow 16-bit segments under Xen PV. 
*/ if (xen_pv_domain()) { pr_info_once("Warning: 16-bit segments do not work correctly in a Xen PV guest\n"); return false; } #endif return true; } static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) { struct mm_struct *mm = current->mm; struct ldt_struct *new_ldt, *old_ldt; unsigned int old_nr_entries, new_nr_entries; struct user_desc ldt_info; struct desc_struct ldt; int error; error = -EINVAL; if (bytecount != sizeof(ldt_info)) goto out; error = -EFAULT; if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info))) goto out; error = -EINVAL; if (ldt_info.entry_number >= LDT_ENTRIES) goto out; if (ldt_info.contents == 3) { if (oldmode) goto out; if (ldt_info.seg_not_present == 0) goto out; } if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) || LDT_empty(&ldt_info)) { /* The user wants to clear the entry. */ memset(&ldt, 0, sizeof(ldt)); } else { if (!ldt_info.seg_32bit && !allow_16bit_segments()) { error = -EINVAL; goto out; } fill_ldt(&ldt, &ldt_info); if (oldmode) ldt.avl = 0; } if (down_write_killable(&mm->context.ldt_usr_sem)) return -EINTR; old_ldt = mm->context.ldt; old_nr_entries = old_ldt ? old_ldt->nr_entries : 0; new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries); error = -ENOMEM; new_ldt = alloc_ldt_struct(new_nr_entries); if (!new_ldt) goto out_unlock; if (old_ldt) memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE); new_ldt->entries[ldt_info.entry_number] = ldt; finalize_ldt_struct(new_ldt); /* * If we are using PTI, map the new LDT into the userspace pagetables. * If there is already an LDT, use the other slot so that other CPUs * will continue to use the old LDT until install_ldt() switches * them over to the new LDT. */ error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0); if (error) { /* * This only can fail for the first LDT setup. If an LDT is * already installed then the PTE page is already * populated. Mop up a half populated page table. */ if (!WARN_ON_ONCE(old_ldt)) free_ldt_pgtables(mm); free_ldt_struct(new_ldt); goto out_unlock; } install_ldt(mm, new_ldt); unmap_ldt_struct(mm, old_ldt); free_ldt_struct(old_ldt); error = 0; out_unlock: up_write(&mm->context.ldt_usr_sem); out: return error; } SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr , unsigned long , bytecount) { int ret = -ENOSYS; switch (func) { case 0: ret = read_ldt(ptr, bytecount); break; case 1: ret = write_ldt(ptr, bytecount, 1); break; case 2: ret = read_default_ldt(ptr, bytecount); break; case 0x11: ret = write_ldt(ptr, bytecount, 0); break; } /* * The SYSCALL_DEFINE() macros give us an 'unsigned long' * return type, but the ABI for sys_modify_ldt() expects * 'int'. This cast gives us an int-sized value in %rax * for the return code. The 'unsigned' is necessary so * the compiler does not try to sign-extend the negative * return codes into the high half of the register when * taking the value from int->long. */ return (unsigned int)ret; }
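modify_ldt() has no glibc wrapper, so userspace reaches the handler above through syscall(2). The following is a minimal, self-contained sketch (not part of the kernel source above) of the calling convention it implements: func 0x11 writes one struct user_desc entry and func 0 reads the table back. The buffer size and the choice of a flat 32-bit data segment are illustrative only.

/* Minimal userspace sketch of the modify_ldt() ABI implemented above. */
#include <asm/ldt.h>		/* struct user_desc, LDT_ENTRY_SIZE */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc desc;
	unsigned char table[16 * LDT_ENTRY_SIZE];	/* first 16 entries */
	long ret;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number   = 0;	/* slot in the LDT */
	desc.base_addr      = 0;
	desc.limit          = 0xfffff;	/* 4 GiB with limit_in_pages */
	desc.seg_32bit      = 1;
	desc.limit_in_pages = 1;

	/* func 0x11: write an entry with the non-"oldmode" semantics */
	ret = syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
	if (ret != 0) {
		perror("modify_ldt(write)");
		return 1;
	}

	/* func 0: read the current LDT; short reads are zero-filled */
	ret = syscall(SYS_modify_ldt, 0, table, sizeof(table));
	if (ret < 0) {
		perror("modify_ldt(read)");
		return 1;
	}
	printf("read %ld bytes of LDT data\n", ret);
	return 0;
}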
9 4 5 1 5 5 18 5 13 4 3 7 2 10 8 8 7 7 5 5 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 // SPDX-License-Identifier: GPL-2.0-or-later /* Public-key operation keyctls * * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <linux/slab.h> #include <linux/err.h> #include <linux/key.h> #include <linux/keyctl.h> #include <linux/parser.h> #include <linux/uaccess.h> #include <keys/user-type.h> #include "internal.h" static void keyctl_pkey_params_free(struct kernel_pkey_params *params) { kfree(params->info); key_put(params->key); } enum { Opt_err, Opt_enc, /* "enc=<encoding>" eg. "enc=oaep" */ Opt_hash, /* "hash=<digest-name>" eg. "hash=sha1" */ }; static const match_table_t param_keys = { { Opt_enc, "enc=%s" }, { Opt_hash, "hash=%s" }, { Opt_err, NULL } }; /* * Parse the information string which consists of key=val pairs. */ static int keyctl_pkey_params_parse(struct kernel_pkey_params *params) { unsigned long token_mask = 0; substring_t args[MAX_OPT_ARGS]; char *c = params->info, *p, *q; int token; while ((p = strsep(&c, " \t"))) { if (*p == '\0' || *p == ' ' || *p == '\t') continue; token = match_token(p, param_keys, args); if (token == Opt_err) return -EINVAL; if (__test_and_set_bit(token, &token_mask)) return -EINVAL; q = args[0].from; if (!q[0]) return -EINVAL; switch (token) { case Opt_enc: params->encoding = q; break; case Opt_hash: params->hash_algo = q; break; default: return -EINVAL; } } return 0; } /* * Interpret parameters. Callers must always call the free function * on params, even if an error is returned. */ static int keyctl_pkey_params_get(key_serial_t id, const char __user *_info, struct kernel_pkey_params *params) { key_ref_t key_ref; void *p; int ret; memset(params, 0, sizeof(*params)); params->encoding = "raw"; p = strndup_user(_info, PAGE_SIZE); if (IS_ERR(p)) return PTR_ERR(p); params->info = p; ret = keyctl_pkey_params_parse(params); if (ret < 0) return ret; key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) return PTR_ERR(key_ref); params->key = key_ref_to_ptr(key_ref); if (!params->key->type->asym_query) return -EOPNOTSUPP; return 0; } /* * Get parameters from userspace. Callers must always call the free function * on params, even if an error is returned. 
*/ static int keyctl_pkey_params_get_2(const struct keyctl_pkey_params __user *_params, const char __user *_info, int op, struct kernel_pkey_params *params) { struct keyctl_pkey_params uparams; struct kernel_pkey_query info; int ret; memset(params, 0, sizeof(*params)); params->encoding = "raw"; if (copy_from_user(&uparams, _params, sizeof(uparams)) != 0) return -EFAULT; ret = keyctl_pkey_params_get(uparams.key_id, _info, params); if (ret < 0) return ret; ret = params->key->type->asym_query(params, &info); if (ret < 0) return ret; switch (op) { case KEYCTL_PKEY_ENCRYPT: if (uparams.in_len > info.max_dec_size || uparams.out_len > info.max_enc_size) return -EINVAL; break; case KEYCTL_PKEY_DECRYPT: if (uparams.in_len > info.max_enc_size || uparams.out_len > info.max_dec_size) return -EINVAL; break; case KEYCTL_PKEY_SIGN: if (uparams.in_len > info.max_data_size || uparams.out_len > info.max_sig_size) return -EINVAL; break; case KEYCTL_PKEY_VERIFY: if (uparams.in_len > info.max_data_size || uparams.in2_len > info.max_sig_size) return -EINVAL; break; default: BUG(); } params->in_len = uparams.in_len; params->out_len = uparams.out_len; /* Note: same as in2_len */ return 0; } /* * Query information about an asymmetric key. */ long keyctl_pkey_query(key_serial_t id, const char __user *_info, struct keyctl_pkey_query __user *_res) { struct kernel_pkey_params params; struct kernel_pkey_query res; long ret; ret = keyctl_pkey_params_get(id, _info, &params); if (ret < 0) goto error; ret = params.key->type->asym_query(&params, &res); if (ret < 0) goto error; ret = -EFAULT; if (copy_to_user(_res, &res, sizeof(res)) == 0 && clear_user(_res->__spare, sizeof(_res->__spare)) == 0) ret = 0; error: keyctl_pkey_params_free(&params); return ret; } /* * Encrypt/decrypt/sign * * Encrypt data, decrypt data or sign data using a public key. * * _info is a string of supplementary information in key=val format. For * instance, it might contain: * * "enc=pkcs1 hash=sha256" * * where enc= specifies the encoding and hash= selects the OID to go in that * particular encoding if required. If enc= isn't supplied, it's assumed that * the caller is supplying raw values. * * If successful, the amount of data written into the output buffer is * returned. */ long keyctl_pkey_e_d_s(int op, const struct keyctl_pkey_params __user *_params, const char __user *_info, const void __user *_in, void __user *_out) { struct kernel_pkey_params params; void *in, *out; long ret; ret = keyctl_pkey_params_get_2(_params, _info, op, &params); if (ret < 0) goto error_params; ret = -EOPNOTSUPP; if (!params.key->type->asym_eds_op) goto error_params; switch (op) { case KEYCTL_PKEY_ENCRYPT: params.op = kernel_pkey_encrypt; break; case KEYCTL_PKEY_DECRYPT: params.op = kernel_pkey_decrypt; break; case KEYCTL_PKEY_SIGN: params.op = kernel_pkey_sign; break; default: BUG(); } in = memdup_user(_in, params.in_len); if (IS_ERR(in)) { ret = PTR_ERR(in); goto error_params; } ret = -ENOMEM; out = kmalloc(params.out_len, GFP_KERNEL); if (!out) goto error_in; ret = params.key->type->asym_eds_op(&params, in, out); if (ret < 0) goto error_out; if (copy_to_user(_out, out, ret) != 0) ret = -EFAULT; error_out: kfree(out); error_in: kfree(in); error_params: keyctl_pkey_params_free(&params); return ret; } /* * Verify a signature. * * Verify a public key signature using the given key, or if not given, search * for a matching key. * * _info is a string of supplementary information in key=val format. 
For * instance, it might contain: * * "enc=pkcs1 hash=sha256" * * where enc= specifies the signature blob encoding and hash= selects the OID * to go in that particular encoding. If enc= isn't supplied, it's assumed * that the caller is supplying raw values. * * If successful, 0 is returned. */ long keyctl_pkey_verify(const struct keyctl_pkey_params __user *_params, const char __user *_info, const void __user *_in, const void __user *_in2) { struct kernel_pkey_params params; void *in, *in2; long ret; ret = keyctl_pkey_params_get_2(_params, _info, KEYCTL_PKEY_VERIFY, &params); if (ret < 0) goto error_params; ret = -EOPNOTSUPP; if (!params.key->type->asym_verify_signature) goto error_params; in = memdup_user(_in, params.in_len); if (IS_ERR(in)) { ret = PTR_ERR(in); goto error_params; } in2 = memdup_user(_in2, params.in2_len); if (IS_ERR(in2)) { ret = PTR_ERR(in2); goto error_in; } params.op = kernel_pkey_verify; ret = params.key->type->asym_verify_signature(&params, in, in2); kfree(in2); error_in: kfree(in); error_params: keyctl_pkey_params_free(&params); return ret; }
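For context, the info string parsed by keyctl_pkey_params_parse() is supplied by userspace through the keyctl(2) pkey operations. The sketch below is only an illustration and assumes the keyctl_pkey_query()/keyctl_pkey_sign() wrappers that libkeyutils (1.6 and later) layers over this code; the wrapper signatures are quoted from memory and should be checked against keyutils.h on the target system.

/* Hypothetical userspace sketch: query an asymmetric key, then sign a digest
 * with the same "enc=pkcs1 hash=sha256" info string handled above. */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	struct keyctl_pkey_query q;
	unsigned char digest[32] = { 0 };	/* stand-in SHA-256 digest */
	unsigned char *sig;
	key_serial_t key;
	long ret;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <key id>\n", argv[0]);
		return 1;
	}
	key = (key_serial_t)strtol(argv[1], NULL, 0);

	/* Ask the key type how large its signatures can be. */
	if (keyctl_pkey_query(key, "enc=pkcs1 hash=sha256", &q) < 0) {
		perror("keyctl_pkey_query");
		return 1;
	}

	sig = malloc(q.max_sig_size);
	if (!sig)
		return 1;

	/* enc= selects the PKCS#1 encoding, hash= the digest OID to embed. */
	ret = keyctl_pkey_sign(key, "enc=pkcs1 hash=sha256",
			       digest, sizeof(digest), sig, q.max_sig_size);
	if (ret < 0)
		perror("keyctl_pkey_sign");
	else
		printf("signature: %ld bytes\n", ret);

	free(sig);
	return ret < 0 ? 1 : 0;
}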
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pegasus Mobile Notetaker Pen input tablet driver
 *
 * Copyright (c) 2016 Martin Kepplinger <martink@posteo.de>
 */

/*
 * request packet (control endpoint):
 * |-------------------------------------|
 * | Report ID | Nr of bytes | command   |
 * | (1 byte)  | (1 byte)    | (n bytes) |
 * |-------------------------------------|
 * | 0x02      | n           |           |
 * |-------------------------------------|
 *
 * data packet after set xy mode command, 0x80 0xb5 0x02 0x01
 * and pen is in range:
 *
 * byte  byte name   value (bits)
 * --------------------------------------------
 * 0     status      0 1 0 0 0 0 X X
 * 1     color       0 0 0 0 H 0 S T
 * 2     X low
 * 3     X high
 * 4     Y low
 * 5     Y high
 *
 * X X   battery state:
 *       no state reported   0x00
 *       battery low         0x01
 *       battery good        0x02
 *
 * H     Hovering
 * S     Switch 1 (pen button)
 * T     Tip
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/usb/input.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

/* USB HID defines */
#define USB_REQ_GET_REPORT	0x01
#define USB_REQ_SET_REPORT	0x09

#define USB_VENDOR_ID_PEGASUSTECH		0x0e20
#define USB_DEVICE_ID_PEGASUS_NOTETAKER_EN100	0x0101

/* device specific defines */
#define NOTETAKER_REPORT_ID	0x02
#define NOTETAKER_SET_CMD	0x80
#define NOTETAKER_SET_MODE	0xb5
#define NOTETAKER_LED_MOUSE	0x02
#define PEN_MODE_XY		0x01

#define SPECIAL_COMMAND		0x80
#define BUTTON_PRESSED		0xb5
#define COMMAND_VERSION		0xa9

/* in xy data packet */
#define BATTERY_NO_REPORT	0x40
#define BATTERY_LOW		0x41
#define BATTERY_GOOD		0x42

#define PEN_BUTTON_PRESSED	BIT(1)
#define PEN_TIP			BIT(0)

struct pegasus {
	unsigned char *data;
	u8 data_len;
	dma_addr_t data_dma;
	struct input_dev *dev;
	struct usb_device *usbdev;
	struct usb_interface
*intf; struct urb *irq; /* serialize access to open/suspend */ struct mutex pm_mutex; bool is_open; char name[128]; char phys[64]; struct work_struct init; }; static int pegasus_control_msg(struct pegasus *pegasus, u8 *data, int len) { const int sizeof_buf = len + 2; int result; int error; u8 *cmd_buf; cmd_buf = kmalloc(sizeof_buf, GFP_KERNEL); if (!cmd_buf) return -ENOMEM; cmd_buf[0] = NOTETAKER_REPORT_ID; cmd_buf[1] = len; memcpy(cmd_buf + 2, data, len); result = usb_control_msg(pegasus->usbdev, usb_sndctrlpipe(pegasus->usbdev, 0), USB_REQ_SET_REPORT, USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, cmd_buf, sizeof_buf, USB_CTRL_SET_TIMEOUT); kfree(cmd_buf); if (unlikely(result != sizeof_buf)) { error = result < 0 ? result : -EIO; dev_err(&pegasus->usbdev->dev, "control msg error: %d\n", error); return error; } return 0; } static int pegasus_set_mode(struct pegasus *pegasus, u8 mode, u8 led) { u8 cmd[] = { NOTETAKER_SET_CMD, NOTETAKER_SET_MODE, led, mode }; return pegasus_control_msg(pegasus, cmd, sizeof(cmd)); } static void pegasus_parse_packet(struct pegasus *pegasus) { unsigned char *data = pegasus->data; struct input_dev *dev = pegasus->dev; u16 x, y; switch (data[0]) { case SPECIAL_COMMAND: /* device button pressed */ if (data[1] == BUTTON_PRESSED) schedule_work(&pegasus->init); break; /* xy data */ case BATTERY_LOW: dev_warn_once(&dev->dev, "Pen battery low\n"); fallthrough; case BATTERY_NO_REPORT: case BATTERY_GOOD: x = le16_to_cpup((__le16 *)&data[2]); y = le16_to_cpup((__le16 *)&data[4]); /* pen-up event */ if (x == 0 && y == 0) break; input_report_key(dev, BTN_TOUCH, data[1] & PEN_TIP); input_report_key(dev, BTN_RIGHT, data[1] & PEN_BUTTON_PRESSED); input_report_key(dev, BTN_TOOL_PEN, 1); input_report_abs(dev, ABS_X, (s16)x); input_report_abs(dev, ABS_Y, y); input_sync(dev); break; default: dev_warn_once(&pegasus->usbdev->dev, "unknown answer from device\n"); } } static void pegasus_irq(struct urb *urb) { struct pegasus *pegasus = urb->context; struct usb_device *dev = pegasus->usbdev; int retval; switch (urb->status) { case 0: pegasus_parse_packet(pegasus); usb_mark_last_busy(pegasus->usbdev); break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: dev_err(&dev->dev, "%s - urb shutting down with status: %d", __func__, urb->status); return; default: dev_err(&dev->dev, "%s - nonzero urb status received: %d", __func__, urb->status); break; } retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&dev->dev, "%s - usb_submit_urb failed with result %d", __func__, retval); } static void pegasus_init(struct work_struct *work) { struct pegasus *pegasus = container_of(work, struct pegasus, init); int error; error = pegasus_set_mode(pegasus, PEN_MODE_XY, NOTETAKER_LED_MOUSE); if (error) dev_err(&pegasus->usbdev->dev, "pegasus_set_mode error: %d\n", error); } static int pegasus_open(struct input_dev *dev) { struct pegasus *pegasus = input_get_drvdata(dev); int error; error = usb_autopm_get_interface(pegasus->intf); if (error) return error; mutex_lock(&pegasus->pm_mutex); pegasus->irq->dev = pegasus->usbdev; if (usb_submit_urb(pegasus->irq, GFP_KERNEL)) { error = -EIO; goto err_autopm_put; } error = pegasus_set_mode(pegasus, PEN_MODE_XY, NOTETAKER_LED_MOUSE); if (error) goto err_kill_urb; pegasus->is_open = true; mutex_unlock(&pegasus->pm_mutex); return 0; err_kill_urb: usb_kill_urb(pegasus->irq); cancel_work_sync(&pegasus->init); err_autopm_put: mutex_unlock(&pegasus->pm_mutex); usb_autopm_put_interface(pegasus->intf); return error; } static void pegasus_close(struct input_dev *dev) { struct 
pegasus *pegasus = input_get_drvdata(dev); mutex_lock(&pegasus->pm_mutex); usb_kill_urb(pegasus->irq); cancel_work_sync(&pegasus->init); pegasus->is_open = false; mutex_unlock(&pegasus->pm_mutex); usb_autopm_put_interface(pegasus->intf); } static int pegasus_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *endpoint; struct pegasus *pegasus; struct input_dev *input_dev; int error; int pipe; /* We control interface 0 */ if (intf->cur_altsetting->desc.bInterfaceNumber >= 1) return -ENODEV; /* Sanity check that the device has an endpoint */ if (intf->cur_altsetting->desc.bNumEndpoints < 1) { dev_err(&intf->dev, "Invalid number of endpoints\n"); return -EINVAL; } endpoint = &intf->cur_altsetting->endpoint[0].desc; pegasus = kzalloc(sizeof(*pegasus), GFP_KERNEL); input_dev = input_allocate_device(); if (!pegasus || !input_dev) { error = -ENOMEM; goto err_free_mem; } mutex_init(&pegasus->pm_mutex); pegasus->usbdev = dev; pegasus->dev = input_dev; pegasus->intf = intf; pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); /* Sanity check that pipe's type matches endpoint's type */ if (usb_pipe_type_check(dev, pipe)) { error = -EINVAL; goto err_free_mem; } pegasus->data_len = usb_maxpacket(dev, pipe); pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL, &pegasus->data_dma); if (!pegasus->data) { error = -ENOMEM; goto err_free_mem; } pegasus->irq = usb_alloc_urb(0, GFP_KERNEL); if (!pegasus->irq) { error = -ENOMEM; goto err_free_dma; } usb_fill_int_urb(pegasus->irq, dev, pipe, pegasus->data, pegasus->data_len, pegasus_irq, pegasus, endpoint->bInterval); pegasus->irq->transfer_dma = pegasus->data_dma; pegasus->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; if (dev->manufacturer) strscpy(pegasus->name, dev->manufacturer, sizeof(pegasus->name)); if (dev->product) { if (dev->manufacturer) strlcat(pegasus->name, " ", sizeof(pegasus->name)); strlcat(pegasus->name, dev->product, sizeof(pegasus->name)); } if (!strlen(pegasus->name)) snprintf(pegasus->name, sizeof(pegasus->name), "USB Pegasus Device %04x:%04x", le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); usb_make_path(dev, pegasus->phys, sizeof(pegasus->phys)); strlcat(pegasus->phys, "/input0", sizeof(pegasus->phys)); INIT_WORK(&pegasus->init, pegasus_init); usb_set_intfdata(intf, pegasus); input_dev->name = pegasus->name; input_dev->phys = pegasus->phys; usb_to_input_id(dev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, pegasus); input_dev->open = pegasus_open; input_dev->close = pegasus_close; __set_bit(EV_ABS, input_dev->evbit); __set_bit(EV_KEY, input_dev->evbit); __set_bit(ABS_X, input_dev->absbit); __set_bit(ABS_Y, input_dev->absbit); __set_bit(BTN_TOUCH, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); __set_bit(INPUT_PROP_POINTER, input_dev->propbit); input_set_abs_params(input_dev, ABS_X, -1500, 1500, 8, 0); input_set_abs_params(input_dev, ABS_Y, 1600, 3000, 8, 0); error = input_register_device(pegasus->dev); if (error) goto err_free_urb; return 0; err_free_urb: usb_free_urb(pegasus->irq); err_free_dma: usb_free_coherent(dev, pegasus->data_len, pegasus->data, pegasus->data_dma); err_free_mem: input_free_device(input_dev); kfree(pegasus); usb_set_intfdata(intf, NULL); return error; } static void pegasus_disconnect(struct usb_interface *intf) { struct 
pegasus *pegasus = usb_get_intfdata(intf); input_unregister_device(pegasus->dev); usb_free_urb(pegasus->irq); usb_free_coherent(interface_to_usbdev(intf), pegasus->data_len, pegasus->data, pegasus->data_dma); kfree(pegasus); usb_set_intfdata(intf, NULL); } static int pegasus_suspend(struct usb_interface *intf, pm_message_t message) { struct pegasus *pegasus = usb_get_intfdata(intf); mutex_lock(&pegasus->pm_mutex); usb_kill_urb(pegasus->irq); cancel_work_sync(&pegasus->init); mutex_unlock(&pegasus->pm_mutex); return 0; } static int pegasus_resume(struct usb_interface *intf) { struct pegasus *pegasus = usb_get_intfdata(intf); int retval = 0; mutex_lock(&pegasus->pm_mutex); if (pegasus->is_open && usb_submit_urb(pegasus->irq, GFP_NOIO) < 0) retval = -EIO; mutex_unlock(&pegasus->pm_mutex); return retval; } static int pegasus_reset_resume(struct usb_interface *intf) { struct pegasus *pegasus = usb_get_intfdata(intf); int retval = 0; mutex_lock(&pegasus->pm_mutex); if (pegasus->is_open) { retval = pegasus_set_mode(pegasus, PEN_MODE_XY, NOTETAKER_LED_MOUSE); if (!retval && usb_submit_urb(pegasus->irq, GFP_NOIO) < 0) retval = -EIO; } mutex_unlock(&pegasus->pm_mutex); return retval; } static const struct usb_device_id pegasus_ids[] = { { USB_DEVICE(USB_VENDOR_ID_PEGASUSTECH, USB_DEVICE_ID_PEGASUS_NOTETAKER_EN100) }, { } }; MODULE_DEVICE_TABLE(usb, pegasus_ids); static struct usb_driver pegasus_driver = { .name = "pegasus_notetaker", .probe = pegasus_probe, .disconnect = pegasus_disconnect, .suspend = pegasus_suspend, .resume = pegasus_resume, .reset_resume = pegasus_reset_resume, .id_table = pegasus_ids, .supports_autosuspend = 1, }; module_usb_driver(pegasus_driver); MODULE_AUTHOR("Martin Kepplinger <martink@posteo.de>"); MODULE_DESCRIPTION("Pegasus Mobile Notetaker Pen tablet driver"); MODULE_LICENSE("GPL");
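The 6-byte xy packet layout documented at the top of this driver can also be decoded outside the kernel, for instance when replaying captured interrupt transfers. The sketch below mirrors what pegasus_parse_packet() does, in plain userspace C; the function name and the sample bytes are made up for illustration.

/* Illustrative host-side decoder for the Notetaker xy packet format above. */
#include <stdint.h>
#include <stdio.h>

static void decode_notetaker_packet(const uint8_t p[6])
{
	/* bytes 2..5 carry X and Y as little-endian 16-bit values */
	uint16_t x = (uint16_t)(p[2] | (p[3] << 8));
	uint16_t y = (uint16_t)(p[4] | (p[5] << 8));

	if (p[0] == 0x41)			/* BATTERY_LOW */
		printf("pen battery low\n");

	if (x == 0 && y == 0) {			/* pen-up event */
		printf("pen up\n");
		return;
	}

	printf("x=%d y=%d tip=%d button=%d hover=%d\n",
	       (int16_t)x, (int)y,
	       p[1] & 0x01,			/* PEN_TIP */
	       (p[1] >> 1) & 0x01,		/* PEN_BUTTON_PRESSED */
	       (p[1] >> 3) & 0x01);		/* H (hovering) bit */
}

int main(void)
{
	/* battery good, tip down, X = 0x0210, Y = 0x0320 */
	const uint8_t sample[6] = { 0x42, 0x01, 0x10, 0x02, 0x20, 0x03 };

	decode_notetaker_packet(sample);
	return 0;
}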
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/affs/inode.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
* * (C) 1991 Linus Torvalds - minix filesystem */ #include <linux/module.h> #include <linux/init.h> #include <linux/statfs.h> #include <linux/parser.h> #include <linux/magic.h> #include <linux/sched.h> #include <linux/cred.h> #include <linux/slab.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/seq_file.h> #include <linux/iversion.h> #include "affs.h" static int affs_statfs(struct dentry *dentry, struct kstatfs *buf); static int affs_show_options(struct seq_file *m, struct dentry *root); static int affs_remount (struct super_block *sb, int *flags, char *data); static void affs_commit_super(struct super_block *sb, int wait) { struct affs_sb_info *sbi = AFFS_SB(sb); struct buffer_head *bh = sbi->s_root_bh; struct affs_root_tail *tail = AFFS_ROOT_TAIL(sb, bh); lock_buffer(bh); affs_secs_to_datestamp(ktime_get_real_seconds(), &tail->disk_change); affs_fix_checksum(sb, bh); unlock_buffer(bh); mark_buffer_dirty(bh); if (wait) sync_dirty_buffer(bh); } static void affs_put_super(struct super_block *sb) { struct affs_sb_info *sbi = AFFS_SB(sb); pr_debug("%s()\n", __func__); cancel_delayed_work_sync(&sbi->sb_work); } static int affs_sync_fs(struct super_block *sb, int wait) { affs_commit_super(sb, wait); return 0; } static void flush_superblock(struct work_struct *work) { struct affs_sb_info *sbi; struct super_block *sb; sbi = container_of(work, struct affs_sb_info, sb_work.work); sb = sbi->sb; spin_lock(&sbi->work_lock); sbi->work_queued = 0; spin_unlock(&sbi->work_lock); affs_commit_super(sb, 1); } void affs_mark_sb_dirty(struct super_block *sb) { struct affs_sb_info *sbi = AFFS_SB(sb); unsigned long delay; if (sb_rdonly(sb)) return; spin_lock(&sbi->work_lock); if (!sbi->work_queued) { delay = msecs_to_jiffies(dirty_writeback_interval * 10); queue_delayed_work(system_long_wq, &sbi->sb_work, delay); sbi->work_queued = 1; } spin_unlock(&sbi->work_lock); } static struct kmem_cache * affs_inode_cachep; static struct inode *affs_alloc_inode(struct super_block *sb) { struct affs_inode_info *i; i = alloc_inode_sb(sb, affs_inode_cachep, GFP_KERNEL); if (!i) return NULL; inode_set_iversion(&i->vfs_inode, 1); i->i_lc = NULL; i->i_ext_bh = NULL; i->i_pa_cnt = 0; return &i->vfs_inode; } static void affs_free_inode(struct inode *inode) { kmem_cache_free(affs_inode_cachep, AFFS_I(inode)); } static void init_once(void *foo) { struct affs_inode_info *ei = (struct affs_inode_info *) foo; mutex_init(&ei->i_link_lock); mutex_init(&ei->i_ext_lock); inode_init_once(&ei->vfs_inode); } static int __init init_inodecache(void) { affs_inode_cachep = kmem_cache_create("affs_inode_cache", sizeof(struct affs_inode_info), 0, (SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT), init_once); if (affs_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. 
*/ rcu_barrier(); kmem_cache_destroy(affs_inode_cachep); } static const struct super_operations affs_sops = { .alloc_inode = affs_alloc_inode, .free_inode = affs_free_inode, .write_inode = affs_write_inode, .evict_inode = affs_evict_inode, .put_super = affs_put_super, .sync_fs = affs_sync_fs, .statfs = affs_statfs, .remount_fs = affs_remount, .show_options = affs_show_options, }; enum { Opt_bs, Opt_mode, Opt_mufs, Opt_notruncate, Opt_prefix, Opt_protect, Opt_reserved, Opt_root, Opt_setgid, Opt_setuid, Opt_verbose, Opt_volume, Opt_ignore, Opt_err, }; static const match_table_t tokens = { {Opt_bs, "bs=%u"}, {Opt_mode, "mode=%o"}, {Opt_mufs, "mufs"}, {Opt_notruncate, "nofilenametruncate"}, {Opt_prefix, "prefix=%s"}, {Opt_protect, "protect"}, {Opt_reserved, "reserved=%u"}, {Opt_root, "root=%u"}, {Opt_setgid, "setgid=%u"}, {Opt_setuid, "setuid=%u"}, {Opt_verbose, "verbose"}, {Opt_volume, "volume=%s"}, {Opt_ignore, "grpquota"}, {Opt_ignore, "noquota"}, {Opt_ignore, "quota"}, {Opt_ignore, "usrquota"}, {Opt_err, NULL}, }; static int parse_options(char *options, kuid_t *uid, kgid_t *gid, int *mode, int *reserved, s32 *root, int *blocksize, char **prefix, char *volume, unsigned long *mount_opts) { char *p; substring_t args[MAX_OPT_ARGS]; /* Fill in defaults */ *uid = current_uid(); *gid = current_gid(); *reserved = 2; *root = -1; *blocksize = -1; volume[0] = ':'; volume[1] = 0; *mount_opts = 0; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { int token, n, option; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_bs: if (match_int(&args[0], &n)) return 0; if (n != 512 && n != 1024 && n != 2048 && n != 4096) { pr_warn("Invalid blocksize (512, 1024, 2048, 4096 allowed)\n"); return 0; } *blocksize = n; break; case Opt_mode: if (match_octal(&args[0], &option)) return 0; *mode = option & 0777; affs_set_opt(*mount_opts, SF_SETMODE); break; case Opt_mufs: affs_set_opt(*mount_opts, SF_MUFS); break; case Opt_notruncate: affs_set_opt(*mount_opts, SF_NO_TRUNCATE); break; case Opt_prefix: kfree(*prefix); *prefix = match_strdup(&args[0]); if (!*prefix) return 0; affs_set_opt(*mount_opts, SF_PREFIX); break; case Opt_protect: affs_set_opt(*mount_opts, SF_IMMUTABLE); break; case Opt_reserved: if (match_int(&args[0], reserved)) return 0; break; case Opt_root: if (match_int(&args[0], root)) return 0; break; case Opt_setgid: if (match_int(&args[0], &option)) return 0; *gid = make_kgid(current_user_ns(), option); if (!gid_valid(*gid)) return 0; affs_set_opt(*mount_opts, SF_SETGID); break; case Opt_setuid: if (match_int(&args[0], &option)) return 0; *uid = make_kuid(current_user_ns(), option); if (!uid_valid(*uid)) return 0; affs_set_opt(*mount_opts, SF_SETUID); break; case Opt_verbose: affs_set_opt(*mount_opts, SF_VERBOSE); break; case Opt_volume: { char *vol = match_strdup(&args[0]); if (!vol) return 0; strscpy(volume, vol, 32); kfree(vol); break; } case Opt_ignore: /* Silently ignore the quota options */ break; default: pr_warn("Unrecognized mount option \"%s\" or missing value\n", p); return 0; } } return 1; } static int affs_show_options(struct seq_file *m, struct dentry *root) { struct super_block *sb = root->d_sb; struct affs_sb_info *sbi = AFFS_SB(sb); if (sb->s_blocksize) seq_printf(m, ",bs=%lu", sb->s_blocksize); if (affs_test_opt(sbi->s_flags, SF_SETMODE)) seq_printf(m, ",mode=%o", sbi->s_mode); if (affs_test_opt(sbi->s_flags, SF_MUFS)) seq_puts(m, ",mufs"); if (affs_test_opt(sbi->s_flags, SF_NO_TRUNCATE)) seq_puts(m, ",nofilenametruncate"); if 
(affs_test_opt(sbi->s_flags, SF_PREFIX)) seq_printf(m, ",prefix=%s", sbi->s_prefix); if (affs_test_opt(sbi->s_flags, SF_IMMUTABLE)) seq_puts(m, ",protect"); if (sbi->s_reserved != 2) seq_printf(m, ",reserved=%u", sbi->s_reserved); if (sbi->s_root_block != (sbi->s_reserved + sbi->s_partition_size - 1) / 2) seq_printf(m, ",root=%u", sbi->s_root_block); if (affs_test_opt(sbi->s_flags, SF_SETGID)) seq_printf(m, ",setgid=%u", from_kgid_munged(&init_user_ns, sbi->s_gid)); if (affs_test_opt(sbi->s_flags, SF_SETUID)) seq_printf(m, ",setuid=%u", from_kuid_munged(&init_user_ns, sbi->s_uid)); if (affs_test_opt(sbi->s_flags, SF_VERBOSE)) seq_puts(m, ",verbose"); if (sbi->s_volume[0]) seq_printf(m, ",volume=%s", sbi->s_volume); return 0; } /* This function definitely needs to be split up. Some fine day I'll * hopefully have the guts to do so. Until then: sorry for the mess. */ static int affs_fill_super(struct super_block *sb, void *data, int silent) { struct affs_sb_info *sbi; struct buffer_head *root_bh = NULL; struct buffer_head *boot_bh; struct inode *root_inode = NULL; s32 root_block; int size, blocksize; u32 chksum; int num_bm; int i, j; kuid_t uid; kgid_t gid; int reserved; unsigned long mount_flags; int tmp_flags; /* fix remount prototype... */ u8 sig[4]; int ret; pr_debug("read_super(%s)\n", data ? (const char *)data : "no options"); sb->s_magic = AFFS_SUPER_MAGIC; sb->s_op = &affs_sops; sb->s_flags |= SB_NODIRATIME; sb->s_time_gran = NSEC_PER_SEC; sb->s_time_min = sys_tz.tz_minuteswest * 60 + AFFS_EPOCH_DELTA; sb->s_time_max = 86400LL * U32_MAX + 86400 + sb->s_time_min; sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; sb->s_fs_info = sbi; sbi->sb = sb; mutex_init(&sbi->s_bmlock); spin_lock_init(&sbi->symlink_lock); spin_lock_init(&sbi->work_lock); INIT_DELAYED_WORK(&sbi->sb_work, flush_superblock); if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block, &blocksize,&sbi->s_prefix, sbi->s_volume, &mount_flags)) { pr_err("Error parsing options\n"); return -EINVAL; } /* N.B. after this point s_prefix must be released */ sbi->s_flags = mount_flags; sbi->s_mode = i; sbi->s_uid = uid; sbi->s_gid = gid; sbi->s_reserved= reserved; /* Get the size of the device in 512-byte blocks. * If we later see that the partition uses bigger * blocks, we will have to change it. */ size = bdev_nr_sectors(sb->s_bdev); pr_debug("initial blocksize=%d, #blocks=%d\n", 512, size); affs_set_blocksize(sb, PAGE_SIZE); /* Try to find root block. Its location depends on the block size. */ i = bdev_logical_block_size(sb->s_bdev); j = PAGE_SIZE; if (blocksize > 0) { i = j = blocksize; size = size / (blocksize / 512); } for (blocksize = i; blocksize <= j; blocksize <<= 1, size >>= 1) { sbi->s_root_block = root_block; if (root_block < 0) sbi->s_root_block = (reserved + size - 1) / 2; pr_debug("setting blocksize to %d\n", blocksize); affs_set_blocksize(sb, blocksize); sbi->s_partition_size = size; /* The root block location that was calculated above is not * correct if the partition size is an odd number of 512- * byte blocks, which will be rounded down to a number of * 1024-byte blocks, and if there were an even number of * reserved blocks. Ideally, all partition checkers should * report the real number of blocks of the real blocksize, * but since this just cannot be done, we have to try to * find the root block anyways. In the above case, it is one * block behind the calculated one. So we check this one, too. 
*/ for (num_bm = 0; num_bm < 2; num_bm++) { pr_debug("Dev %s, trying root=%u, bs=%d, " "size=%d, reserved=%d\n", sb->s_id, sbi->s_root_block + num_bm, blocksize, size, reserved); root_bh = affs_bread(sb, sbi->s_root_block + num_bm); if (!root_bh) continue; if (!affs_checksum_block(sb, root_bh) && be32_to_cpu(AFFS_ROOT_HEAD(root_bh)->ptype) == T_SHORT && be32_to_cpu(AFFS_ROOT_TAIL(sb, root_bh)->stype) == ST_ROOT) { sbi->s_hashsize = blocksize / 4 - 56; sbi->s_root_block += num_bm; goto got_root; } affs_brelse(root_bh); root_bh = NULL; } } if (!silent) pr_err("No valid root block on device %s\n", sb->s_id); return -EINVAL; /* N.B. after this point bh must be released */ got_root: /* Keep super block in cache */ sbi->s_root_bh = root_bh; root_block = sbi->s_root_block; /* Find out which kind of FS we have */ boot_bh = sb_bread(sb, 0); if (!boot_bh) { pr_err("Cannot read boot block\n"); return -EINVAL; } memcpy(sig, boot_bh->b_data, 4); brelse(boot_bh); chksum = be32_to_cpu(*(__be32 *)sig); /* Dircache filesystems are compatible with non-dircache ones * when reading. As long as they aren't supported, writing is * not recommended. */ if ((chksum == FS_DCFFS || chksum == MUFS_DCFFS || chksum == FS_DCOFS || chksum == MUFS_DCOFS) && !sb_rdonly(sb)) { pr_notice("Dircache FS - mounting %s read only\n", sb->s_id); sb->s_flags |= SB_RDONLY; } switch (chksum) { case MUFS_FS: case MUFS_INTLFFS: case MUFS_DCFFS: affs_set_opt(sbi->s_flags, SF_MUFS); fallthrough; case FS_INTLFFS: case FS_DCFFS: affs_set_opt(sbi->s_flags, SF_INTL); break; case MUFS_FFS: affs_set_opt(sbi->s_flags, SF_MUFS); break; case FS_FFS: break; case MUFS_OFS: affs_set_opt(sbi->s_flags, SF_MUFS); fallthrough; case FS_OFS: affs_set_opt(sbi->s_flags, SF_OFS); sb->s_flags |= SB_NOEXEC; break; case MUFS_DCOFS: case MUFS_INTLOFS: affs_set_opt(sbi->s_flags, SF_MUFS); fallthrough; case FS_DCOFS: case FS_INTLOFS: affs_set_opt(sbi->s_flags, SF_INTL); affs_set_opt(sbi->s_flags, SF_OFS); sb->s_flags |= SB_NOEXEC; break; default: pr_err("Unknown filesystem on device %s: %08X\n", sb->s_id, chksum); return -EINVAL; } if (affs_test_opt(mount_flags, SF_VERBOSE)) { u8 len = AFFS_ROOT_TAIL(sb, root_bh)->disk_name[0]; pr_notice("Mounting volume \"%.*s\": Type=%.3s\\%c, Blocksize=%d\n", len > 31 ? 
31 : len, AFFS_ROOT_TAIL(sb, root_bh)->disk_name + 1, sig, sig[3] + '0', blocksize); } sb->s_flags |= SB_NODEV | SB_NOSUID; sbi->s_data_blksize = sb->s_blocksize; if (affs_test_opt(sbi->s_flags, SF_OFS)) sbi->s_data_blksize -= 24; tmp_flags = sb->s_flags; ret = affs_init_bitmap(sb, &tmp_flags); if (ret) return ret; sb->s_flags = tmp_flags; /* set up enough so that it can read an inode */ root_inode = affs_iget(sb, root_block); if (IS_ERR(root_inode)) return PTR_ERR(root_inode); if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_INTL)) sb->s_d_op = &affs_intl_dentry_operations; else sb->s_d_op = &affs_dentry_operations; sb->s_root = d_make_root(root_inode); if (!sb->s_root) { pr_err("AFFS: Get root inode failed\n"); return -ENOMEM; } sb->s_export_op = &affs_export_ops; pr_debug("s_flags=%lX\n", sb->s_flags); return 0; } static int affs_remount(struct super_block *sb, int *flags, char *data) { struct affs_sb_info *sbi = AFFS_SB(sb); int blocksize; kuid_t uid; kgid_t gid; int mode; int reserved; int root_block; unsigned long mount_flags; int res = 0; char volume[32]; char *prefix = NULL; pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data); sync_filesystem(sb); *flags |= SB_NODIRATIME; memcpy(volume, sbi->s_volume, 32); if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block, &blocksize, &prefix, volume, &mount_flags)) { kfree(prefix); return -EINVAL; } flush_delayed_work(&sbi->sb_work); sbi->s_flags = mount_flags; sbi->s_mode = mode; sbi->s_uid = uid; sbi->s_gid = gid; /* protect against readers */ spin_lock(&sbi->symlink_lock); if (prefix) { kfree(sbi->s_prefix); sbi->s_prefix = prefix; } memcpy(sbi->s_volume, volume, 32); spin_unlock(&sbi->symlink_lock); if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb)) return 0; if (*flags & SB_RDONLY) affs_free_bitmap(sb); else res = affs_init_bitmap(sb, flags); return res; } static int affs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; int free; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); pr_debug("%s() partsize=%d, reserved=%d\n", __func__, AFFS_SB(sb)->s_partition_size, AFFS_SB(sb)->s_reserved); free = affs_count_free_blocks(sb); buf->f_type = AFFS_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = AFFS_SB(sb)->s_partition_size - AFFS_SB(sb)->s_reserved; buf->f_bfree = free; buf->f_bavail = free; buf->f_fsid = u64_to_fsid(id); buf->f_namelen = AFFSNAMEMAX; return 0; } static struct dentry *affs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, affs_fill_super); } static void affs_kill_sb(struct super_block *sb) { struct affs_sb_info *sbi = AFFS_SB(sb); kill_block_super(sb); if (sbi) { affs_free_bitmap(sb); affs_brelse(sbi->s_root_bh); kfree(sbi->s_prefix); mutex_destroy(&sbi->s_bmlock); kfree_rcu(sbi, rcu); } } static struct file_system_type affs_fs_type = { .owner = THIS_MODULE, .name = "affs", .mount = affs_mount, .kill_sb = affs_kill_sb, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("affs"); static int __init init_affs_fs(void) { int err = init_inodecache(); if (err) goto out1; err = register_filesystem(&affs_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_affs_fs(void) { unregister_filesystem(&affs_fs_type); destroy_inodecache(); } MODULE_DESCRIPTION("Amiga filesystem support for Linux"); MODULE_LICENSE("GPL"); module_init(init_affs_fs) module_exit(exit_affs_fs)
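Since the option table above is only exercised at mount time, a short userspace illustration may help: the sketch below passes a parse_options()-compatible option string via mount(2). The device node, mount point, and uid/gid values are placeholders, and the call requires CAP_SYS_ADMIN.

/* Illustrative mount(2) call using options handled by parse_options() above. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "verbose", "setuid=", "setgid=" and "volume=" map to Opt_verbose,
	 * Opt_setuid, Opt_setgid and Opt_volume in the token table above;
	 * an unrecognized option makes parse_options() reject the mount. */
	const char *opts = "verbose,setuid=1000,setgid=1000,volume=Work";

	if (mount("/dev/loop0", "/mnt/amiga", "affs", MS_RDONLY, opts) != 0) {
		perror("mount(affs)");
		return 1;
	}

	printf("AFFS volume mounted read-only\n");
	return 0;
}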
854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 /* * Copyright (c) 2015, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "core_priv.h" #include <linux/in.h> #include <linux/in6.h> /* For in6_dev_get/in6_dev_put */ #include <net/addrconf.h> #include <net/bonding.h> #include <rdma/ib_cache.h> #include <rdma/ib_addr.h> static struct workqueue_struct *gid_cache_wq; enum gid_op_type { GID_DEL = 0, GID_ADD }; struct update_gid_event_work { struct work_struct work; union ib_gid gid; struct ib_gid_attr gid_attr; enum gid_op_type gid_op; }; #define ROCE_NETDEV_CALLBACK_SZ 3 struct netdev_event_work_cmd { roce_netdev_callback cb; roce_netdev_filter filter; struct net_device *ndev; struct net_device *filter_ndev; }; struct netdev_event_work { struct work_struct work; struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ]; }; static const struct { bool (*is_supported)(const struct ib_device *device, u32 port_num); enum ib_gid_type gid_type; } PORT_CAP_TO_GID_TYPE[] = { {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE}, {rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP}, }; #define CAP_TO_GID_TABLE_SIZE ARRAY_SIZE(PORT_CAP_TO_GID_TYPE) unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port) { int i; unsigned int ret_flags = 0; if (!rdma_protocol_roce(ib_dev, port)) return 1UL << IB_GID_TYPE_IB; for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++) if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port)) ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type; return ret_flags; } EXPORT_SYMBOL(roce_gid_type_mask_support); static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev, u32 port, union ib_gid *gid, struct ib_gid_attr *gid_attr) { int i; unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port); for (i = 0; i < IB_GID_TYPE_SIZE; i++) { if ((1UL << i) & gid_type_mask) { gid_attr->gid_type = i; switch (gid_op) { case GID_ADD: ib_cache_gid_add(ib_dev, port, gid, gid_attr); break; case GID_DEL: ib_cache_gid_del(ib_dev, port, gid, gid_attr); 
break; } } } } enum bonding_slave_state { BONDING_SLAVE_STATE_ACTIVE = 1UL << 0, BONDING_SLAVE_STATE_INACTIVE = 1UL << 1, /* No primary slave or the device isn't a slave in bonding */ BONDING_SLAVE_STATE_NA = 1UL << 2, }; static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev, struct net_device *upper) { if (upper && netif_is_bond_master(upper)) { struct net_device *pdev = bond_option_active_slave_get_rcu(netdev_priv(upper)); if (pdev) return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE : BONDING_SLAVE_STATE_INACTIVE; } return BONDING_SLAVE_STATE_NA; } #define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE | \ BONDING_SLAVE_STATE_NA) static bool is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *real_dev; bool res; if (!rdma_ndev) return false; rcu_read_lock(); real_dev = rdma_vlan_dev_real_dev(cookie); if (!real_dev) real_dev = cookie; res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) && (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) & REQUIRED_BOND_STATES)) || real_dev == rdma_ndev); rcu_read_unlock(); return res; } static bool is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *master_dev; bool res; if (!rdma_ndev) return false; rcu_read_lock(); master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev); res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) == BONDING_SLAVE_STATE_INACTIVE; rcu_read_unlock(); return res; } /** * is_ndev_for_default_gid_filter - Check if a given netdevice * can be considered for default GIDs or not. * @ib_dev: IB device to check * @port: Port to consider for adding default GID * @rdma_ndev: rdma netdevice pointer * @cookie: Netdevice to consider to form a default GID * * is_ndev_for_default_gid_filter() returns true if a given netdevice can be * considered for deriving default RoCE GID, returns false otherwise. */ static bool is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *cookie_ndev = cookie; bool res; if (!rdma_ndev) return false; rcu_read_lock(); /* * When rdma netdevice is used in bonding, bonding master netdevice * should be considered for default GIDs. Therefore, ignore slave rdma * netdevices when bonding is considered. * Additionally when event(cookie) netdevice is bond master device, * make sure that it the upper netdevice of rdma netdevice. */ res = ((cookie_ndev == rdma_ndev && !netif_is_bond_slave(rdma_ndev)) || (netif_is_bond_master(cookie_ndev) && rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))); rcu_read_unlock(); return res; } static bool pass_all_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { return true; } static bool upper_device_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { bool res; if (!rdma_ndev) return false; if (rdma_ndev == cookie) return true; rcu_read_lock(); res = rdma_is_upper_dev_rcu(rdma_ndev, cookie); rcu_read_unlock(); return res; } /** * is_upper_ndev_bond_master_filter - Check if a given netdevice * is bond master device of netdevice of the RDMA device of port. 
* @ib_dev: IB device to check * @port: Port to consider for adding default GID * @rdma_ndev: Pointer to rdma netdevice * @cookie: Netdevice to consider to form a default GID * * is_upper_ndev_bond_master_filter() returns true if a cookie_netdev * is bond master device and rdma_ndev is its lower netdevice. It might * not have been established as slave device yet. */ static bool is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *cookie_ndev = cookie; bool match = false; if (!rdma_ndev) return false; rcu_read_lock(); if (netif_is_bond_master(cookie_ndev) && rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)) match = true; rcu_read_unlock(); return match; } static void update_gid_ip(enum gid_op_type gid_op, struct ib_device *ib_dev, u32 port, struct net_device *ndev, struct sockaddr *addr) { union ib_gid gid; struct ib_gid_attr gid_attr; rdma_ip2gid(addr, &gid); memset(&gid_attr, 0, sizeof(gid_attr)); gid_attr.ndev = ndev; update_gid(gid_op, ib_dev, port, &gid, &gid_attr); } static void bond_delete_netdev_default_gids(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, struct net_device *event_ndev) { struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev); unsigned long gid_type_mask; if (!rdma_ndev) return; if (!real_dev) real_dev = event_ndev; rcu_read_lock(); if (((rdma_ndev != event_ndev && !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) || is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) == BONDING_SLAVE_STATE_INACTIVE)) { rcu_read_unlock(); return; } rcu_read_unlock(); gid_type_mask = roce_gid_type_mask_support(ib_dev, port); ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, gid_type_mask, IB_CACHE_GID_DEFAULT_MODE_DELETE); } static void enum_netdev_ipv4_ips(struct ib_device *ib_dev, u32 port, struct net_device *ndev) { const struct in_ifaddr *ifa; struct in_device *in_dev; struct sin_list { struct list_head list; struct sockaddr_in ip; }; struct sin_list *sin_iter; struct sin_list *sin_temp; LIST_HEAD(sin_list); if (ndev->reg_state >= NETREG_UNREGISTERING) return; rcu_read_lock(); in_dev = __in_dev_get_rcu(ndev); if (!in_dev) { rcu_read_unlock(); return; } in_dev_for_each_ifa_rcu(ifa, in_dev) { struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) continue; entry->ip.sin_family = AF_INET; entry->ip.sin_addr.s_addr = ifa->ifa_address; list_add_tail(&entry->list, &sin_list); } rcu_read_unlock(); list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) { update_gid_ip(GID_ADD, ib_dev, port, ndev, (struct sockaddr *)&sin_iter->ip); list_del(&sin_iter->list); kfree(sin_iter); } } static void enum_netdev_ipv6_ips(struct ib_device *ib_dev, u32 port, struct net_device *ndev) { struct inet6_ifaddr *ifp; struct inet6_dev *in6_dev; struct sin6_list { struct list_head list; struct sockaddr_in6 sin6; }; struct sin6_list *sin6_iter; struct sin6_list *sin6_temp; struct ib_gid_attr gid_attr = {.ndev = ndev}; LIST_HEAD(sin6_list); if (ndev->reg_state >= NETREG_UNREGISTERING) return; in6_dev = in6_dev_get(ndev); if (!in6_dev) return; read_lock_bh(&in6_dev->lock); list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) continue; entry->sin6.sin6_family = AF_INET6; entry->sin6.sin6_addr = ifp->addr; list_add_tail(&entry->list, &sin6_list); } read_unlock_bh(&in6_dev->lock); in6_dev_put(in6_dev); list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) { union ib_gid gid; rdma_ip2gid((struct 
sockaddr *)&sin6_iter->sin6, &gid); update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr); list_del(&sin6_iter->list); kfree(sin6_iter); } } static void _add_netdev_ips(struct ib_device *ib_dev, u32 port, struct net_device *ndev) { enum_netdev_ipv4_ips(ib_dev, port, ndev); if (IS_ENABLED(CONFIG_IPV6)) enum_netdev_ipv6_ips(ib_dev, port, ndev); } static void add_netdev_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { _add_netdev_ips(ib_dev, port, cookie); } static void del_netdev_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie); } /** * del_default_gids - Delete default GIDs of the event/cookie netdevice * @ib_dev: RDMA device pointer * @port: Port of the RDMA device whose GID table to consider * @rdma_ndev: Unused rdma netdevice * @cookie: Pointer to event netdevice * * del_default_gids() deletes the default GIDs of the event/cookie netdevice. */ static void del_default_gids(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *cookie_ndev = cookie; unsigned long gid_type_mask; gid_type_mask = roce_gid_type_mask_support(ib_dev, port); ib_cache_gid_set_default_gid(ib_dev, port, cookie_ndev, gid_type_mask, IB_CACHE_GID_DEFAULT_MODE_DELETE); } static void add_default_gids(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *event_ndev = cookie; unsigned long gid_type_mask; gid_type_mask = roce_gid_type_mask_support(ib_dev, port); ib_cache_gid_set_default_gid(ib_dev, port, event_ndev, gid_type_mask, IB_CACHE_GID_DEFAULT_MODE_SET); } static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net *net; struct net_device *ndev; /* Lock the rtnl to make sure the netdevs does not move under * our feet */ rtnl_lock(); down_read(&net_rwsem); for_each_net(net) for_each_netdev(net, ndev) { /* * Filter and add default GIDs of the primary netdevice * when not in bonding mode, or add default GIDs * of bond master device, when in bonding mode. */ if (is_ndev_for_default_gid_filter(ib_dev, port, rdma_ndev, ndev)) add_default_gids(ib_dev, port, rdma_ndev, ndev); if (is_eth_port_of_netdev_filter(ib_dev, port, rdma_ndev, ndev)) _add_netdev_ips(ib_dev, port, ndev); } up_read(&net_rwsem); rtnl_unlock(); } /** * rdma_roce_rescan_device - Rescan all of the network devices in the system * and add their gids, as needed, to the relevant RoCE devices. 
* * @ib_dev: the rdma device */ void rdma_roce_rescan_device(struct ib_device *ib_dev) { ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL, enum_all_gids_of_dev_cb, NULL); } EXPORT_SYMBOL(rdma_roce_rescan_device); static void callback_for_addr_gid_device_scan(struct ib_device *device, u32 port, struct net_device *rdma_ndev, void *cookie) { struct update_gid_event_work *parsed = cookie; return update_gid(parsed->gid_op, device, port, &parsed->gid, &parsed->gid_attr); } struct upper_list { struct list_head list; struct net_device *upper; }; static int netdev_upper_walk(struct net_device *upper, struct netdev_nested_priv *priv) { struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC); struct list_head *upper_list = (struct list_head *)priv->data; if (!entry) return 0; list_add_tail(&entry->list, upper_list); dev_hold(upper); entry->upper = upper; return 0; } static void handle_netdev_upper(struct ib_device *ib_dev, u32 port, void *cookie, void (*handle_netdev)(struct ib_device *ib_dev, u32 port, struct net_device *ndev)) { struct net_device *ndev = cookie; struct netdev_nested_priv priv; struct upper_list *upper_iter; struct upper_list *upper_temp; LIST_HEAD(upper_list); priv.data = &upper_list; rcu_read_lock(); netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &priv); rcu_read_unlock(); handle_netdev(ib_dev, port, ndev); list_for_each_entry_safe(upper_iter, upper_temp, &upper_list, list) { handle_netdev(ib_dev, port, upper_iter->upper); dev_put(upper_iter->upper); list_del(&upper_iter->list); kfree(upper_iter); } } static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u32 port, struct net_device *event_ndev) { ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev); } static void del_netdev_upper_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids); } static void add_netdev_upper_ips(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips); } static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port, struct net_device *rdma_ndev, void *cookie) { struct net_device *master_ndev; rcu_read_lock(); master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev); dev_hold(master_ndev); rcu_read_unlock(); if (master_ndev) { bond_delete_netdev_default_gids(ib_dev, port, rdma_ndev, master_ndev); dev_put(master_ndev); } } /* The following functions operate on all IB devices. netdevice_event and * addr_event execute ib_enum_all_roce_netdevs through a work. * ib_enum_all_roce_netdevs iterates through all IB devices. 
*/ static void netdevice_event_work_handler(struct work_struct *_work) { struct netdev_event_work *work = container_of(_work, struct netdev_event_work, work); unsigned int i; for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) { ib_enum_all_roce_netdevs(work->cmds[i].filter, work->cmds[i].filter_ndev, work->cmds[i].cb, work->cmds[i].ndev); dev_put(work->cmds[i].ndev); dev_put(work->cmds[i].filter_ndev); } kfree(work); } static int netdevice_queue_work(struct netdev_event_work_cmd *cmds, struct net_device *ndev) { unsigned int i; struct netdev_event_work *ndev_work = kmalloc(sizeof(*ndev_work), GFP_KERNEL); if (!ndev_work) return NOTIFY_DONE; memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds)); for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) { if (!ndev_work->cmds[i].ndev) ndev_work->cmds[i].ndev = ndev; if (!ndev_work->cmds[i].filter_ndev) ndev_work->cmds[i].filter_ndev = ndev; dev_hold(ndev_work->cmds[i].ndev); dev_hold(ndev_work->cmds[i].filter_ndev); } INIT_WORK(&ndev_work->work, netdevice_event_work_handler); queue_work(gid_cache_wq, &ndev_work->work); return NOTIFY_DONE; } static const struct netdev_event_work_cmd add_cmd = { .cb = add_netdev_ips, .filter = is_eth_port_of_netdev_filter }; static const struct netdev_event_work_cmd add_cmd_upper_ips = { .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev_filter }; static void ndev_event_unlink(struct netdev_notifier_changeupper_info *changeupper_info, struct netdev_event_work_cmd *cmds) { static const struct netdev_event_work_cmd upper_ips_del_cmd = { .cb = del_netdev_upper_ips, .filter = upper_device_filter }; cmds[0] = upper_ips_del_cmd; cmds[0].ndev = changeupper_info->upper_dev; cmds[1] = add_cmd; } static const struct netdev_event_work_cmd bonding_default_add_cmd = { .cb = add_default_gids, .filter = is_upper_ndev_bond_master_filter }; static void ndev_event_link(struct net_device *event_ndev, struct netdev_notifier_changeupper_info *changeupper_info, struct netdev_event_work_cmd *cmds) { static const struct netdev_event_work_cmd bonding_default_del_cmd = { .cb = del_default_gids, .filter = is_upper_ndev_bond_master_filter }; /* * When a lower netdev is linked to its upper bonding * netdev, delete lower slave netdev's default GIDs. 
*/ cmds[0] = bonding_default_del_cmd; cmds[0].ndev = event_ndev; cmds[0].filter_ndev = changeupper_info->upper_dev; /* Now add bonding upper device default GIDs */ cmds[1] = bonding_default_add_cmd; cmds[1].ndev = changeupper_info->upper_dev; cmds[1].filter_ndev = changeupper_info->upper_dev; /* Now add bonding upper device IP based GIDs */ cmds[2] = add_cmd_upper_ips; cmds[2].ndev = changeupper_info->upper_dev; cmds[2].filter_ndev = changeupper_info->upper_dev; } static void netdevice_event_changeupper(struct net_device *event_ndev, struct netdev_notifier_changeupper_info *changeupper_info, struct netdev_event_work_cmd *cmds) { if (changeupper_info->linking) ndev_event_link(event_ndev, changeupper_info, cmds); else ndev_event_unlink(changeupper_info, cmds); } static const struct netdev_event_work_cmd add_default_gid_cmd = { .cb = add_default_gids, .filter = is_ndev_for_default_gid_filter, }; static int netdevice_event(struct notifier_block *this, unsigned long event, void *ptr) { static const struct netdev_event_work_cmd del_cmd = { .cb = del_netdev_ips, .filter = pass_all_filter}; static const struct netdev_event_work_cmd bonding_default_del_cmd_join = { .cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave_filter }; static const struct netdev_event_work_cmd netdev_del_cmd = { .cb = del_netdev_ips, .filter = is_eth_port_of_netdev_filter }; static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = { .cb = del_netdev_upper_ips, .filter = upper_device_filter}; struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} }; if (ndev->type != ARPHRD_ETHER) return NOTIFY_DONE; switch (event) { case NETDEV_REGISTER: case NETDEV_UP: cmds[0] = bonding_default_del_cmd_join; cmds[1] = add_default_gid_cmd; cmds[2] = add_cmd; break; case NETDEV_UNREGISTER: if (ndev->reg_state < NETREG_UNREGISTERED) cmds[0] = del_cmd; else return NOTIFY_DONE; break; case NETDEV_CHANGEADDR: cmds[0] = netdev_del_cmd; if (ndev->reg_state == NETREG_REGISTERED) { cmds[1] = add_default_gid_cmd; cmds[2] = add_cmd; } break; case NETDEV_CHANGEUPPER: netdevice_event_changeupper(ndev, container_of(ptr, struct netdev_notifier_changeupper_info, info), cmds); break; case NETDEV_BONDING_FAILOVER: cmds[0] = bonding_event_ips_del_cmd; /* Add default GIDs of the bond device */ cmds[1] = bonding_default_add_cmd; /* Add IP based GIDs of the bond device */ cmds[2] = add_cmd_upper_ips; break; default: return NOTIFY_DONE; } return netdevice_queue_work(cmds, ndev); } static void update_gid_event_work_handler(struct work_struct *_work) { struct update_gid_event_work *work = container_of(_work, struct update_gid_event_work, work); ib_enum_all_roce_netdevs(is_eth_port_of_netdev_filter, work->gid_attr.ndev, callback_for_addr_gid_device_scan, work); dev_put(work->gid_attr.ndev); kfree(work); } static int addr_event(struct notifier_block *this, unsigned long event, struct sockaddr *sa, struct net_device *ndev) { struct update_gid_event_work *work; enum gid_op_type gid_op; if (ndev->type != ARPHRD_ETHER) return NOTIFY_DONE; switch (event) { case NETDEV_UP: gid_op = GID_ADD; break; case NETDEV_DOWN: gid_op = GID_DEL; break; default: return NOTIFY_DONE; } work = kmalloc(sizeof(*work), GFP_ATOMIC); if (!work) return NOTIFY_DONE; INIT_WORK(&work->work, update_gid_event_work_handler); rdma_ip2gid(sa, &work->gid); work->gid_op = gid_op; memset(&work->gid_attr, 0, sizeof(work->gid_attr)); dev_hold(ndev); work->gid_attr.ndev = ndev; queue_work(gid_cache_wq, 
&work->work); return NOTIFY_DONE; } static int inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct sockaddr_in in; struct net_device *ndev; struct in_ifaddr *ifa = ptr; in.sin_family = AF_INET; in.sin_addr.s_addr = ifa->ifa_address; ndev = ifa->ifa_dev->dev; return addr_event(this, event, (struct sockaddr *)&in, ndev); } static int inet6addr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct sockaddr_in6 in6; struct net_device *ndev; struct inet6_ifaddr *ifa6 = ptr; in6.sin6_family = AF_INET6; in6.sin6_addr = ifa6->addr; ndev = ifa6->idev->dev; return addr_event(this, event, (struct sockaddr *)&in6, ndev); } static struct notifier_block nb_netdevice = { .notifier_call = netdevice_event }; static struct notifier_block nb_inetaddr = { .notifier_call = inetaddr_event }; static struct notifier_block nb_inet6addr = { .notifier_call = inet6addr_event }; int __init roce_gid_mgmt_init(void) { gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0); if (!gid_cache_wq) return -ENOMEM; register_inetaddr_notifier(&nb_inetaddr); if (IS_ENABLED(CONFIG_IPV6)) register_inet6addr_notifier(&nb_inet6addr); /* We relay on the netdevice notifier to enumerate all * existing devices in the system. Register to this notifier * last to make sure we will not miss any IP add/del * callbacks. */ register_netdevice_notifier(&nb_netdevice); return 0; } void __exit roce_gid_mgmt_cleanup(void) { if (IS_ENABLED(CONFIG_IPV6)) unregister_inet6addr_notifier(&nb_inet6addr); unregister_inetaddr_notifier(&nb_inetaddr); unregister_netdevice_notifier(&nb_netdevice); /* Ensure all gid deletion tasks complete before we go down, * to avoid any reference to free'd memory. By the time * ib-core is removed, all physical devices have been removed, * so no issue with remaining hardware contexts. */ destroy_workqueue(gid_cache_wq); }
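The file above wires the inet, inet6 and netdevice notifier chains into the RoCE GID cache: address and link events are queued on gid_cache_wq and replayed against every RDMA device through ib_enum_all_roce_netdevs(). The two exported entry points, roce_gid_type_mask_support() and rdma_roce_rescan_device(), are what a RoCE provider would call directly. Below is a minimal, hypothetical provider fragment showing that usage; example_roce_start() and the header location are assumptions, not part of the file above.

/*
 * Hedged sketch of how a RoCE provider might use the two exported helpers
 * above after bringing a port up. example_roce_start() is hypothetical and
 * the header providing the declarations is assumed to be <rdma/ib_verbs.h>;
 * only roce_gid_type_mask_support() and rdma_roce_rescan_device() come from
 * the file above.
 */
#include <rdma/ib_verbs.h>

static int example_roce_start(struct ib_device *ibdev, u32 port)
{
	/* Bit mask of supported GID types (RoCE v1 and/or RoCE v2) */
	unsigned long gid_types = roce_gid_type_mask_support(ibdev, port);

	pr_info("port %u GID type mask 0x%lx\n", port, gid_types);

	/*
	 * Replay the netdev/IP enumeration so addresses configured before
	 * this device registered still get IP-based and default GIDs.
	 */
	rdma_roce_rescan_device(ibdev);
	return 0;
}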
2 2 2 2 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 // SPDX-License-Identifier: GPL-2.0-or-later /* * Behringer BCD2000 driver * * Copyright (C) 2014 Mario Kicherer (dev@kicherer.org) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bitmap.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/rawmidi.h> #define PREFIX "snd-bcd2000: " #define BUFSIZE 64 static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1397, 0x00bd) }, { }, }; static const unsigned char device_cmd_prefix[] = {0x03, 0x00}; static const unsigned char bcd2000_init_sequence[] = { 0x07, 0x00, 0x00, 0x00, 0x78, 0x48, 0x1c, 0x81, 0xc4, 0x00, 0x00, 0x00, 0x5e, 0x53, 0x4a, 0xf7, 0x18, 0xfa, 0x11, 0xff, 0x6c, 0xf3, 0x90, 0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x18, 0xfa, 0x11, 0xff, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0x34, 0x4a, 0xf7, 0x18, 0xfa, 0x11, 0xff }; struct bcd2000 { struct usb_device *dev; struct snd_card *card; struct usb_interface *intf; int card_index; int midi_out_active; struct snd_rawmidi *rmidi; struct snd_rawmidi_substream *midi_receive_substream; struct snd_rawmidi_substream *midi_out_substream; unsigned char midi_in_buf[BUFSIZE]; unsigned char midi_out_buf[BUFSIZE]; struct urb *midi_out_urb; struct urb *midi_in_urb; struct usb_anchor anchor; }; static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static DEFINE_MUTEX(devices_mutex); static DECLARE_BITMAP(devices_used, SNDRV_CARDS); static struct usb_driver bcd2000_driver; #ifdef CONFIG_SND_DEBUG static void bcd2000_dump_buffer(const char *prefix, const char *buf, int len) { print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE, 16, 1, buf, len, false); } 
#else static void bcd2000_dump_buffer(const char *prefix, const char *buf, int len) {} #endif static int bcd2000_midi_input_open(struct snd_rawmidi_substream *substream) { return 0; } static int bcd2000_midi_input_close(struct snd_rawmidi_substream *substream) { return 0; } /* (de)register midi substream from client */ static void bcd2000_midi_input_trigger(struct snd_rawmidi_substream *substream, int up) { struct bcd2000 *bcd2k = substream->rmidi->private_data; bcd2k->midi_receive_substream = up ? substream : NULL; } static void bcd2000_midi_handle_input(struct bcd2000 *bcd2k, const unsigned char *buf, unsigned int buf_len) { unsigned int payload_length, tocopy; struct snd_rawmidi_substream *midi_receive_substream; midi_receive_substream = READ_ONCE(bcd2k->midi_receive_substream); if (!midi_receive_substream) return; bcd2000_dump_buffer(PREFIX "received from device: ", buf, buf_len); if (buf_len < 2) return; payload_length = buf[0]; /* ignore packets without payload */ if (payload_length == 0) return; tocopy = min(payload_length, buf_len-1); bcd2000_dump_buffer(PREFIX "sending to userspace: ", &buf[1], tocopy); snd_rawmidi_receive(midi_receive_substream, &buf[1], tocopy); } static void bcd2000_midi_send(struct bcd2000 *bcd2k) { int len, ret; struct snd_rawmidi_substream *midi_out_substream; BUILD_BUG_ON(sizeof(device_cmd_prefix) >= BUFSIZE); midi_out_substream = READ_ONCE(bcd2k->midi_out_substream); if (!midi_out_substream) return; /* copy command prefix bytes */ memcpy(bcd2k->midi_out_buf, device_cmd_prefix, sizeof(device_cmd_prefix)); /* * get MIDI packet and leave space for command prefix * and payload length */ len = snd_rawmidi_transmit(midi_out_substream, bcd2k->midi_out_buf + 3, BUFSIZE - 3); if (len < 0) dev_err(&bcd2k->dev->dev, "%s: snd_rawmidi_transmit error %d\n", __func__, len); if (len <= 0) return; /* set payload length */ bcd2k->midi_out_buf[2] = len; bcd2k->midi_out_urb->transfer_buffer_length = BUFSIZE; bcd2000_dump_buffer(PREFIX "sending to device: ", bcd2k->midi_out_buf, len+3); /* send packet to the BCD2000 */ ret = usb_submit_urb(bcd2k->midi_out_urb, GFP_ATOMIC); if (ret < 0) dev_err(&bcd2k->dev->dev, PREFIX "%s (%p): usb_submit_urb() failed, ret=%d, len=%d\n", __func__, midi_out_substream, ret, len); else bcd2k->midi_out_active = 1; } static int bcd2000_midi_output_open(struct snd_rawmidi_substream *substream) { return 0; } static int bcd2000_midi_output_close(struct snd_rawmidi_substream *substream) { struct bcd2000 *bcd2k = substream->rmidi->private_data; if (bcd2k->midi_out_active) { usb_kill_urb(bcd2k->midi_out_urb); bcd2k->midi_out_active = 0; } return 0; } /* (de)register midi substream from client */ static void bcd2000_midi_output_trigger(struct snd_rawmidi_substream *substream, int up) { struct bcd2000 *bcd2k = substream->rmidi->private_data; if (up) { bcd2k->midi_out_substream = substream; /* check if there is data userspace wants to send */ if (!bcd2k->midi_out_active) bcd2000_midi_send(bcd2k); } else { bcd2k->midi_out_substream = NULL; } } static void bcd2000_output_complete(struct urb *urb) { struct bcd2000 *bcd2k = urb->context; bcd2k->midi_out_active = 0; if (urb->status) dev_warn(&urb->dev->dev, PREFIX "output urb->status: %d\n", urb->status); if (urb->status == -ESHUTDOWN) return; /* check if there is more data userspace wants to send */ bcd2000_midi_send(bcd2k); } static void bcd2000_input_complete(struct urb *urb) { int ret; struct bcd2000 *bcd2k = urb->context; if (urb->status) dev_warn(&urb->dev->dev, PREFIX "input urb->status: %i\n", 
urb->status); if (!bcd2k || urb->status == -ESHUTDOWN) return; if (urb->actual_length > 0) bcd2000_midi_handle_input(bcd2k, urb->transfer_buffer, urb->actual_length); /* return URB to device */ ret = usb_submit_urb(bcd2k->midi_in_urb, GFP_ATOMIC); if (ret < 0) dev_err(&bcd2k->dev->dev, PREFIX "%s: usb_submit_urb() failed, ret=%d\n", __func__, ret); } static const struct snd_rawmidi_ops bcd2000_midi_output = { .open = bcd2000_midi_output_open, .close = bcd2000_midi_output_close, .trigger = bcd2000_midi_output_trigger, }; static const struct snd_rawmidi_ops bcd2000_midi_input = { .open = bcd2000_midi_input_open, .close = bcd2000_midi_input_close, .trigger = bcd2000_midi_input_trigger, }; static void bcd2000_init_device(struct bcd2000 *bcd2k) { int ret; init_usb_anchor(&bcd2k->anchor); usb_anchor_urb(bcd2k->midi_out_urb, &bcd2k->anchor); usb_anchor_urb(bcd2k->midi_in_urb, &bcd2k->anchor); /* copy init sequence into buffer */ memcpy(bcd2k->midi_out_buf, bcd2000_init_sequence, 52); bcd2k->midi_out_urb->transfer_buffer_length = 52; /* submit sequence */ ret = usb_submit_urb(bcd2k->midi_out_urb, GFP_KERNEL); if (ret < 0) dev_err(&bcd2k->dev->dev, PREFIX "%s: usb_submit_urb() out failed, ret=%d: ", __func__, ret); else bcd2k->midi_out_active = 1; /* pass URB to device to enable button and controller events */ ret = usb_submit_urb(bcd2k->midi_in_urb, GFP_KERNEL); if (ret < 0) dev_err(&bcd2k->dev->dev, PREFIX "%s: usb_submit_urb() in failed, ret=%d: ", __func__, ret); /* ensure initialization is finished */ usb_wait_anchor_empty_timeout(&bcd2k->anchor, 1000); } static int bcd2000_init_midi(struct bcd2000 *bcd2k) { int ret; struct snd_rawmidi *rmidi; ret = snd_rawmidi_new(bcd2k->card, bcd2k->card->shortname, 0, 1, /* output */ 1, /* input */ &rmidi); if (ret < 0) return ret; strscpy(rmidi->name, bcd2k->card->shortname, sizeof(rmidi->name)); rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; rmidi->private_data = bcd2k; rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT; snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &bcd2000_midi_output); rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT; snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &bcd2000_midi_input); bcd2k->rmidi = rmidi; bcd2k->midi_in_urb = usb_alloc_urb(0, GFP_KERNEL); bcd2k->midi_out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!bcd2k->midi_in_urb || !bcd2k->midi_out_urb) { dev_err(&bcd2k->dev->dev, PREFIX "usb_alloc_urb failed\n"); return -ENOMEM; } usb_fill_int_urb(bcd2k->midi_in_urb, bcd2k->dev, usb_rcvintpipe(bcd2k->dev, 0x81), bcd2k->midi_in_buf, BUFSIZE, bcd2000_input_complete, bcd2k, 1); usb_fill_int_urb(bcd2k->midi_out_urb, bcd2k->dev, usb_sndintpipe(bcd2k->dev, 0x1), bcd2k->midi_out_buf, BUFSIZE, bcd2000_output_complete, bcd2k, 1); /* sanity checks of EPs before actually submitting */ if (usb_urb_ep_type_check(bcd2k->midi_in_urb) || usb_urb_ep_type_check(bcd2k->midi_out_urb)) { dev_err(&bcd2k->dev->dev, "invalid MIDI EP\n"); return -EINVAL; } bcd2000_init_device(bcd2k); return 0; } static void bcd2000_free_usb_related_resources(struct bcd2000 *bcd2k, struct usb_interface *interface) { usb_kill_urb(bcd2k->midi_out_urb); usb_kill_urb(bcd2k->midi_in_urb); usb_free_urb(bcd2k->midi_out_urb); usb_free_urb(bcd2k->midi_in_urb); if (bcd2k->intf) { usb_set_intfdata(bcd2k->intf, NULL); bcd2k->intf = NULL; } } static int bcd2000_probe(struct usb_interface *interface, const struct usb_device_id *usb_id) { struct snd_card *card; struct bcd2000 *bcd2k; unsigned int card_index; char usb_path[32]; int err; mutex_lock(&devices_mutex); for 
(card_index = 0; card_index < SNDRV_CARDS; ++card_index) if (!test_bit(card_index, devices_used)) break; if (card_index >= SNDRV_CARDS) { mutex_unlock(&devices_mutex); return -ENOENT; } err = snd_card_new(&interface->dev, index[card_index], id[card_index], THIS_MODULE, sizeof(*bcd2k), &card); if (err < 0) { mutex_unlock(&devices_mutex); return err; } bcd2k = card->private_data; bcd2k->dev = interface_to_usbdev(interface); bcd2k->card = card; bcd2k->card_index = card_index; bcd2k->intf = interface; snd_card_set_dev(card, &interface->dev); strscpy(card->driver, "snd-bcd2000", sizeof(card->driver)); strscpy(card->shortname, "BCD2000", sizeof(card->shortname)); usb_make_path(bcd2k->dev, usb_path, sizeof(usb_path)); snprintf(bcd2k->card->longname, sizeof(bcd2k->card->longname), "Behringer BCD2000 at %s", usb_path); err = bcd2000_init_midi(bcd2k); if (err < 0) goto probe_error; err = snd_card_register(card); if (err < 0) goto probe_error; usb_set_intfdata(interface, bcd2k); set_bit(card_index, devices_used); mutex_unlock(&devices_mutex); return 0; probe_error: dev_info(&bcd2k->dev->dev, PREFIX "error during probing"); bcd2000_free_usb_related_resources(bcd2k, interface); snd_card_free(card); mutex_unlock(&devices_mutex); return err; } static void bcd2000_disconnect(struct usb_interface *interface) { struct bcd2000 *bcd2k = usb_get_intfdata(interface); if (!bcd2k) return; mutex_lock(&devices_mutex); /* make sure that userspace cannot create new requests */ snd_card_disconnect(bcd2k->card); bcd2000_free_usb_related_resources(bcd2k, interface); clear_bit(bcd2k->card_index, devices_used); snd_card_free_when_closed(bcd2k->card); mutex_unlock(&devices_mutex); } static struct usb_driver bcd2000_driver = { .name = "snd-bcd2000", .probe = bcd2000_probe, .disconnect = bcd2000_disconnect, .id_table = id_table, }; module_usb_driver(bcd2000_driver); MODULE_DEVICE_TABLE(usb, id_table); MODULE_AUTHOR("Mario Kicherer, dev@kicherer.org"); MODULE_DESCRIPTION("Behringer BCD2000 driver"); MODULE_LICENSE("GPL");
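Because the driver above registers a plain ALSA rawmidi device, ordinary MIDI applications can talk to the BCD2000 without any device-specific code: bytes written from userspace reach bcd2000_midi_output_trigger() and are framed with the two-byte command prefix and length in bcd2000_midi_send(). A minimal userspace sketch using alsa-lib follows; the card address "hw:1,0,0" is a placeholder for whatever index the card actually gets (see amidi -l).

/*
 * Userspace sketch (alsa-lib, not kernel code): write raw MIDI bytes to the
 * rawmidi device registered by the driver above.
 * Build with: gcc midi_note.c -lasound
 */
#include <alsa/asoundlib.h>
#include <stdio.h>

int main(void)
{
	snd_rawmidi_t *out = NULL;
	/* MIDI note-on, middle C, velocity 100, channel 1 */
	const unsigned char note_on[3] = { 0x90, 0x3c, 0x64 };
	int err;

	err = snd_rawmidi_open(NULL, &out, "hw:1,0,0", 0);
	if (err < 0) {
		fprintf(stderr, "open failed: %s\n", snd_strerror(err));
		return 1;
	}

	/* These bytes end up in bcd2000_midi_send() via snd_rawmidi_transmit() */
	snd_rawmidi_write(out, note_on, sizeof(note_on));
	snd_rawmidi_drain(out);
	snd_rawmidi_close(out);
	return 0;
}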
10 7 17 9 6 1 24 25 18 7 24 23 1 24 11 19 17 30 29 22 36 3 9 2 4 52 52 2 25 41 41 26 16 52 53 48 44 41 26 16 15 25 25 24 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014 Fraunhofer ITWM * * Written by: * Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de> */ #include <linux/ieee802154.h> #include <net/mac802154.h> #include <net/ieee802154_netdev.h> static int ieee802154_hdr_push_addr(u8 *buf, const struct ieee802154_addr *addr, bool omit_pan) { int pos = 0; if (addr->mode == IEEE802154_ADDR_NONE) return 0; if (!omit_pan) { memcpy(buf + pos, &addr->pan_id, 2); pos += 2; } switch (addr->mode) { case IEEE802154_ADDR_SHORT: memcpy(buf + pos, &addr->short_addr, 2); pos += 2; break; case IEEE802154_ADDR_LONG: memcpy(buf + pos, &addr->extended_addr, IEEE802154_ADDR_LEN); pos += IEEE802154_ADDR_LEN; break; default: return -EINVAL; } return pos; } static int ieee802154_hdr_push_sechdr(u8 *buf, const struct ieee802154_sechdr *hdr) { int pos = 5; memcpy(buf, hdr, 1); memcpy(buf + 1, &hdr->frame_counter, 4); switch (hdr->key_id_mode) { case IEEE802154_SCF_KEY_IMPLICIT: return pos; case IEEE802154_SCF_KEY_INDEX: break; case IEEE802154_SCF_KEY_SHORT_INDEX: memcpy(buf + pos, &hdr->short_src, 4); pos += 4; break; case IEEE802154_SCF_KEY_HW_INDEX: memcpy(buf + pos, &hdr->extended_src, IEEE802154_ADDR_LEN); pos += IEEE802154_ADDR_LEN; break; } buf[pos++] = hdr->key_id; return pos; } int ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr) { u8 buf[IEEE802154_MAX_HEADER_LEN]; int pos = 2; int rc; struct ieee802154_hdr_fc *fc = &hdr->fc; buf[pos++] = hdr->seq; fc->dest_addr_mode = hdr->dest.mode; rc = ieee802154_hdr_push_addr(buf + pos, &hdr->dest, false); if (rc < 0) return -EINVAL; pos += rc; fc->source_addr_mode = hdr->source.mode; if (hdr->source.pan_id == hdr->dest.pan_id && hdr->dest.mode != IEEE802154_ADDR_NONE) fc->intra_pan = true; rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc->intra_pan); if (rc < 0) return -EINVAL; pos += rc; if (fc->security_enabled) { fc->version = 1; rc = ieee802154_hdr_push_sechdr(buf + pos, &hdr->sec); if (rc < 0) return -EINVAL; pos += rc; } 
memcpy(buf, fc, 2); memcpy(skb_push(skb, pos), buf, pos); return pos; } EXPORT_SYMBOL_GPL(ieee802154_hdr_push); int ieee802154_mac_cmd_push(struct sk_buff *skb, void *f, const void *pl, unsigned int pl_len) { struct ieee802154_mac_cmd_frame *frame = f; struct ieee802154_mac_cmd_pl *mac_pl = &frame->mac_pl; struct ieee802154_hdr *mhr = &frame->mhr; int ret; skb_reserve(skb, sizeof(*mhr)); ret = ieee802154_hdr_push(skb, mhr); if (ret < 0) return ret; skb_reset_mac_header(skb); skb->mac_len = ret; skb_put_data(skb, mac_pl, sizeof(*mac_pl)); skb_put_data(skb, pl, pl_len); return 0; } EXPORT_SYMBOL_GPL(ieee802154_mac_cmd_push); int ieee802154_beacon_push(struct sk_buff *skb, struct ieee802154_beacon_frame *beacon) { struct ieee802154_beacon_hdr *mac_pl = &beacon->mac_pl; struct ieee802154_hdr *mhr = &beacon->mhr; int ret; skb_reserve(skb, sizeof(*mhr)); ret = ieee802154_hdr_push(skb, mhr); if (ret < 0) return ret; skb_reset_mac_header(skb); skb->mac_len = ret; skb_put_data(skb, mac_pl, sizeof(*mac_pl)); if (mac_pl->pend_short_addr_count || mac_pl->pend_ext_addr_count) return -EOPNOTSUPP; return 0; } EXPORT_SYMBOL_GPL(ieee802154_beacon_push); static int ieee802154_hdr_get_addr(const u8 *buf, int mode, bool omit_pan, struct ieee802154_addr *addr) { int pos = 0; addr->mode = mode; if (mode == IEEE802154_ADDR_NONE) return 0; if (!omit_pan) { memcpy(&addr->pan_id, buf + pos, 2); pos += 2; } if (mode == IEEE802154_ADDR_SHORT) { memcpy(&addr->short_addr, buf + pos, 2); return pos + 2; } else { memcpy(&addr->extended_addr, buf + pos, IEEE802154_ADDR_LEN); return pos + IEEE802154_ADDR_LEN; } } static int ieee802154_hdr_addr_len(int mode, bool omit_pan) { int pan_len = omit_pan ? 0 : 2; switch (mode) { case IEEE802154_ADDR_NONE: return 0; case IEEE802154_ADDR_SHORT: return 2 + pan_len; case IEEE802154_ADDR_LONG: return IEEE802154_ADDR_LEN + pan_len; default: return -EINVAL; } } static int ieee802154_hdr_get_sechdr(const u8 *buf, struct ieee802154_sechdr *hdr) { int pos = 5; memcpy(hdr, buf, 1); memcpy(&hdr->frame_counter, buf + 1, 4); switch (hdr->key_id_mode) { case IEEE802154_SCF_KEY_IMPLICIT: return pos; case IEEE802154_SCF_KEY_INDEX: break; case IEEE802154_SCF_KEY_SHORT_INDEX: memcpy(&hdr->short_src, buf + pos, 4); pos += 4; break; case IEEE802154_SCF_KEY_HW_INDEX: memcpy(&hdr->extended_src, buf + pos, IEEE802154_ADDR_LEN); pos += IEEE802154_ADDR_LEN; break; } hdr->key_id = buf[pos++]; return pos; } static int ieee802154_sechdr_lengths[4] = { [IEEE802154_SCF_KEY_IMPLICIT] = 5, [IEEE802154_SCF_KEY_INDEX] = 6, [IEEE802154_SCF_KEY_SHORT_INDEX] = 10, [IEEE802154_SCF_KEY_HW_INDEX] = 14, }; static int ieee802154_hdr_sechdr_len(u8 sc) { return ieee802154_sechdr_lengths[IEEE802154_SCF_KEY_ID_MODE(sc)]; } static int ieee802154_hdr_minlen(const struct ieee802154_hdr *hdr) { int dlen, slen; dlen = ieee802154_hdr_addr_len(hdr->fc.dest_addr_mode, false); slen = ieee802154_hdr_addr_len(hdr->fc.source_addr_mode, hdr->fc.intra_pan); if (slen < 0 || dlen < 0) return -EINVAL; return 3 + dlen + slen + hdr->fc.security_enabled; } static int ieee802154_hdr_get_addrs(const u8 *buf, struct ieee802154_hdr *hdr) { int pos = 0; pos += ieee802154_hdr_get_addr(buf + pos, hdr->fc.dest_addr_mode, false, &hdr->dest); pos += ieee802154_hdr_get_addr(buf + pos, hdr->fc.source_addr_mode, hdr->fc.intra_pan, &hdr->source); if (hdr->fc.intra_pan) hdr->source.pan_id = hdr->dest.pan_id; return pos; } int ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr) { int pos = 3, rc; if (!pskb_may_pull(skb, 3)) return -EINVAL; 
memcpy(hdr, skb->data, 3); rc = ieee802154_hdr_minlen(hdr); if (rc < 0 || !pskb_may_pull(skb, rc)) return -EINVAL; pos += ieee802154_hdr_get_addrs(skb->data + pos, hdr); if (hdr->fc.security_enabled) { int want = pos + ieee802154_hdr_sechdr_len(skb->data[pos]); if (!pskb_may_pull(skb, want)) return -EINVAL; pos += ieee802154_hdr_get_sechdr(skb->data + pos, &hdr->sec); } skb_pull(skb, pos); return pos; } EXPORT_SYMBOL_GPL(ieee802154_hdr_pull); int ieee802154_mac_cmd_pl_pull(struct sk_buff *skb, struct ieee802154_mac_cmd_pl *mac_pl) { if (!pskb_may_pull(skb, sizeof(*mac_pl))) return -EINVAL; memcpy(mac_pl, skb->data, sizeof(*mac_pl)); skb_pull(skb, sizeof(*mac_pl)); return 0; } EXPORT_SYMBOL_GPL(ieee802154_mac_cmd_pl_pull); int ieee802154_hdr_peek_addrs(const struct sk_buff *skb, struct ieee802154_hdr *hdr) { const u8 *buf = skb_mac_header(skb); int pos = 3, rc; if (buf + 3 > skb_tail_pointer(skb)) return -EINVAL; memcpy(hdr, buf, 3); rc = ieee802154_hdr_minlen(hdr); if (rc < 0 || buf + rc > skb_tail_pointer(skb)) return -EINVAL; pos += ieee802154_hdr_get_addrs(buf + pos, hdr); return pos; } EXPORT_SYMBOL_GPL(ieee802154_hdr_peek_addrs); int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr) { const u8 *buf = skb_mac_header(skb); int pos; pos = ieee802154_hdr_peek_addrs(skb, hdr); if (pos < 0) return -EINVAL; if (hdr->fc.security_enabled) { u8 key_id_mode = IEEE802154_SCF_KEY_ID_MODE(*(buf + pos)); int want = pos + ieee802154_sechdr_lengths[key_id_mode]; if (buf + want > skb_tail_pointer(skb)) return -EINVAL; pos += ieee802154_hdr_get_sechdr(buf + pos, &hdr->sec); } return pos; } EXPORT_SYMBOL_GPL(ieee802154_hdr_peek); int ieee802154_max_payload(const struct ieee802154_hdr *hdr) { int hlen = ieee802154_hdr_minlen(hdr); if (hdr->fc.security_enabled) { hlen += ieee802154_sechdr_lengths[hdr->sec.key_id_mode] - 1; hlen += ieee802154_sechdr_authtag_len(&hdr->sec); } return IEEE802154_MTU - hlen - IEEE802154_MFR_SIZE; } EXPORT_SYMBOL_GPL(ieee802154_max_payload);
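The header and payload sizing above is easiest to see with a concrete frame: for a data frame carrying a short destination address with its PAN ID (4 bytes), a short source address with the PAN ID compressed away by intra_pan (2 bytes) and no security header, ieee802154_hdr_minlen() yields 3 + 4 + 2 = 9 bytes, and ieee802154_max_payload() leaves 127 - 9 - 2 = 116 bytes once the 2-byte FCS is subtracted. The standalone sketch below only mirrors that arithmetic with local constants so it compiles outside the kernel tree; the constants duplicate the 802.15.4 values behind IEEE802154_MTU and IEEE802154_MFR_SIZE.

/*
 * Standalone arithmetic check mirroring ieee802154_hdr_minlen() and
 * ieee802154_max_payload() for the common case: short dest + short source
 * address, intra-PAN (source PAN elided), no security header.
 */
#include <stdio.h>

int main(void)
{
	const int mtu = 127;    /* IEEE802154_MTU */
	const int mfr = 2;      /* IEEE802154_MFR_SIZE (FCS) */
	const int fc_seq = 3;   /* frame control (2) + sequence number (1) */
	const int dest = 2 + 2; /* dest PAN id + short address */
	const int src = 2;      /* short address only, PAN compressed */

	int hlen = fc_seq + dest + src;  /* what ieee802154_hdr_minlen() returns */
	int payload = mtu - hlen - mfr;  /* what ieee802154_max_payload() returns */

	printf("header %d bytes, max payload %d bytes\n", hlen, payload);
	return 0;
}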
3 27 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM v4l2 #if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_V4L2_H #include <linux/tracepoint.h> #include <media/videobuf2-v4l2.h> /* Enums require being exported to userspace, for user tool parsing */ #undef EM #undef EMe #define EM(a, b) TRACE_DEFINE_ENUM(a); #define EMe(a, b) TRACE_DEFINE_ENUM(a); #define show_type(type) \ __print_symbolic(type, SHOW_TYPE) #define SHOW_TYPE \ EM( V4L2_BUF_TYPE_VIDEO_CAPTURE, "VIDEO_CAPTURE" ) \ EM( V4L2_BUF_TYPE_VIDEO_OUTPUT, "VIDEO_OUTPUT" ) \ EM( V4L2_BUF_TYPE_VIDEO_OVERLAY, "VIDEO_OVERLAY" ) \ EM( V4L2_BUF_TYPE_VBI_CAPTURE, "VBI_CAPTURE" ) \ EM( V4L2_BUF_TYPE_VBI_OUTPUT, "VBI_OUTPUT" ) \ EM( V4L2_BUF_TYPE_SLICED_VBI_CAPTURE, "SLICED_VBI_CAPTURE" ) \ EM( V4L2_BUF_TYPE_SLICED_VBI_OUTPUT, "SLICED_VBI_OUTPUT" ) \ EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY, "VIDEO_OUTPUT_OVERLAY" ) \ EM( V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" ) \ EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" ) \ EM( V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" ) \ EM( V4L2_BUF_TYPE_SDR_OUTPUT, "SDR_OUTPUT" ) \ EM( V4L2_BUF_TYPE_META_CAPTURE, "META_CAPTURE" ) \ EMe(V4L2_BUF_TYPE_PRIVATE, "PRIVATE" ) SHOW_TYPE #define show_field(field) \ __print_symbolic(field, SHOW_FIELD) #define SHOW_FIELD \ EM( V4L2_FIELD_ANY, "ANY" ) \ EM( V4L2_FIELD_NONE, "NONE" ) \ EM( V4L2_FIELD_TOP, "TOP" ) \ EM( V4L2_FIELD_BOTTOM, "BOTTOM" ) \ EM( V4L2_FIELD_INTERLACED, "INTERLACED" ) \ EM( V4L2_FIELD_SEQ_TB, "SEQ_TB" ) \ EM( V4L2_FIELD_SEQ_BT, "SEQ_BT" ) \ EM( V4L2_FIELD_ALTERNATE, "ALTERNATE" ) \ EM( V4L2_FIELD_INTERLACED_TB, "INTERLACED_TB" ) \ EMe( V4L2_FIELD_INTERLACED_BT, "INTERLACED_BT" ) SHOW_FIELD /* * Now redefine the EM() and EMe() macros to map the enums to the strings * that will be printed in the output. 
*/ #undef EM #undef EMe #define EM(a, b) {a, b}, #define EMe(a, b) {a, b} /* V4L2_TC_TYPE_* are macros, not defines, they do not need processing */ #define show_timecode_type(type) \ __print_symbolic(type, \ { V4L2_TC_TYPE_24FPS, "24FPS" }, \ { V4L2_TC_TYPE_25FPS, "25FPS" }, \ { V4L2_TC_TYPE_30FPS, "30FPS" }, \ { V4L2_TC_TYPE_50FPS, "50FPS" }, \ { V4L2_TC_TYPE_60FPS, "60FPS" }) #define show_flags(flags) \ __print_flags(flags, "|", \ { V4L2_BUF_FLAG_MAPPED, "MAPPED" }, \ { V4L2_BUF_FLAG_QUEUED, "QUEUED" }, \ { V4L2_BUF_FLAG_DONE, "DONE" }, \ { V4L2_BUF_FLAG_KEYFRAME, "KEYFRAME" }, \ { V4L2_BUF_FLAG_PFRAME, "PFRAME" }, \ { V4L2_BUF_FLAG_BFRAME, "BFRAME" }, \ { V4L2_BUF_FLAG_ERROR, "ERROR" }, \ { V4L2_BUF_FLAG_TIMECODE, "TIMECODE" }, \ { V4L2_BUF_FLAG_PREPARED, "PREPARED" }, \ { V4L2_BUF_FLAG_NO_CACHE_INVALIDATE, "NO_CACHE_INVALIDATE" }, \ { V4L2_BUF_FLAG_NO_CACHE_CLEAN, "NO_CACHE_CLEAN" }, \ { V4L2_BUF_FLAG_TIMESTAMP_MASK, "TIMESTAMP_MASK" }, \ { V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN, "TIMESTAMP_UNKNOWN" }, \ { V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, "TIMESTAMP_MONOTONIC" }, \ { V4L2_BUF_FLAG_TIMESTAMP_COPY, "TIMESTAMP_COPY" }, \ { V4L2_BUF_FLAG_LAST, "LAST" }) #define show_timecode_flags(flags) \ __print_flags(flags, "|", \ { V4L2_TC_FLAG_DROPFRAME, "DROPFRAME" }, \ { V4L2_TC_FLAG_COLORFRAME, "COLORFRAME" }, \ { V4L2_TC_USERBITS_USERDEFINED, "USERBITS_USERDEFINED" }, \ { V4L2_TC_USERBITS_8BITCHARS, "USERBITS_8BITCHARS" }) DECLARE_EVENT_CLASS(v4l2_event_class, TP_PROTO(int minor, struct v4l2_buffer *buf), TP_ARGS(minor, buf), TP_STRUCT__entry( __field(int, minor) __field(u32, index) __field(u32, type) __field(u32, bytesused) __field(u32, flags) __field(u32, field) __field(s64, timestamp) __field(u32, timecode_type) __field(u32, timecode_flags) __field(u8, timecode_frames) __field(u8, timecode_seconds) __field(u8, timecode_minutes) __field(u8, timecode_hours) __field(u8, timecode_userbits0) __field(u8, timecode_userbits1) __field(u8, timecode_userbits2) __field(u8, timecode_userbits3) __field(u32, sequence) ), TP_fast_assign( __entry->minor = minor; __entry->index = buf->index; __entry->type = buf->type; __entry->bytesused = buf->bytesused; __entry->flags = buf->flags; __entry->field = buf->field; __entry->timestamp = v4l2_buffer_get_timestamp(buf); __entry->timecode_type = buf->timecode.type; __entry->timecode_flags = buf->timecode.flags; __entry->timecode_frames = buf->timecode.frames; __entry->timecode_seconds = buf->timecode.seconds; __entry->timecode_minutes = buf->timecode.minutes; __entry->timecode_hours = buf->timecode.hours; __entry->timecode_userbits0 = buf->timecode.userbits[0]; __entry->timecode_userbits1 = buf->timecode.userbits[1]; __entry->timecode_userbits2 = buf->timecode.userbits[2]; __entry->timecode_userbits3 = buf->timecode.userbits[3]; __entry->sequence = buf->sequence; ), TP_printk("minor = %d, index = %u, type = %s, bytesused = %u, " "flags = %s, field = %s, timestamp = %llu, " "timecode = { type = %s, flags = %s, frames = %u, " "seconds = %u, minutes = %u, hours = %u, " "userbits = { %u %u %u %u } }, sequence = %u", __entry->minor, __entry->index, show_type(__entry->type), __entry->bytesused, show_flags(__entry->flags), show_field(__entry->field), __entry->timestamp, show_timecode_type(__entry->timecode_type), show_timecode_flags(__entry->timecode_flags), __entry->timecode_frames, __entry->timecode_seconds, __entry->timecode_minutes, __entry->timecode_hours, __entry->timecode_userbits0, __entry->timecode_userbits1, __entry->timecode_userbits2, __entry->timecode_userbits3, 
__entry->sequence ) ) DEFINE_EVENT(v4l2_event_class, v4l2_dqbuf, TP_PROTO(int minor, struct v4l2_buffer *buf), TP_ARGS(minor, buf) ); DEFINE_EVENT(v4l2_event_class, v4l2_qbuf, TP_PROTO(int minor, struct v4l2_buffer *buf), TP_ARGS(minor, buf) ); DECLARE_EVENT_CLASS(vb2_v4l2_event_class, TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), TP_ARGS(q, vb), TP_STRUCT__entry( __field(int, minor) __field(u32, flags) __field(u32, field) __field(u64, timestamp) __field(u32, timecode_type) __field(u32, timecode_flags) __field(u8, timecode_frames) __field(u8, timecode_seconds) __field(u8, timecode_minutes) __field(u8, timecode_hours) __field(u8, timecode_userbits0) __field(u8, timecode_userbits1) __field(u8, timecode_userbits2) __field(u8, timecode_userbits3) __field(u32, sequence) ), TP_fast_assign( struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct v4l2_fh *owner = q->owner; __entry->minor = owner ? owner->vdev->minor : -1; __entry->flags = vbuf->flags; __entry->field = vbuf->field; __entry->timestamp = vb->timestamp; __entry->timecode_type = vbuf->timecode.type; __entry->timecode_flags = vbuf->timecode.flags; __entry->timecode_frames = vbuf->timecode.frames; __entry->timecode_seconds = vbuf->timecode.seconds; __entry->timecode_minutes = vbuf->timecode.minutes; __entry->timecode_hours = vbuf->timecode.hours; __entry->timecode_userbits0 = vbuf->timecode.userbits[0]; __entry->timecode_userbits1 = vbuf->timecode.userbits[1]; __entry->timecode_userbits2 = vbuf->timecode.userbits[2]; __entry->timecode_userbits3 = vbuf->timecode.userbits[3]; __entry->sequence = vbuf->sequence; ), TP_printk("minor=%d flags = %s, field = %s, " "timestamp = %llu, timecode = { type = %s, flags = %s, " "frames = %u, seconds = %u, minutes = %u, hours = %u, " "userbits = { %u %u %u %u } }, sequence = %u", __entry->minor, show_flags(__entry->flags), show_field(__entry->field), __entry->timestamp, show_timecode_type(__entry->timecode_type), show_timecode_flags(__entry->timecode_flags), __entry->timecode_frames, __entry->timecode_seconds, __entry->timecode_minutes, __entry->timecode_hours, __entry->timecode_userbits0, __entry->timecode_userbits1, __entry->timecode_userbits2, __entry->timecode_userbits3, __entry->sequence ) ) DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_done, TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), TP_ARGS(q, vb) ); DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_queue, TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), TP_ARGS(q, vb) ); DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_dqbuf, TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), TP_ARGS(q, vb) ); DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_qbuf, TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), TP_ARGS(q, vb) ); #endif /* if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) */ /* This part must be outside protection */ #include <trace/define_trace.h>
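The events declared above land in the "v4l2" group of tracefs once this header is built into the kernel, so they can be enabled and read like any other tracepoint. The sketch below is one hedged way to consume them from userspace; it assumes tracefs is mounted at /sys/kernel/tracing (older setups expose it under /sys/kernel/debug/tracing) and must run as root.

/*
 * Userspace sketch: enable every tracepoint in the v4l2 group defined above
 * and stream the TP_printk() output from trace_pipe.
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char line[512];
	FILE *pipe;

	/* Turns on v4l2_qbuf, v4l2_dqbuf and the vb2_v4l2_* events */
	if (write_str("/sys/kernel/tracing/events/v4l2/enable", "1"))
		return 1;

	pipe = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!pipe) {
		perror("trace_pipe");
		return 1;
	}
	/* Each line carries the formatted fields of v4l2_event_class */
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);
	fclose(pipe);
	return 0;
}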
1 1 4 2 1 1 1 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 // SPDX-License-Identifier: GPL-2.0+ /* * Apple Cinema Display driver * * Copyright (C) 2006 Michael Hanselmann (linux-kernel@hansmi.ch) * * Thanks to Caskey L. Dickson for his work with acdctl. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/backlight.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/atomic.h> #define APPLE_VENDOR_ID 0x05AC #define USB_REQ_GET_REPORT 0x01 #define USB_REQ_SET_REPORT 0x09 #define ACD_USB_TIMEOUT 250 #define ACD_USB_EDID 0x0302 #define ACD_USB_BRIGHTNESS 0x0310 #define ACD_BTN_NONE 0 #define ACD_BTN_BRIGHT_UP 3 #define ACD_BTN_BRIGHT_DOWN 4 #define ACD_URB_BUFFER_LEN 2 #define ACD_MSG_BUFFER_LEN 2 #define APPLEDISPLAY_DEVICE(prod) \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_CLASS | \ USB_DEVICE_ID_MATCH_INT_PROTOCOL, \ .idVendor = APPLE_VENDOR_ID, \ .idProduct = (prod), \ .bInterfaceClass = USB_CLASS_HID, \ .bInterfaceProtocol = 0x00 /* table of devices that work with this driver */ static const struct usb_device_id appledisplay_table[] = { { APPLEDISPLAY_DEVICE(0x9218) }, { APPLEDISPLAY_DEVICE(0x9219) }, { APPLEDISPLAY_DEVICE(0x921c) }, { APPLEDISPLAY_DEVICE(0x921d) }, { APPLEDISPLAY_DEVICE(0x9222) }, { APPLEDISPLAY_DEVICE(0x9226) }, { APPLEDISPLAY_DEVICE(0x9236) }, /* Terminating entry */ { } }; MODULE_DEVICE_TABLE(usb, appledisplay_table); /* Structure to hold all of our device specific stuff */ struct appledisplay { struct usb_device *udev; /* usb device */ struct urb *urb; /* usb request block */ struct backlight_device *bd; /* backlight device */ u8 *urbdata; /* interrupt URB data buffer */ u8 *msgdata; /* control message data buffer */ struct delayed_work work; int button_pressed; struct mutex sysfslock; /* concurrent read and write */ }; static atomic_t count_displays = ATOMIC_INIT(0); static void appledisplay_complete(struct urb *urb) { struct appledisplay *pdata = urb->context; struct device *dev = &pdata->udev->dev; int status = urb->status; int retval; switch (status) { case 0: /* success */ break; case -EOVERFLOW: dev_err(dev, "OVERFLOW with data length %d, actual length is %d\n", 
				ACD_URB_BUFFER_LEN,
				pdata->urb->actual_length);
		fallthrough;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* This urb is terminated, clean up */
		dev_dbg(dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		return;
	default:
		dev_dbg(dev, "%s - nonzero urb status received: %d\n",
			__func__, status);
		goto exit;
	}

	switch (pdata->urbdata[1]) {
	case ACD_BTN_BRIGHT_UP:
	case ACD_BTN_BRIGHT_DOWN:
		pdata->button_pressed = 1;
		schedule_delayed_work(&pdata->work, 0);
		break;
	case ACD_BTN_NONE:
	default:
		pdata->button_pressed = 0;
		break;
	}

exit:
	retval = usb_submit_urb(pdata->urb, GFP_ATOMIC);
	if (retval) {
		dev_err(dev, "%s - usb_submit_urb failed with result %d\n",
			__func__, retval);
	}
}

static int appledisplay_bl_update_status(struct backlight_device *bd)
{
	struct appledisplay *pdata = bl_get_data(bd);
	int retval;

	mutex_lock(&pdata->sysfslock);
	pdata->msgdata[0] = 0x10;
	pdata->msgdata[1] = bd->props.brightness;

	retval = usb_control_msg(
		pdata->udev,
		usb_sndctrlpipe(pdata->udev, 0),
		USB_REQ_SET_REPORT,
		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		ACD_USB_BRIGHTNESS,
		0,
		pdata->msgdata, 2,
		ACD_USB_TIMEOUT);
	mutex_unlock(&pdata->sysfslock);

	if (retval < 0)
		return retval;
	else
		return 0;
}

static int appledisplay_bl_get_brightness(struct backlight_device *bd)
{
	struct appledisplay *pdata = bl_get_data(bd);
	int retval, brightness;

	mutex_lock(&pdata->sysfslock);
	retval = usb_control_msg(
		pdata->udev,
		usb_rcvctrlpipe(pdata->udev, 0),
		USB_REQ_GET_REPORT,
		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		ACD_USB_BRIGHTNESS,
		0,
		pdata->msgdata, 2,
		ACD_USB_TIMEOUT);
	if (retval < 2) {
		if (retval >= 0)
			retval = -EMSGSIZE;
	} else {
		brightness = pdata->msgdata[1];
	}
	mutex_unlock(&pdata->sysfslock);

	if (retval < 0)
		return retval;
	else
		return brightness;
}

static const struct backlight_ops appledisplay_bl_data = {
	.get_brightness	= appledisplay_bl_get_brightness,
	.update_status	= appledisplay_bl_update_status,
};

static void appledisplay_work(struct work_struct *work)
{
	struct appledisplay *pdata =
		container_of(work, struct appledisplay, work.work);
	int retval;

	retval = appledisplay_bl_get_brightness(pdata->bd);
	if (retval >= 0)
		pdata->bd->props.brightness = retval;

	/* Poll again in about 125ms if there's still a button pressed */
	if (pdata->button_pressed)
		schedule_delayed_work(&pdata->work, HZ / 8);
}

static int appledisplay_probe(struct usb_interface *iface,
	const struct usb_device_id *id)
{
	struct backlight_properties props;
	struct appledisplay *pdata;
	struct usb_device *udev = interface_to_usbdev(iface);
	struct usb_endpoint_descriptor *endpoint;
	int int_in_endpointAddr = 0;
	int retval, brightness;
	char bl_name[20];

	/* set up the endpoint information */
	/* use only the first interrupt-in endpoint */
	retval = usb_find_int_in_endpoint(iface->cur_altsetting, &endpoint);
	if (retval) {
		dev_err(&iface->dev, "Could not find int-in endpoint\n");
		return retval;
	}

	int_in_endpointAddr = endpoint->bEndpointAddress;

	/* allocate memory for our device state and initialize it */
	pdata = kzalloc(sizeof(struct appledisplay), GFP_KERNEL);
	if (!pdata) {
		retval = -ENOMEM;
		goto error;
	}

	pdata->udev = udev;

	INIT_DELAYED_WORK(&pdata->work, appledisplay_work);
	mutex_init(&pdata->sysfslock);

	/* Allocate buffer for control messages */
	pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL);
	if (!pdata->msgdata) {
		retval = -ENOMEM;
		goto error;
	}

	/* Allocate interrupt URB */
	pdata->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!pdata->urb) {
		retval = -ENOMEM;
		goto error;
	}

	/* Allocate buffer for interrupt data */
	pdata->urbdata = usb_alloc_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
		GFP_KERNEL, &pdata->urb->transfer_dma);
	if (!pdata->urbdata) {
		retval = -ENOMEM;
		dev_err(&iface->dev, "Allocating URB buffer failed\n");
		goto error;
	}

	/* Configure interrupt URB */
	usb_fill_int_urb(pdata->urb, udev,
		usb_rcvintpipe(udev, int_in_endpointAddr),
		pdata->urbdata, ACD_URB_BUFFER_LEN, appledisplay_complete,
		pdata, 1);
	pdata->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
	if (usb_submit_urb(pdata->urb, GFP_KERNEL)) {
		retval = -EIO;
		dev_err(&iface->dev, "Submitting URB failed\n");
		goto error;
	}

	/* Register backlight device */
	snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
		atomic_inc_return(&count_displays) - 1);
	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 0xff;
	pdata->bd = backlight_device_register(bl_name, NULL, pdata,
					      &appledisplay_bl_data, &props);
	if (IS_ERR(pdata->bd)) {
		dev_err(&iface->dev, "Backlight registration failed\n");
		retval = PTR_ERR(pdata->bd);
		goto error;
	}

	/* Try to get brightness */
	brightness = appledisplay_bl_get_brightness(pdata->bd);
	if (brightness < 0) {
		retval = brightness;
		dev_err(&iface->dev,
			"Error while getting initial brightness: %d\n", retval);
		goto error;
	}

	/* Set brightness in backlight device */
	pdata->bd->props.brightness = brightness;

	/* save our data pointer in the interface device */
	usb_set_intfdata(iface, pdata);

	printk(KERN_INFO "appledisplay: Apple Cinema Display connected\n");

	return 0;

error:
	if (pdata) {
		if (pdata->urb) {
			usb_kill_urb(pdata->urb);
			cancel_delayed_work_sync(&pdata->work);
			usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
				pdata->urbdata, pdata->urb->transfer_dma);
			usb_free_urb(pdata->urb);
		}
		if (!IS_ERR(pdata->bd))
			backlight_device_unregister(pdata->bd);
		kfree(pdata->msgdata);
	}
	usb_set_intfdata(iface, NULL);
	kfree(pdata);
	return retval;
}

static void appledisplay_disconnect(struct usb_interface *iface)
{
	struct appledisplay *pdata = usb_get_intfdata(iface);

	if (pdata) {
		usb_kill_urb(pdata->urb);
		cancel_delayed_work_sync(&pdata->work);
		backlight_device_unregister(pdata->bd);
		usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
			pdata->urbdata, pdata->urb->transfer_dma);
		usb_free_urb(pdata->urb);
		kfree(pdata->msgdata);
		kfree(pdata);
	}

	printk(KERN_INFO "appledisplay: Apple Cinema Display disconnected\n");
}

static struct usb_driver appledisplay_driver = {
	.name		= "appledisplay",
	.probe		= appledisplay_probe,
	.disconnect	= appledisplay_disconnect,
	.id_table	= appledisplay_table,
};
module_usb_driver(appledisplay_driver);

MODULE_AUTHOR("Michael Hanselmann");
MODULE_DESCRIPTION("Apple Cinema Display driver");
MODULE_LICENSE("GPL");
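For reference, a minimal sketch of a hypothetical consumer of the backlight class device that appledisplay_probe() registers as "appledisplay%d". It is not part of the driver; the module and function names are invented, and it assumes the generic backlight helpers backlight_device_get_by_name(), backlight_device_set_brightness() and backlight_put():

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Hypothetical helper: look up the class device by name and set a level. */
static int example_set_cinema_brightness(const char *name, unsigned long level)
{
	struct backlight_device *bd;
	int ret;

	bd = backlight_device_get_by_name(name);	/* e.g. "appledisplay0" */
	if (!bd)
		return -ENODEV;

	/* Rejects values above props.max_brightness (0xff here); otherwise
	 * stores the new level and invokes the driver's update_status(). */
	ret = backlight_device_set_brightness(bd, level);

	backlight_put(bd);	/* drop the reference taken by the lookup */
	return ret;
}

static int __init example_init(void)
{
	int ret = example_set_cinema_brightness("appledisplay0", 0x80);

	if (ret)
		pr_warn("example: brightness update failed: %d\n", ret);
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");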
// SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/netfilter.h> #include <linux/seq_file.h> #include <net/protocol.h> #include <net/netfilter/nf_log.h> #include "nf_internals.h" /* Internal logging interface, which relies on the real LOG target modules */ #define NFLOGGER_NAME_LEN 64 int sysctl_nf_log_all_netns __read_mostly; EXPORT_SYMBOL(sysctl_nf_log_all_netns); static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly; static DEFINE_MUTEX(nf_log_mutex); #define nft_log_dereference(logger) \ rcu_dereference_protected(logger, lockdep_is_held(&nf_log_mutex)) static struct nf_logger *__find_logger(int pf, const char *str_logger) { struct nf_logger *log; int i; for (i = 0; i < NF_LOG_TYPE_MAX; i++) { log = nft_log_dereference(loggers[pf][i]); if (!log) continue; if (!strncasecmp(str_logger, log->name, strlen(log->name))) return log; } return NULL; } int nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger) { const struct nf_logger *log; if (pf == NFPROTO_UNSPEC || pf >= ARRAY_SIZE(net->nf.nf_loggers)) return -EOPNOTSUPP; mutex_lock(&nf_log_mutex); log =
nft_log_dereference(net->nf.nf_loggers[pf]); if (log == NULL) rcu_assign_pointer(net->nf.nf_loggers[pf], logger); mutex_unlock(&nf_log_mutex); return 0; } EXPORT_SYMBOL(nf_log_set); void nf_log_unset(struct net *net, const struct nf_logger *logger) { int i; const struct nf_logger *log; mutex_lock(&nf_log_mutex); for (i = 0; i < NFPROTO_NUMPROTO; i++) { log = nft_log_dereference(net->nf.nf_loggers[i]); if (log == logger) RCU_INIT_POINTER(net->nf.nf_loggers[i], NULL); } mutex_unlock(&nf_log_mutex); } EXPORT_SYMBOL(nf_log_unset); /* return EEXIST if the same logger is registered, 0 on success. */ int nf_log_register(u_int8_t pf, struct nf_logger *logger) { int i; int ret = 0; if (pf >= ARRAY_SIZE(init_net.nf.nf_loggers)) return -EINVAL; mutex_lock(&nf_log_mutex); if (pf == NFPROTO_UNSPEC) { for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) { if (rcu_access_pointer(loggers[i][logger->type])) { ret = -EEXIST; goto unlock; } } for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) rcu_assign_pointer(loggers[i][logger->type], logger); } else { if (rcu_access_pointer(loggers[pf][logger->type])) { ret = -EEXIST; goto unlock; } rcu_assign_pointer(loggers[pf][logger->type], logger); } unlock: mutex_unlock(&nf_log_mutex); return ret; } EXPORT_SYMBOL(nf_log_register); void nf_log_unregister(struct nf_logger *logger) { const struct nf_logger *log; int i; mutex_lock(&nf_log_mutex); for (i = 0; i < NFPROTO_NUMPROTO; i++) { log = nft_log_dereference(loggers[i][logger->type]); if (log == logger) RCU_INIT_POINTER(loggers[i][logger->type], NULL); } mutex_unlock(&nf_log_mutex); synchronize_rcu(); } EXPORT_SYMBOL(nf_log_unregister); int nf_log_bind_pf(struct net *net, u_int8_t pf, const struct nf_logger *logger) { if (pf >= ARRAY_SIZE(net->nf.nf_loggers)) return -EINVAL; mutex_lock(&nf_log_mutex); if (__find_logger(pf, logger->name) == NULL) { mutex_unlock(&nf_log_mutex); return -ENOENT; } rcu_assign_pointer(net->nf.nf_loggers[pf], logger); mutex_unlock(&nf_log_mutex); return 0; } EXPORT_SYMBOL(nf_log_bind_pf); void nf_log_unbind_pf(struct net *net, u_int8_t pf) { if (pf >= ARRAY_SIZE(net->nf.nf_loggers)) return; mutex_lock(&nf_log_mutex); RCU_INIT_POINTER(net->nf.nf_loggers[pf], NULL); mutex_unlock(&nf_log_mutex); } EXPORT_SYMBOL(nf_log_unbind_pf); int nf_logger_find_get(int pf, enum nf_log_type type) { struct nf_logger *logger; int ret = -ENOENT; if (pf >= ARRAY_SIZE(loggers)) return -EINVAL; if (type >= NF_LOG_TYPE_MAX) return -EINVAL; if (pf == NFPROTO_INET) { ret = nf_logger_find_get(NFPROTO_IPV4, type); if (ret < 0) return ret; ret = nf_logger_find_get(NFPROTO_IPV6, type); if (ret < 0) { nf_logger_put(NFPROTO_IPV4, type); return ret; } return 0; } rcu_read_lock(); logger = rcu_dereference(loggers[pf][type]); if (logger == NULL) goto out; if (try_module_get(logger->me)) ret = 0; out: rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(nf_logger_find_get); void nf_logger_put(int pf, enum nf_log_type type) { struct nf_logger *logger; if (pf == NFPROTO_INET) { nf_logger_put(NFPROTO_IPV4, type); nf_logger_put(NFPROTO_IPV6, type); return; } rcu_read_lock(); logger = rcu_dereference(loggers[pf][type]); if (!logger) WARN_ON_ONCE(1); else module_put(logger->me); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(nf_logger_put); void nf_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *fmt, ...) 
{ va_list args; char prefix[NF_LOG_PREFIXLEN]; const struct nf_logger *logger; rcu_read_lock(); if (loginfo != NULL) logger = rcu_dereference(loggers[pf][loginfo->type]); else logger = rcu_dereference(net->nf.nf_loggers[pf]); if (logger) { va_start(args, fmt); vsnprintf(prefix, sizeof(prefix), fmt, args); va_end(args); logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); } rcu_read_unlock(); } EXPORT_SYMBOL(nf_log_packet); void nf_log_trace(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *fmt, ...) { va_list args; char prefix[NF_LOG_PREFIXLEN]; const struct nf_logger *logger; rcu_read_lock(); logger = rcu_dereference(net->nf.nf_loggers[pf]); if (logger) { va_start(args, fmt); vsnprintf(prefix, sizeof(prefix), fmt, args); va_end(args); logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); } rcu_read_unlock(); } EXPORT_SYMBOL(nf_log_trace); #define S_SIZE (1024 - (sizeof(unsigned int) + 1)) struct nf_log_buf { unsigned int count; char buf[S_SIZE + 1]; }; static struct nf_log_buf emergency, *emergency_ptr = &emergency; __printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...) { va_list args; int len; if (likely(m->count < S_SIZE)) { va_start(args, f); len = vsnprintf(m->buf + m->count, S_SIZE - m->count, f, args); va_end(args); if (likely(m->count + len < S_SIZE)) { m->count += len; return 0; } } m->count = S_SIZE; printk_once(KERN_ERR KBUILD_MODNAME " please increase S_SIZE\n"); return -1; } EXPORT_SYMBOL_GPL(nf_log_buf_add); struct nf_log_buf *nf_log_buf_open(void) { struct nf_log_buf *m = kmalloc(sizeof(*m), GFP_ATOMIC); if (unlikely(!m)) { local_bh_disable(); do { m = xchg(&emergency_ptr, NULL); } while (!m); } m->count = 0; return m; } EXPORT_SYMBOL_GPL(nf_log_buf_open); void nf_log_buf_close(struct nf_log_buf *m) { m->buf[m->count] = 0; printk("%s\n", m->buf); if (likely(m != &emergency)) kfree(m); else { emergency_ptr = m; local_bh_enable(); } } EXPORT_SYMBOL_GPL(nf_log_buf_close); #ifdef CONFIG_PROC_FS static void *seq_start(struct seq_file *seq, loff_t *pos) { struct net *net = seq_file_net(seq); mutex_lock(&nf_log_mutex); if (*pos >= ARRAY_SIZE(net->nf.nf_loggers)) return NULL; return pos; } static void *seq_next(struct seq_file *s, void *v, loff_t *pos) { struct net *net = seq_file_net(s); (*pos)++; if (*pos >= ARRAY_SIZE(net->nf.nf_loggers)) return NULL; return pos; } static void seq_stop(struct seq_file *s, void *v) { mutex_unlock(&nf_log_mutex); } static int seq_show(struct seq_file *s, void *v) { loff_t *pos = v; const struct nf_logger *logger; int i; struct net *net = seq_file_net(s); logger = nft_log_dereference(net->nf.nf_loggers[*pos]); if (!logger) seq_printf(s, "%2lld NONE (", *pos); else seq_printf(s, "%2lld %s (", *pos, logger->name); if (seq_has_overflowed(s)) return -ENOSPC; for (i = 0; i < NF_LOG_TYPE_MAX; i++) { if (loggers[*pos][i] == NULL) continue; logger = nft_log_dereference(loggers[*pos][i]); seq_puts(s, logger->name); if (i == 0 && loggers[*pos][i + 1] != NULL) seq_puts(s, ","); if (seq_has_overflowed(s)) return -ENOSPC; } seq_puts(s, ")\n"); if (seq_has_overflowed(s)) return -ENOSPC; return 0; } static const struct seq_operations nflog_seq_ops = { .start = seq_start, .next = seq_next, .stop = seq_stop, .show = seq_show, }; #endif /* PROC_FS */ #ifdef CONFIG_SYSCTL static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; static struct ctl_table 
nf_log_sysctl_table[NFPROTO_NUMPROTO]; static struct ctl_table_header *nf_log_sysctl_fhdr; static struct ctl_table nf_log_sysctl_ftable[] = { { .procname = "nf_log_all_netns", .data = &sysctl_nf_log_all_netns, .maxlen = sizeof(sysctl_nf_log_all_netns), .mode = 0644, .proc_handler = proc_dointvec, }, }; static int nf_log_proc_dostring(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { const struct nf_logger *logger; char buf[NFLOGGER_NAME_LEN]; int r = 0; int tindex = (unsigned long)table->extra1; struct net *net = table->extra2; if (write) { struct ctl_table tmp = *table; /* proc_dostring() can append to existing strings, so we need to * initialize it as an empty string. */ buf[0] = '\0'; tmp.data = buf; r = proc_dostring(&tmp, write, buffer, lenp, ppos); if (r) return r; if (!strcmp(buf, "NONE")) { nf_log_unbind_pf(net, tindex); return 0; } mutex_lock(&nf_log_mutex); logger = __find_logger(tindex, buf); if (logger == NULL) { mutex_unlock(&nf_log_mutex); return -ENOENT; } rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); mutex_unlock(&nf_log_mutex); } else { struct ctl_table tmp = *table; tmp.data = buf; mutex_lock(&nf_log_mutex); logger = nft_log_dereference(net->nf.nf_loggers[tindex]); if (!logger) strscpy(buf, "NONE", sizeof(buf)); else strscpy(buf, logger->name, sizeof(buf)); mutex_unlock(&nf_log_mutex); r = proc_dostring(&tmp, write, buffer, lenp, ppos); } return r; } static int netfilter_log_sysctl_init(struct net *net) { int i; struct ctl_table *table; table = nf_log_sysctl_table; if (!net_eq(net, &init_net)) { table = kmemdup(nf_log_sysctl_table, sizeof(nf_log_sysctl_table), GFP_KERNEL); if (!table) goto err_alloc; } else { for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) { snprintf(nf_log_sysctl_fnames[i], 3, "%d", i); nf_log_sysctl_table[i].procname = nf_log_sysctl_fnames[i]; nf_log_sysctl_table[i].maxlen = NFLOGGER_NAME_LEN; nf_log_sysctl_table[i].mode = 0644; nf_log_sysctl_table[i].proc_handler = nf_log_proc_dostring; nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; } nf_log_sysctl_fhdr = register_net_sysctl(net, "net/netfilter", nf_log_sysctl_ftable); if (!nf_log_sysctl_fhdr) goto err_freg; } for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) table[i].extra2 = net; net->nf.nf_log_dir_header = register_net_sysctl_sz(net, "net/netfilter/nf_log", table, ARRAY_SIZE(nf_log_sysctl_table)); if (!net->nf.nf_log_dir_header) goto err_reg; return 0; err_reg: if (!net_eq(net, &init_net)) kfree(table); else unregister_net_sysctl_table(nf_log_sysctl_fhdr); err_freg: err_alloc: return -ENOMEM; } static void netfilter_log_sysctl_exit(struct net *net) { const struct ctl_table *table; table = net->nf.nf_log_dir_header->ctl_table_arg; unregister_net_sysctl_table(net->nf.nf_log_dir_header); if (!net_eq(net, &init_net)) kfree(table); else unregister_net_sysctl_table(nf_log_sysctl_fhdr); } #else static int netfilter_log_sysctl_init(struct net *net) { return 0; } static void netfilter_log_sysctl_exit(struct net *net) { } #endif /* CONFIG_SYSCTL */ static int __net_init nf_log_net_init(struct net *net) { int ret = -ENOMEM; #ifdef CONFIG_PROC_FS if (!proc_create_net("nf_log", 0444, net->nf.proc_netfilter, &nflog_seq_ops, sizeof(struct seq_net_private))) return ret; #endif ret = netfilter_log_sysctl_init(net); if (ret < 0) goto out_sysctl; return 0; out_sysctl: #ifdef CONFIG_PROC_FS remove_proc_entry("nf_log", net->nf.proc_netfilter); #endif return ret; } static void __net_exit nf_log_net_exit(struct net *net) { netfilter_log_sysctl_exit(net); 
#ifdef CONFIG_PROC_FS remove_proc_entry("nf_log", net->nf.proc_netfilter); #endif } static struct pernet_operations nf_log_net_ops = { .init = nf_log_net_init, .exit = nf_log_net_exit, }; int __init netfilter_log_init(void) { return register_pernet_subsys(&nf_log_net_ops); }
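To illustrate how the registry above is meant to be used, here is a hedged sketch of a hypothetical logger backend registering itself through nf_log_register()/nf_log_unregister(). The module and function names are invented; real in-tree backends additionally bind themselves per protocol family and netns via nf_log_set():

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_log.h>

static void example_logfn(struct net *net, u_int8_t pf, unsigned int hooknum,
			  const struct sk_buff *skb,
			  const struct net_device *in,
			  const struct net_device *out,
			  const struct nf_loginfo *loginfo, const char *prefix)
{
	/* A real backend would format and emit the packet here. */
	pr_info("%s pf=%d hook=%u len=%u\n", prefix, pf, hooknum, skb->len);
}

static struct nf_logger example_logger = {
	.name	= "nf_log_example",
	.type	= NF_LOG_TYPE_LOG,
	.logfn	= example_logfn,
	.me	= THIS_MODULE,
};

static int __init example_init(void)
{
	/* NFPROTO_UNSPEC claims the LOG slot for every protocol family;
	 * nf_log_register() returns -EEXIST if any slot is already taken. */
	return nf_log_register(NFPROTO_UNSPEC, &example_logger);
}

static void __exit example_exit(void)
{
	nf_log_unregister(&example_logger);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");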
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) B.A.T.M.A.N.
contributors: * * Marek Lindner, Simon Wunderlich, Antonio Quartulli */ #include "translation-table.h" #include "main.h" #include <linux/atomic.h> #include <linux/bitops.h> #include <linux/build_bug.h> #include <linux/byteorder/generic.h> #include <linux/cache.h> #include <linux/compiler.h> #include <linux/container_of.h> #include <linux/crc32c.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/gfp.h> #include <linux/if_ether.h> #include <linux/init.h> #include <linux/jhash.h> #include <linux/jiffies.h> #include <linux/kref.h> #include <linux/list.h> #include <linux/lockdep.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/netlink.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/workqueue.h> #include <net/genetlink.h> #include <net/netlink.h> #include <net/sock.h> #include <uapi/linux/batadv_packet.h> #include <uapi/linux/batman_adv.h> #include "bridge_loop_avoidance.h" #include "hard-interface.h" #include "hash.h" #include "log.h" #include "netlink.h" #include "originator.h" #include "soft-interface.h" #include "tvlv.h" static struct kmem_cache *batadv_tl_cache __read_mostly; static struct kmem_cache *batadv_tg_cache __read_mostly; static struct kmem_cache *batadv_tt_orig_cache __read_mostly; static struct kmem_cache *batadv_tt_change_cache __read_mostly; static struct kmem_cache *batadv_tt_req_cache __read_mostly; static struct kmem_cache *batadv_tt_roam_cache __read_mostly; /* hash class keys */ static struct lock_class_key batadv_tt_local_hash_lock_class_key; static struct lock_class_key batadv_tt_global_hash_lock_class_key; static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client, unsigned short vid, struct batadv_orig_node *orig_node); static void batadv_tt_purge(struct work_struct *work); static void batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry); static void batadv_tt_global_del(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, const unsigned char *addr, unsigned short vid, const char *message, bool roaming); /** * batadv_compare_tt() - check if two TT entries are the same * @node: the list element pointer of the first TT entry * @data2: pointer to the tt_common_entry of the second TT entry * * Compare the MAC address and the VLAN ID of the two TT entries and check if * they are the same TT client. * Return: true if the two TT clients are the same, false otherwise */ static bool batadv_compare_tt(const struct hlist_node *node, const void *data2) { const void *data1 = container_of(node, struct batadv_tt_common_entry, hash_entry); const struct batadv_tt_common_entry *tt1 = data1; const struct batadv_tt_common_entry *tt2 = data2; return (tt1->vid == tt2->vid) && batadv_compare_eth(data1, data2); } /** * batadv_choose_tt() - return the index of the tt entry in the hash table * @data: pointer to the tt_common_entry object to map * @size: the size of the hash table * * Return: the hash index where the object represented by 'data' should be * stored at. 
*/ static inline u32 batadv_choose_tt(const void *data, u32 size) { const struct batadv_tt_common_entry *tt; u32 hash = 0; tt = data; hash = jhash(&tt->addr, ETH_ALEN, hash); hash = jhash(&tt->vid, sizeof(tt->vid), hash); return hash % size; } /** * batadv_tt_hash_find() - look for a client in the given hash table * @hash: the hash table to search * @addr: the mac address of the client to look for * @vid: VLAN identifier * * Return: a pointer to the tt_common struct belonging to the searched client if * found, NULL otherwise. */ static struct batadv_tt_common_entry * batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr, unsigned short vid) { struct hlist_head *head; struct batadv_tt_common_entry to_search, *tt, *tt_tmp = NULL; u32 index; if (!hash) return NULL; ether_addr_copy(to_search.addr, addr); to_search.vid = vid; index = batadv_choose_tt(&to_search, hash->size); head = &hash->table[index]; rcu_read_lock(); hlist_for_each_entry_rcu(tt, head, hash_entry) { if (!batadv_compare_eth(tt, addr)) continue; if (tt->vid != vid) continue; if (!kref_get_unless_zero(&tt->refcount)) continue; tt_tmp = tt; break; } rcu_read_unlock(); return tt_tmp; } /** * batadv_tt_local_hash_find() - search the local table for a given client * @bat_priv: the bat priv with all the soft interface information * @addr: the mac address of the client to look for * @vid: VLAN identifier * * Return: a pointer to the corresponding tt_local_entry struct if the client is * found, NULL otherwise. */ static struct batadv_tt_local_entry * batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid) { struct batadv_tt_common_entry *tt_common_entry; struct batadv_tt_local_entry *tt_local_entry = NULL; tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, addr, vid); if (tt_common_entry) tt_local_entry = container_of(tt_common_entry, struct batadv_tt_local_entry, common); return tt_local_entry; } /** * batadv_tt_global_hash_find() - search the global table for a given client * @bat_priv: the bat priv with all the soft interface information * @addr: the mac address of the client to look for * @vid: VLAN identifier * * Return: a pointer to the corresponding tt_global_entry struct if the client * is found, NULL otherwise. 
*/ struct batadv_tt_global_entry * batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid) { struct batadv_tt_common_entry *tt_common_entry; struct batadv_tt_global_entry *tt_global_entry = NULL; tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, addr, vid); if (tt_common_entry) tt_global_entry = container_of(tt_common_entry, struct batadv_tt_global_entry, common); return tt_global_entry; } /** * batadv_tt_local_entry_free_rcu() - free the tt_local_entry * @rcu: rcu pointer of the tt_local_entry */ static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu) { struct batadv_tt_local_entry *tt_local_entry; tt_local_entry = container_of(rcu, struct batadv_tt_local_entry, common.rcu); kmem_cache_free(batadv_tl_cache, tt_local_entry); } /** * batadv_tt_local_entry_release() - release tt_local_entry from lists and queue * for free after rcu grace period * @ref: kref pointer of the nc_node */ static void batadv_tt_local_entry_release(struct kref *ref) { struct batadv_tt_local_entry *tt_local_entry; tt_local_entry = container_of(ref, struct batadv_tt_local_entry, common.refcount); batadv_softif_vlan_put(tt_local_entry->vlan); call_rcu(&tt_local_entry->common.rcu, batadv_tt_local_entry_free_rcu); } /** * batadv_tt_local_entry_put() - decrement the tt_local_entry refcounter and * possibly release it * @tt_local_entry: tt_local_entry to be free'd */ static void batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry) { if (!tt_local_entry) return; kref_put(&tt_local_entry->common.refcount, batadv_tt_local_entry_release); } /** * batadv_tt_global_entry_free_rcu() - free the tt_global_entry * @rcu: rcu pointer of the tt_global_entry */ static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu) { struct batadv_tt_global_entry *tt_global_entry; tt_global_entry = container_of(rcu, struct batadv_tt_global_entry, common.rcu); kmem_cache_free(batadv_tg_cache, tt_global_entry); } /** * batadv_tt_global_entry_release() - release tt_global_entry from lists and * queue for free after rcu grace period * @ref: kref pointer of the nc_node */ void batadv_tt_global_entry_release(struct kref *ref) { struct batadv_tt_global_entry *tt_global_entry; tt_global_entry = container_of(ref, struct batadv_tt_global_entry, common.refcount); batadv_tt_global_del_orig_list(tt_global_entry); call_rcu(&tt_global_entry->common.rcu, batadv_tt_global_entry_free_rcu); } /** * batadv_tt_global_hash_count() - count the number of orig entries * @bat_priv: the bat priv with all the soft interface information * @addr: the mac address of the client to count entries for * @vid: VLAN identifier * * Return: the number of originators advertising the given address/data * (excluding our self). 
*/ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid) { struct batadv_tt_global_entry *tt_global_entry; int count; tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid); if (!tt_global_entry) return 0; count = atomic_read(&tt_global_entry->orig_list_count); batadv_tt_global_entry_put(tt_global_entry); return count; } /** * batadv_tt_local_size_mod() - change the size by v of the local table * identified by vid * @bat_priv: the bat priv with all the soft interface information * @vid: the VLAN identifier of the sub-table to change * @v: the amount to sum to the local table size */ static void batadv_tt_local_size_mod(struct batadv_priv *bat_priv, unsigned short vid, int v) { struct batadv_softif_vlan *vlan; vlan = batadv_softif_vlan_get(bat_priv, vid); if (!vlan) return; atomic_add(v, &vlan->tt.num_entries); batadv_softif_vlan_put(vlan); } /** * batadv_tt_local_size_inc() - increase by one the local table size for the * given vid * @bat_priv: the bat priv with all the soft interface information * @vid: the VLAN identifier */ static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv, unsigned short vid) { batadv_tt_local_size_mod(bat_priv, vid, 1); } /** * batadv_tt_local_size_dec() - decrease by one the local table size for the * given vid * @bat_priv: the bat priv with all the soft interface information * @vid: the VLAN identifier */ static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv, unsigned short vid) { batadv_tt_local_size_mod(bat_priv, vid, -1); } /** * batadv_tt_global_size_mod() - change the size by v of the global table * for orig_node identified by vid * @orig_node: the originator for which the table has to be modified * @vid: the VLAN identifier * @v: the amount to sum to the global table size */ static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node, unsigned short vid, int v) { struct batadv_orig_node_vlan *vlan; vlan = batadv_orig_node_vlan_new(orig_node, vid); if (!vlan) return; if (atomic_add_return(v, &vlan->tt.num_entries) == 0) { spin_lock_bh(&orig_node->vlan_list_lock); if (!hlist_unhashed(&vlan->list)) { hlist_del_init_rcu(&vlan->list); batadv_orig_node_vlan_put(vlan); } spin_unlock_bh(&orig_node->vlan_list_lock); } batadv_orig_node_vlan_put(vlan); } /** * batadv_tt_global_size_inc() - increase by one the global table size for the * given vid * @orig_node: the originator which global table size has to be decreased * @vid: the vlan identifier */ static void batadv_tt_global_size_inc(struct batadv_orig_node *orig_node, unsigned short vid) { batadv_tt_global_size_mod(orig_node, vid, 1); } /** * batadv_tt_global_size_dec() - decrease by one the global table size for the * given vid * @orig_node: the originator which global table size has to be decreased * @vid: the vlan identifier */ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node, unsigned short vid) { batadv_tt_global_size_mod(orig_node, vid, -1); } /** * batadv_tt_orig_list_entry_free_rcu() - free the orig_entry * @rcu: rcu pointer of the orig_entry */ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu) { struct batadv_tt_orig_list_entry *orig_entry; orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu); kmem_cache_free(batadv_tt_orig_cache, orig_entry); } /** * batadv_tt_orig_list_entry_release() - release tt orig entry from lists and * queue for free after rcu grace period * @ref: kref pointer of the tt orig entry */ static void 
batadv_tt_orig_list_entry_release(struct kref *ref) { struct batadv_tt_orig_list_entry *orig_entry; orig_entry = container_of(ref, struct batadv_tt_orig_list_entry, refcount); batadv_orig_node_put(orig_entry->orig_node); call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu); } /** * batadv_tt_orig_list_entry_put() - decrement the tt orig entry refcounter and * possibly release it * @orig_entry: tt orig entry to be free'd */ static void batadv_tt_orig_list_entry_put(struct batadv_tt_orig_list_entry *orig_entry) { if (!orig_entry) return; kref_put(&orig_entry->refcount, batadv_tt_orig_list_entry_release); } /** * batadv_tt_local_event() - store a local TT event (ADD/DEL) * @bat_priv: the bat priv with all the soft interface information * @tt_local_entry: the TT entry involved in the event * @event_flags: flags to store in the event structure */ static void batadv_tt_local_event(struct batadv_priv *bat_priv, struct batadv_tt_local_entry *tt_local_entry, u8 event_flags) { struct batadv_tt_change_node *tt_change_node, *entry, *safe; struct batadv_tt_common_entry *common = &tt_local_entry->common; u8 flags = common->flags | event_flags; bool event_removed = false; bool del_op_requested, del_op_entry; tt_change_node = kmem_cache_alloc(batadv_tt_change_cache, GFP_ATOMIC); if (!tt_change_node) return; tt_change_node->change.flags = flags; memset(tt_change_node->change.reserved, 0, sizeof(tt_change_node->change.reserved)); ether_addr_copy(tt_change_node->change.addr, common->addr); tt_change_node->change.vid = htons(common->vid); del_op_requested = flags & BATADV_TT_CLIENT_DEL; /* check for ADD+DEL or DEL+ADD events */ spin_lock_bh(&bat_priv->tt.changes_list_lock); list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list, list) { if (!batadv_compare_eth(entry->change.addr, common->addr)) continue; /* DEL+ADD in the same orig interval have no effect and can be * removed to avoid silly behaviour on the receiver side. The * other way around (ADD+DEL) can happen in case of roaming of * a client still in the NEW state. Roaming of NEW clients is * now possible due to automatically recognition of "temporary" * clients */ del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL; if (!del_op_requested && del_op_entry) goto del; if (del_op_requested && !del_op_entry) goto del; /* this is a second add in the same originator interval. It * means that flags have been changed: update them! */ if (!del_op_requested && !del_op_entry) entry->change.flags = flags; continue; del: list_del(&entry->list); kmem_cache_free(batadv_tt_change_cache, entry); kmem_cache_free(batadv_tt_change_cache, tt_change_node); event_removed = true; goto unlock; } /* track the change in the OGMinterval list */ list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list); unlock: spin_unlock_bh(&bat_priv->tt.changes_list_lock); if (event_removed) atomic_dec(&bat_priv->tt.local_changes); else atomic_inc(&bat_priv->tt.local_changes); } /** * batadv_tt_len() - compute length in bytes of given number of tt changes * @changes_num: number of tt changes * * Return: computed length in bytes. */ static int batadv_tt_len(int changes_num) { return changes_num * sizeof(struct batadv_tvlv_tt_change); } /** * batadv_tt_entries() - compute the number of entries fitting in tt_len bytes * @tt_len: available space * * Return: the number of entries. 
*/ static u16 batadv_tt_entries(u16 tt_len) { return tt_len / batadv_tt_len(1); } /** * batadv_tt_local_table_transmit_size() - calculates the local translation * table size when transmitted over the air * @bat_priv: the bat priv with all the soft interface information * * Return: local translation table size in bytes. */ static int batadv_tt_local_table_transmit_size(struct batadv_priv *bat_priv) { u16 num_vlan = 0; u16 tt_local_entries = 0; struct batadv_softif_vlan *vlan; int hdr_size; rcu_read_lock(); hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) { num_vlan++; tt_local_entries += atomic_read(&vlan->tt.num_entries); } rcu_read_unlock(); /* header size of tvlv encapsulated tt response payload */ hdr_size = sizeof(struct batadv_unicast_tvlv_packet); hdr_size += sizeof(struct batadv_tvlv_hdr); hdr_size += sizeof(struct batadv_tvlv_tt_data); hdr_size += num_vlan * sizeof(struct batadv_tvlv_tt_vlan_data); return hdr_size + batadv_tt_len(tt_local_entries); } static int batadv_tt_local_init(struct batadv_priv *bat_priv) { if (bat_priv->tt.local_hash) return 0; bat_priv->tt.local_hash = batadv_hash_new(1024); if (!bat_priv->tt.local_hash) return -ENOMEM; batadv_hash_set_lock_class(bat_priv->tt.local_hash, &batadv_tt_local_hash_lock_class_key); return 0; } static void batadv_tt_global_free(struct batadv_priv *bat_priv, struct batadv_tt_global_entry *tt_global, const char *message) { struct batadv_tt_global_entry *tt_removed_entry; struct hlist_node *tt_removed_node; batadv_dbg(BATADV_DBG_TT, bat_priv, "Deleting global tt entry %pM (vid: %d): %s\n", tt_global->common.addr, batadv_print_vid(tt_global->common.vid), message); tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt, batadv_choose_tt, &tt_global->common); if (!tt_removed_node) return; /* drop reference of remove hash entry */ tt_removed_entry = hlist_entry(tt_removed_node, struct batadv_tt_global_entry, common.hash_entry); batadv_tt_global_entry_put(tt_removed_entry); } /** * batadv_tt_local_add() - add a new client to the local table or update an * existing client * @soft_iface: netdev struct of the mesh interface * @addr: the mac address of the client to add * @vid: VLAN identifier * @ifindex: index of the interface where the client is connected to (useful to * identify wireless clients) * @mark: the value contained in the skb->mark field of the received packet (if * any) * * Return: true if the client was successfully added, false otherwise. 
*/ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, unsigned short vid, int ifindex, u32 mark) { struct batadv_priv *bat_priv = netdev_priv(soft_iface); struct batadv_tt_local_entry *tt_local; struct batadv_tt_global_entry *tt_global = NULL; struct net *net = dev_net(soft_iface); struct batadv_softif_vlan *vlan; struct net_device *in_dev = NULL; struct batadv_hard_iface *in_hardif = NULL; struct hlist_head *head; struct batadv_tt_orig_list_entry *orig_entry; int hash_added, table_size, packet_size_max; bool ret = false; bool roamed_back = false; u8 remote_flags; u32 match_mark; if (ifindex != BATADV_NULL_IFINDEX) in_dev = dev_get_by_index(net, ifindex); if (in_dev) in_hardif = batadv_hardif_get_by_netdev(in_dev); tt_local = batadv_tt_local_hash_find(bat_priv, addr, vid); if (!is_multicast_ether_addr(addr)) tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid); if (tt_local) { tt_local->last_seen = jiffies; if (tt_local->common.flags & BATADV_TT_CLIENT_PENDING) { batadv_dbg(BATADV_DBG_TT, bat_priv, "Re-adding pending client %pM (vid: %d)\n", addr, batadv_print_vid(vid)); /* whatever the reason why the PENDING flag was set, * this is a client which was enqueued to be removed in * this orig_interval. Since it popped up again, the * flag can be reset like it was never enqueued */ tt_local->common.flags &= ~BATADV_TT_CLIENT_PENDING; goto add_event; } if (tt_local->common.flags & BATADV_TT_CLIENT_ROAM) { batadv_dbg(BATADV_DBG_TT, bat_priv, "Roaming client %pM (vid: %d) came back to its original location\n", addr, batadv_print_vid(vid)); /* the ROAM flag is set because this client roamed away * and the node got a roaming_advertisement message. Now * that the client popped up again at its original * location such flag can be unset */ tt_local->common.flags &= ~BATADV_TT_CLIENT_ROAM; roamed_back = true; } goto check_roaming; } /* Ignore the client if we cannot send it in a full table response. 
*/ table_size = batadv_tt_local_table_transmit_size(bat_priv); table_size += batadv_tt_len(1); packet_size_max = atomic_read(&bat_priv->packet_size_max); if (table_size > packet_size_max) { net_ratelimited_function(batadv_info, soft_iface, "Local translation table size (%i) exceeds maximum packet size (%i); Ignoring new local tt entry: %pM\n", table_size, packet_size_max, addr); goto out; } tt_local = kmem_cache_alloc(batadv_tl_cache, GFP_ATOMIC); if (!tt_local) goto out; /* increase the refcounter of the related vlan */ vlan = batadv_softif_vlan_get(bat_priv, vid); if (!vlan) { net_ratelimited_function(batadv_info, soft_iface, "adding TT local entry %pM to non-existent VLAN %d\n", addr, batadv_print_vid(vid)); kmem_cache_free(batadv_tl_cache, tt_local); tt_local = NULL; goto out; } batadv_dbg(BATADV_DBG_TT, bat_priv, "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", addr, batadv_print_vid(vid), (u8)atomic_read(&bat_priv->tt.vn)); ether_addr_copy(tt_local->common.addr, addr); /* The local entry has to be marked as NEW to avoid sending it in * a full table response going out before the next ttvn increment * (consistency check) */ tt_local->common.flags = BATADV_TT_CLIENT_NEW; tt_local->common.vid = vid; if (batadv_is_wifi_hardif(in_hardif)) tt_local->common.flags |= BATADV_TT_CLIENT_WIFI; kref_init(&tt_local->common.refcount); tt_local->last_seen = jiffies; tt_local->common.added_at = tt_local->last_seen; tt_local->vlan = vlan; /* the batman interface mac and multicast addresses should never be * purged */ if (batadv_compare_eth(addr, soft_iface->dev_addr) || is_multicast_ether_addr(addr)) tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE; kref_get(&tt_local->common.refcount); hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt, batadv_choose_tt, &tt_local->common, &tt_local->common.hash_entry); if (unlikely(hash_added != 0)) { /* remove the reference for the hash */ batadv_tt_local_entry_put(tt_local); goto out; } add_event: batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS); check_roaming: /* Check whether this is a roaming event, but don't do anything if the roaming * process has already been handled */ if (tt_global && !(tt_global->common.flags & BATADV_TT_CLIENT_ROAM)) { /* These nodes are probably going to update their tt table */ head = &tt_global->orig_list; rcu_read_lock(); hlist_for_each_entry_rcu(orig_entry, head, list) { batadv_send_roam_adv(bat_priv, tt_global->common.addr, tt_global->common.vid, orig_entry->orig_node); } rcu_read_unlock(); if (roamed_back) { batadv_tt_global_free(bat_priv, tt_global, "Roaming canceled"); } else { /* The global entry has to be marked as ROAMING and * has to be kept for consistency purposes */ tt_global->common.flags |= BATADV_TT_CLIENT_ROAM; tt_global->roam_at = jiffies; } } /* store the current remote flags before altering them.
This helps * understanding if flags are changing or not */ remote_flags = tt_local->common.flags & BATADV_TT_REMOTE_MASK; if (batadv_is_wifi_hardif(in_hardif)) tt_local->common.flags |= BATADV_TT_CLIENT_WIFI; else tt_local->common.flags &= ~BATADV_TT_CLIENT_WIFI; /* check the mark in the skb: if it's equal to the configured * isolation_mark, it means the packet is coming from an isolated * non-mesh client */ match_mark = (mark & bat_priv->isolation_mark_mask); if (bat_priv->isolation_mark_mask && match_mark == bat_priv->isolation_mark) tt_local->common.flags |= BATADV_TT_CLIENT_ISOLA; else tt_local->common.flags &= ~BATADV_TT_CLIENT_ISOLA; /* if any "dynamic" flag has been modified, resend an ADD event for this * entry so that all the nodes can get the new flags */ if (remote_flags ^ (tt_local->common.flags & BATADV_TT_REMOTE_MASK)) batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS); ret = true; out: batadv_hardif_put(in_hardif); dev_put(in_dev); batadv_tt_local_entry_put(tt_local); batadv_tt_global_entry_put(tt_global); return ret; } /** * batadv_tt_prepare_tvlv_global_data() - prepare the TVLV TT header to send * within a TT Response directed to another node * @orig_node: originator for which the TT data has to be prepared * @tt_data: uninitialised pointer to the address of the TVLV buffer * @tt_change: uninitialised pointer to the address of the area where the TT * changes can be stored * @tt_len: pointer to the length to reserve to the tt_change. if -1 this * function reserves the amount of space needed to send the entire global TT * table. In case of success the value is updated with the real amount of * reserved bytes * * Allocate the needed amount of memory for the entire TT TVLV and write its * header made up of one tvlv_tt_data object and a series of tvlv_tt_vlan_data * objects, one per active VLAN served by the originator node. * * Return: the size of the allocated buffer or 0 in case of failure.
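 *
 * The resulting buffer holds one struct batadv_tvlv_tt_data header, followed
 * by one struct batadv_tvlv_tt_vlan_data per VLAN found in @orig_node's
 * vlan_list, with the remaining *@tt_len bytes reserved for the change
 * entries written through *@tt_change.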
*/ static u16 batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node, struct batadv_tvlv_tt_data **tt_data, struct batadv_tvlv_tt_change **tt_change, s32 *tt_len) { u16 num_vlan = 0; u16 num_entries = 0; u16 change_offset; u16 tvlv_len; struct batadv_tvlv_tt_vlan_data *tt_vlan; struct batadv_orig_node_vlan *vlan; u8 *tt_change_ptr; spin_lock_bh(&orig_node->vlan_list_lock); hlist_for_each_entry(vlan, &orig_node->vlan_list, list) { num_vlan++; num_entries += atomic_read(&vlan->tt.num_entries); } change_offset = sizeof(**tt_data); change_offset += num_vlan * sizeof(*tt_vlan); /* if tt_len is negative, allocate the space needed by the full table */ if (*tt_len < 0) *tt_len = batadv_tt_len(num_entries); tvlv_len = *tt_len; tvlv_len += change_offset; *tt_data = kmalloc(tvlv_len, GFP_ATOMIC); if (!*tt_data) { *tt_len = 0; goto out; } (*tt_data)->flags = BATADV_NO_FLAGS; (*tt_data)->ttvn = atomic_read(&orig_node->last_ttvn); (*tt_data)->num_vlan = htons(num_vlan); tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1); hlist_for_each_entry(vlan, &orig_node->vlan_list, list) { tt_vlan->vid = htons(vlan->vid); tt_vlan->crc = htonl(vlan->tt.crc); tt_vlan->reserved = 0; tt_vlan++; } tt_change_ptr = (u8 *)*tt_data + change_offset; *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr; out: spin_unlock_bh(&orig_node->vlan_list_lock); return tvlv_len; } /** * batadv_tt_prepare_tvlv_local_data() - allocate and prepare the TT TVLV for * this node * @bat_priv: the bat priv with all the soft interface information * @tt_data: uninitialised pointer to the address of the TVLV buffer * @tt_change: uninitialised pointer to the address of the area where the TT * changes can be stored * @tt_len: pointer to the length to reserve to the tt_change. if -1 this * function reserves the amount of space needed to send the entire local TT * table. In case of success the value is updated with the real amount of * reserved bytes * * Allocate the needed amount of memory for the entire TT TVLV and write its * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data * objects, one per active VLAN. * * Return: the size of the allocated buffer or 0 in case of failure. 
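 *
 * Unlike the global variant above, VLANs without any local TT entry are
 * skipped here and do not contribute a tvlv_tt_vlan_data descriptor.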
*/ static u16 batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, struct batadv_tvlv_tt_data **tt_data, struct batadv_tvlv_tt_change **tt_change, s32 *tt_len) { struct batadv_tvlv_tt_vlan_data *tt_vlan; struct batadv_softif_vlan *vlan; u16 num_vlan = 0; u16 vlan_entries = 0; u16 total_entries = 0; u16 tvlv_len; u8 *tt_change_ptr; int change_offset; spin_lock_bh(&bat_priv->softif_vlan_list_lock); hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) { vlan_entries = atomic_read(&vlan->tt.num_entries); if (vlan_entries < 1) continue; num_vlan++; total_entries += vlan_entries; } change_offset = sizeof(**tt_data); change_offset += num_vlan * sizeof(*tt_vlan); /* if tt_len is negative, allocate the space needed by the full table */ if (*tt_len < 0) *tt_len = batadv_tt_len(total_entries); tvlv_len = *tt_len; tvlv_len += change_offset; *tt_data = kmalloc(tvlv_len, GFP_ATOMIC); if (!*tt_data) { tvlv_len = 0; goto out; } (*tt_data)->flags = BATADV_NO_FLAGS; (*tt_data)->ttvn = atomic_read(&bat_priv->tt.vn); (*tt_data)->num_vlan = htons(num_vlan); tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1); hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) { vlan_entries = atomic_read(&vlan->tt.num_entries); if (vlan_entries < 1) continue; tt_vlan->vid = htons(vlan->vid); tt_vlan->crc = htonl(vlan->tt.crc); tt_vlan->reserved = 0; tt_vlan++; } tt_change_ptr = (u8 *)*tt_data + change_offset; *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr; out: spin_unlock_bh(&bat_priv->softif_vlan_list_lock); return tvlv_len; } /** * batadv_tt_tvlv_container_update() - update the translation table tvlv * container after local tt changes have been committed * @bat_priv: the bat priv with all the soft interface information */ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv) { struct batadv_tt_change_node *entry, *safe; struct batadv_tvlv_tt_data *tt_data; struct batadv_tvlv_tt_change *tt_change; int tt_diff_len, tt_change_len = 0; int tt_diff_entries_num = 0; int tt_diff_entries_count = 0; u16 tvlv_len; tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes); tt_diff_len = batadv_tt_len(tt_diff_entries_num); /* if we have too many changes for one packet don't send any * and wait for the tt table request which will be fragmented */ if (tt_diff_len > bat_priv->soft_iface->mtu) tt_diff_len = 0; tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data, &tt_change, &tt_diff_len); if (!tvlv_len) return; tt_data->flags = BATADV_TT_OGM_DIFF; if (tt_diff_len == 0) goto container_register; spin_lock_bh(&bat_priv->tt.changes_list_lock); atomic_set(&bat_priv->tt.local_changes, 0); list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list, list) { if (tt_diff_entries_count < tt_diff_entries_num) { memcpy(tt_change + tt_diff_entries_count, &entry->change, sizeof(struct batadv_tvlv_tt_change)); tt_diff_entries_count++; } list_del(&entry->list); kmem_cache_free(batadv_tt_change_cache, entry); } spin_unlock_bh(&bat_priv->tt.changes_list_lock); /* Keep the buffer for possible tt_request */ spin_lock_bh(&bat_priv->tt.last_changeset_lock); kfree(bat_priv->tt.last_changeset); bat_priv->tt.last_changeset_len = 0; bat_priv->tt.last_changeset = NULL; tt_change_len = batadv_tt_len(tt_diff_entries_count); /* check whether this new OGM has no changes due to size problems */ if (tt_diff_entries_count > 0) { /* if kmalloc() fails we will reply with the full table * instead of providing the diff */ bat_priv->tt.last_changeset = kzalloc(tt_diff_len, GFP_ATOMIC); 
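/* Only batadv_tt_len(tt_diff_entries_count) bytes are copied below; if some
 * queued changes did not fit, the remainder of the zero-initialised buffer
 * is simply left unused.
 */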
if (bat_priv->tt.last_changeset) { memcpy(bat_priv->tt.last_changeset, tt_change, tt_change_len); bat_priv->tt.last_changeset_len = tt_diff_len; } } spin_unlock_bh(&bat_priv->tt.last_changeset_lock); container_register: batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data, tvlv_len); kfree(tt_data); } /** * batadv_tt_local_dump_entry() - Dump one TT local entry into a message * @msg :Netlink message to dump into * @portid: Port making netlink request * @cb: Control block containing additional options * @bat_priv: The bat priv with all the soft interface information * @common: tt local & tt global common data * * Return: Error code, or 0 on success */ static int batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, struct netlink_callback *cb, struct batadv_priv *bat_priv, struct batadv_tt_common_entry *common) { void *hdr; struct batadv_softif_vlan *vlan; struct batadv_tt_local_entry *local; unsigned int last_seen_msecs; u32 crc; local = container_of(common, struct batadv_tt_local_entry, common); last_seen_msecs = jiffies_to_msecs(jiffies - local->last_seen); vlan = batadv_softif_vlan_get(bat_priv, common->vid); if (!vlan) return 0; crc = vlan->tt.crc; batadv_softif_vlan_put(vlan); hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq, &batadv_netlink_family, NLM_F_MULTI, BATADV_CMD_GET_TRANSTABLE_LOCAL); if (!hdr) return -ENOBUFS; genl_dump_check_consistent(cb, hdr); if (nla_put(msg, BATADV_ATTR_TT_ADDRESS, ETH_ALEN, common->addr) || nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) || nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) || nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags)) goto nla_put_failure; if (!(common->flags & BATADV_TT_CLIENT_NOPURGE) && nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, last_seen_msecs)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } /** * batadv_tt_local_dump_bucket() - Dump one TT local bucket into a message * @msg: Netlink message to dump into * @portid: Port making netlink request * @cb: Control block containing additional options * @bat_priv: The bat priv with all the soft interface information * @hash: hash to dump * @bucket: bucket index to dump * @idx_s: Number of entries to skip * * Return: Error code, or 0 on success */ static int batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid, struct netlink_callback *cb, struct batadv_priv *bat_priv, struct batadv_hashtable *hash, unsigned int bucket, int *idx_s) { struct batadv_tt_common_entry *common; int idx = 0; spin_lock_bh(&hash->list_locks[bucket]); cb->seq = atomic_read(&hash->generation) << 1 | 1; hlist_for_each_entry(common, &hash->table[bucket], hash_entry) { if (idx++ < *idx_s) continue; if (batadv_tt_local_dump_entry(msg, portid, cb, bat_priv, common)) { spin_unlock_bh(&hash->list_locks[bucket]); *idx_s = idx - 1; return -EMSGSIZE; } } spin_unlock_bh(&hash->list_locks[bucket]); *idx_s = 0; return 0; } /** * batadv_tt_local_dump() - Dump TT local entries into a message * @msg: Netlink message to dump into * @cb: Parameters from query * * Return: Error code, or 0 on success */ int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb) { struct net *net = sock_net(cb->skb->sk); struct net_device *soft_iface; struct batadv_priv *bat_priv; struct batadv_hard_iface *primary_if = NULL; struct batadv_hashtable *hash; int ret; int ifindex; int bucket = cb->args[0]; int idx = cb->args[1]; int portid = NETLINK_CB(cb->skb).portid; ifindex = batadv_netlink_get_ifindex(cb->nlh, 
BATADV_ATTR_MESH_IFINDEX); if (!ifindex) return -EINVAL; soft_iface = dev_get_by_index(net, ifindex); if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { ret = -ENODEV; goto out; } bat_priv = netdev_priv(soft_iface); primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { ret = -ENOENT; goto out; } hash = bat_priv->tt.local_hash; while (bucket < hash->size) { if (batadv_tt_local_dump_bucket(msg, portid, cb, bat_priv, hash, bucket, &idx)) break; bucket++; } ret = msg->len; out: batadv_hardif_put(primary_if); dev_put(soft_iface); cb->args[0] = bucket; cb->args[1] = idx; return ret; } static void batadv_tt_local_set_pending(struct batadv_priv *bat_priv, struct batadv_tt_local_entry *tt_local_entry, u16 flags, const char *message) { batadv_tt_local_event(bat_priv, tt_local_entry, flags); /* The local client has to be marked as "pending to be removed" but has * to be kept in the table in order to send it in a full table * response issued before the next ttvn increment (consistency check) */ tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING; batadv_dbg(BATADV_DBG_TT, bat_priv, "Local tt entry (%pM, vid: %d) pending to be removed: %s\n", tt_local_entry->common.addr, batadv_print_vid(tt_local_entry->common.vid), message); } /** * batadv_tt_local_remove() - logically remove an entry from the local table * @bat_priv: the bat priv with all the soft interface information * @addr: the MAC address of the client to remove * @vid: VLAN identifier * @message: message to append to the log on deletion * @roaming: true if the deletion is due to a roaming event * * Return: the flags assigned to the local entry before being deleted */ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid, const char *message, bool roaming) { struct batadv_tt_local_entry *tt_removed_entry; struct batadv_tt_local_entry *tt_local_entry; u16 flags, curr_flags = BATADV_NO_FLAGS; struct hlist_node *tt_removed_node; tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); if (!tt_local_entry) goto out; curr_flags = tt_local_entry->common.flags; flags = BATADV_TT_CLIENT_DEL; /* if this removal is due to a roaming event, the node has to * mark the local entry as "roamed" in order to correctly reroute * packets later */ if (roaming) { flags |= BATADV_TT_CLIENT_ROAM; /* mark the local client as ROAMed */ tt_local_entry->common.flags |= BATADV_TT_CLIENT_ROAM; } if (!(tt_local_entry->common.flags & BATADV_TT_CLIENT_NEW)) { batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message); goto out; } /* if this client has been added right now, it is possible to * immediately purge it */ batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash, batadv_compare_tt, batadv_choose_tt, &tt_local_entry->common); if (!tt_removed_node) goto out; /* drop reference of removed hash entry */ tt_removed_entry = hlist_entry(tt_removed_node, struct batadv_tt_local_entry, common.hash_entry); batadv_tt_local_entry_put(tt_removed_entry); out: batadv_tt_local_entry_put(tt_local_entry); return curr_flags; } /** * batadv_tt_local_purge_list() - purge inactive tt local entries * @bat_priv: the bat priv with all the soft interface information * @head: pointer to the list containing the local tt entries * @timeout: parameter deciding whether a given tt local entry is considered * inactive or not */ static void batadv_tt_local_purge_list(struct batadv_priv
*bat_priv, struct hlist_head *head, int timeout) { struct batadv_tt_local_entry *tt_local_entry; struct batadv_tt_common_entry *tt_common_entry; struct hlist_node *node_tmp; hlist_for_each_entry_safe(tt_common_entry, node_tmp, head, hash_entry) { tt_local_entry = container_of(tt_common_entry, struct batadv_tt_local_entry, common); if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE) continue; /* entry already marked for deletion */ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING) continue; if (!batadv_has_timed_out(tt_local_entry->last_seen, timeout)) continue; batadv_tt_local_set_pending(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL, "timed out"); } } /** * batadv_tt_local_purge() - purge inactive tt local entries * @bat_priv: the bat priv with all the soft interface information * @timeout: parameter deciding whether a given tt local entry is considered * inactive or not */ static void batadv_tt_local_purge(struct batadv_priv *bat_priv, int timeout) { struct batadv_hashtable *hash = bat_priv->tt.local_hash; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ u32 i; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); batadv_tt_local_purge_list(bat_priv, head, timeout); spin_unlock_bh(list_lock); } } static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) { struct batadv_hashtable *hash; spinlock_t *list_lock; /* protects write access to the hash lists */ struct batadv_tt_common_entry *tt_common_entry; struct batadv_tt_local_entry *tt_local; struct hlist_node *node_tmp; struct hlist_head *head; u32 i; if (!bat_priv->tt.local_hash) return; hash = bat_priv->tt.local_hash; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common_entry, node_tmp, head, hash_entry) { hlist_del_rcu(&tt_common_entry->hash_entry); tt_local = container_of(tt_common_entry, struct batadv_tt_local_entry, common); batadv_tt_local_entry_put(tt_local); } spin_unlock_bh(list_lock); } batadv_hash_destroy(hash); bat_priv->tt.local_hash = NULL; } static int batadv_tt_global_init(struct batadv_priv *bat_priv) { if (bat_priv->tt.global_hash) return 0; bat_priv->tt.global_hash = batadv_hash_new(1024); if (!bat_priv->tt.global_hash) return -ENOMEM; batadv_hash_set_lock_class(bat_priv->tt.global_hash, &batadv_tt_global_hash_lock_class_key); return 0; } static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv) { struct batadv_tt_change_node *entry, *safe; spin_lock_bh(&bat_priv->tt.changes_list_lock); list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list, list) { list_del(&entry->list); kmem_cache_free(batadv_tt_change_cache, entry); } atomic_set(&bat_priv->tt.local_changes, 0); spin_unlock_bh(&bat_priv->tt.changes_list_lock); } /** * batadv_tt_global_orig_entry_find() - find a TT orig_list_entry * @entry: the TT global entry where the orig_list_entry has to be * extracted from * @orig_node: the originator for which the orig_list_entry has to be found * * retrieve the orig_tt_list_entry belonging to orig_node from the * batadv_tt_global_entry list * * Return: it with an increased refcounter, NULL if not found */ static struct batadv_tt_orig_list_entry * batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry, const struct batadv_orig_node *orig_node) { struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL; const struct hlist_head *head; 
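/* walk the RCU-protected orig_list and take a reference on the matching
 * entry (via kref_get_unless_zero()) before leaving the read-side critical
 * section, so the caller can safely use it afterwards
 */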
rcu_read_lock(); head = &entry->orig_list; hlist_for_each_entry_rcu(tmp_orig_entry, head, list) { if (tmp_orig_entry->orig_node != orig_node) continue; if (!kref_get_unless_zero(&tmp_orig_entry->refcount)) continue; orig_entry = tmp_orig_entry; break; } rcu_read_unlock(); return orig_entry; } /** * batadv_tt_global_entry_has_orig() - check if a TT global entry is also * handled by a given originator * @entry: the TT global entry to check * @orig_node: the originator to search in the list * @flags: a pointer to store TT flags for the given @entry received * from @orig_node * * find out if an orig_node is already in the list of a tt_global_entry. * * Return: true if found, false otherwise */ static bool batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry, const struct batadv_orig_node *orig_node, u8 *flags) { struct batadv_tt_orig_list_entry *orig_entry; bool found = false; orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node); if (orig_entry) { found = true; if (flags) *flags = orig_entry->flags; batadv_tt_orig_list_entry_put(orig_entry); } return found; } /** * batadv_tt_global_sync_flags() - update TT sync flags * @tt_global: the TT global entry to update sync flags in * * Updates the sync flag bits in the tt_global flag attribute with a logical * OR of all sync flags from any of its TT orig entries. */ static void batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global) { struct batadv_tt_orig_list_entry *orig_entry; const struct hlist_head *head; u16 flags = BATADV_NO_FLAGS; rcu_read_lock(); head = &tt_global->orig_list; hlist_for_each_entry_rcu(orig_entry, head, list) flags |= orig_entry->flags; rcu_read_unlock(); flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK); tt_global->common.flags = flags; } /** * batadv_tt_global_orig_entry_add() - add or update a TT orig entry * @tt_global: the TT global entry to add an orig entry in * @orig_node: the originator to add an orig entry for * @ttvn: translation table version number of this changeset * @flags: TT sync flags */ static void batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, struct batadv_orig_node *orig_node, int ttvn, u8 flags) { struct batadv_tt_orig_list_entry *orig_entry; spin_lock_bh(&tt_global->list_lock); orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node); if (orig_entry) { /* refresh the ttvn: the current value could be a bogus one that * was added during a "temporary client detection" */ orig_entry->ttvn = ttvn; orig_entry->flags = flags; goto sync_flags; } orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC); if (!orig_entry) goto out; INIT_HLIST_NODE(&orig_entry->list); kref_get(&orig_node->refcount); batadv_tt_global_size_inc(orig_node, tt_global->common.vid); orig_entry->orig_node = orig_node; orig_entry->ttvn = ttvn; orig_entry->flags = flags; kref_init(&orig_entry->refcount); kref_get(&orig_entry->refcount); hlist_add_head_rcu(&orig_entry->list, &tt_global->orig_list); atomic_inc(&tt_global->orig_list_count); sync_flags: batadv_tt_global_sync_flags(tt_global); out: batadv_tt_orig_list_entry_put(orig_entry); spin_unlock_bh(&tt_global->list_lock); } /** * batadv_tt_global_add() - add a new TT global entry or update an existing one * @bat_priv: the bat priv with all the soft interface information * @orig_node: the originator announcing the client * @tt_addr: the mac address of the non-mesh client * @vid: VLAN identifier * @flags: TT flags that have to be set for this non-mesh client * @ttvn: the tt version number ever 
announcing this non-mesh client * * Add a new TT global entry for the given originator. If the entry already * exists add a new reference to the given originator (a global entry can have * references to multiple originators) and adjust the flags attribute to reflect * the function argument. * If a TT local entry exists for this non-mesh client remove it. * * The caller must hold the orig_node refcount. * * Return: true if the new entry has been added, false otherwise */ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, const unsigned char *tt_addr, unsigned short vid, u16 flags, u8 ttvn) { struct batadv_tt_global_entry *tt_global_entry; struct batadv_tt_local_entry *tt_local_entry; bool ret = false; int hash_added; struct batadv_tt_common_entry *common; u16 local_flags; /* ignore global entries from backbone nodes */ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) return true; tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr, vid); tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr, vid); /* if the node already has a local client for this entry, it has to wait * for a roaming advertisement instead of manually messing up the global * table */ if ((flags & BATADV_TT_CLIENT_TEMP) && tt_local_entry && !(tt_local_entry->common.flags & BATADV_TT_CLIENT_NEW)) goto out; if (!tt_global_entry) { tt_global_entry = kmem_cache_zalloc(batadv_tg_cache, GFP_ATOMIC); if (!tt_global_entry) goto out; common = &tt_global_entry->common; ether_addr_copy(common->addr, tt_addr); common->vid = vid; if (!is_multicast_ether_addr(common->addr)) common->flags = flags & (~BATADV_TT_SYNC_MASK); tt_global_entry->roam_at = 0; /* node must store current time in case of roaming. This is * needed to purge this entry out on timeout (if nobody claims * it) */ if (flags & BATADV_TT_CLIENT_ROAM) tt_global_entry->roam_at = jiffies; kref_init(&common->refcount); common->added_at = jiffies; INIT_HLIST_HEAD(&tt_global_entry->orig_list); atomic_set(&tt_global_entry->orig_list_count, 0); spin_lock_init(&tt_global_entry->list_lock); kref_get(&common->refcount); hash_added = batadv_hash_add(bat_priv->tt.global_hash, batadv_compare_tt, batadv_choose_tt, common, &common->hash_entry); if (unlikely(hash_added != 0)) { /* remove the reference for the hash */ batadv_tt_global_entry_put(tt_global_entry); goto out_remove; } } else { common = &tt_global_entry->common; /* If there is already a global entry, we can use this one for * our processing. * But if we are trying to add a temporary client then here are * two options at this point: * 1) the global client is not a temporary client: the global * client has to be left as it is, temporary information * should never override any already known client state * 2) the global client is a temporary client: purge the * originator list and add the new one orig_entry */ if (flags & BATADV_TT_CLIENT_TEMP) { if (!(common->flags & BATADV_TT_CLIENT_TEMP)) goto out; if (batadv_tt_global_entry_has_orig(tt_global_entry, orig_node, NULL)) goto out_remove; batadv_tt_global_del_orig_list(tt_global_entry); goto add_orig_entry; } /* if the client was temporary added before receiving the first * OGM announcing it, we have to clear the TEMP flag. Also, * remove the previous temporary orig node and re-add it * if required. If the orig entry changed, the new one which * is a non-temporary entry is preferred. 
*/ if (common->flags & BATADV_TT_CLIENT_TEMP) { batadv_tt_global_del_orig_list(tt_global_entry); common->flags &= ~BATADV_TT_CLIENT_TEMP; } /* the change can carry possible "attribute" flags like the * TT_CLIENT_TEMP, therefore they have to be copied in the * client entry */ if (!is_multicast_ether_addr(common->addr)) common->flags |= flags & (~BATADV_TT_SYNC_MASK); /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only * one originator left in the list and we previously received a * delete + roaming change for this originator. * * We should first delete the old originator before adding the * new one. */ if (common->flags & BATADV_TT_CLIENT_ROAM) { batadv_tt_global_del_orig_list(tt_global_entry); common->flags &= ~BATADV_TT_CLIENT_ROAM; tt_global_entry->roam_at = 0; } } add_orig_entry: /* add the new orig_entry (if needed) or update it */ batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn, flags & BATADV_TT_SYNC_MASK); batadv_dbg(BATADV_DBG_TT, bat_priv, "Creating new global tt entry: %pM (vid: %d, via %pM)\n", common->addr, batadv_print_vid(common->vid), orig_node->orig); ret = true; out_remove: /* Do not remove multicast addresses from the local hash on * global additions */ if (is_multicast_ether_addr(tt_addr)) goto out; /* remove address from local hash if present */ local_flags = batadv_tt_local_remove(bat_priv, tt_addr, vid, "global tt received", flags & BATADV_TT_CLIENT_ROAM); tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI; if (!(flags & BATADV_TT_CLIENT_ROAM)) /* this is a normal global add. Therefore the client is not in a * roaming state anymore. */ tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM; out: batadv_tt_global_entry_put(tt_global_entry); batadv_tt_local_entry_put(tt_local_entry); return ret; } /** * batadv_transtable_best_orig() - Get best originator list entry from tt entry * @bat_priv: the bat priv with all the soft interface information * @tt_global_entry: global translation table entry to be analyzed * * This function assumes the caller holds rcu_read_lock(). * Return: best originator list entry or NULL on errors. 
*/ static struct batadv_tt_orig_list_entry * batadv_transtable_best_orig(struct batadv_priv *bat_priv, struct batadv_tt_global_entry *tt_global_entry) { struct batadv_neigh_node *router, *best_router = NULL; struct batadv_algo_ops *bao = bat_priv->algo_ops; struct hlist_head *head; struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL; head = &tt_global_entry->orig_list; hlist_for_each_entry_rcu(orig_entry, head, list) { router = batadv_orig_router_get(orig_entry->orig_node, BATADV_IF_DEFAULT); if (!router) continue; if (best_router && bao->neigh.cmp(router, BATADV_IF_DEFAULT, best_router, BATADV_IF_DEFAULT) <= 0) { batadv_neigh_node_put(router); continue; } /* release the refcount for the "old" best */ batadv_neigh_node_put(best_router); best_entry = orig_entry; best_router = router; } batadv_neigh_node_put(best_router); return best_entry; } /** * batadv_tt_global_dump_subentry() - Dump all TT local entries into a message * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message * @common: tt local & tt global common data * @orig: Originator node announcing a non-mesh client * @best: Is the best originator for the TT entry * * Return: Error code, or 0 on success */ static int batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_tt_common_entry *common, struct batadv_tt_orig_list_entry *orig, bool best) { u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags; void *hdr; struct batadv_orig_node_vlan *vlan; u8 last_ttvn; u32 crc; vlan = batadv_orig_node_vlan_get(orig->orig_node, common->vid); if (!vlan) return 0; crc = vlan->tt.crc; batadv_orig_node_vlan_put(vlan); hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI, BATADV_CMD_GET_TRANSTABLE_GLOBAL); if (!hdr) return -ENOBUFS; last_ttvn = atomic_read(&orig->orig_node->last_ttvn); if (nla_put(msg, BATADV_ATTR_TT_ADDRESS, ETH_ALEN, common->addr) || nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, orig->orig_node->orig) || nla_put_u8(msg, BATADV_ATTR_TT_TTVN, orig->ttvn) || nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) || nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) || nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) || nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags)) goto nla_put_failure; if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } /** * batadv_tt_global_dump_entry() - Dump one TT global entry into a message * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message * @bat_priv: The bat priv with all the soft interface information * @common: tt local & tt global common data * @sub_s: Number of entries to skip * * This function assumes the caller holds rcu_read_lock(). 
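 * On -EMSGSIZE, @sub_s is updated with the number of sub-entries already
 * dumped so that a later call can resume from that point; it is reset to 0
 * once the whole entry has been dumped.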
* * Return: Error code, or 0 on success */ static int batadv_tt_global_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_priv *bat_priv, struct batadv_tt_common_entry *common, int *sub_s) { struct batadv_tt_orig_list_entry *orig_entry, *best_entry; struct batadv_tt_global_entry *global; struct hlist_head *head; int sub = 0; bool best; global = container_of(common, struct batadv_tt_global_entry, common); best_entry = batadv_transtable_best_orig(bat_priv, global); head = &global->orig_list; hlist_for_each_entry_rcu(orig_entry, head, list) { if (sub++ < *sub_s) continue; best = (orig_entry == best_entry); if (batadv_tt_global_dump_subentry(msg, portid, seq, common, orig_entry, best)) { *sub_s = sub - 1; return -EMSGSIZE; } } *sub_s = 0; return 0; } /** * batadv_tt_global_dump_bucket() - Dump one TT local bucket into a message * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message * @bat_priv: The bat priv with all the soft interface information * @head: Pointer to the list containing the global tt entries * @idx_s: Number of entries to skip * @sub: Number of entries to skip * * Return: Error code, or 0 on success */ static int batadv_tt_global_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_priv *bat_priv, struct hlist_head *head, int *idx_s, int *sub) { struct batadv_tt_common_entry *common; int idx = 0; rcu_read_lock(); hlist_for_each_entry_rcu(common, head, hash_entry) { if (idx++ < *idx_s) continue; if (batadv_tt_global_dump_entry(msg, portid, seq, bat_priv, common, sub)) { rcu_read_unlock(); *idx_s = idx - 1; return -EMSGSIZE; } } rcu_read_unlock(); *idx_s = 0; *sub = 0; return 0; } /** * batadv_tt_global_dump() - Dump TT global entries into a message * @msg: Netlink message to dump into * @cb: Parameters from query * * Return: Error code, or length of message on success */ int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb) { struct net *net = sock_net(cb->skb->sk); struct net_device *soft_iface; struct batadv_priv *bat_priv; struct batadv_hard_iface *primary_if = NULL; struct batadv_hashtable *hash; struct hlist_head *head; int ret; int ifindex; int bucket = cb->args[0]; int idx = cb->args[1]; int sub = cb->args[2]; int portid = NETLINK_CB(cb->skb).portid; ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX); if (!ifindex) return -EINVAL; soft_iface = dev_get_by_index(net, ifindex); if (!soft_iface || !batadv_softif_is_valid(soft_iface)) { ret = -ENODEV; goto out; } bat_priv = netdev_priv(soft_iface); primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { ret = -ENOENT; goto out; } hash = bat_priv->tt.global_hash; while (bucket < hash->size) { head = &hash->table[bucket]; if (batadv_tt_global_dump_bucket(msg, portid, cb->nlh->nlmsg_seq, bat_priv, head, &idx, &sub)) break; bucket++; } ret = msg->len; out: batadv_hardif_put(primary_if); dev_put(soft_iface); cb->args[0] = bucket; cb->args[1] = idx; cb->args[2] = sub; return ret; } /** * _batadv_tt_global_del_orig_entry() - remove and free an orig_entry * @tt_global_entry: the global entry to remove the orig_entry from * @orig_entry: the orig entry to remove and free * * Remove an orig_entry from its list in the given tt_global_entry and * free this orig_entry afterwards. * * Caller must hold tt_global_entry->list_lock and ensure orig_entry->list is * part of a list. 
*/ static void _batadv_tt_global_del_orig_entry(struct batadv_tt_global_entry *tt_global_entry, struct batadv_tt_orig_list_entry *orig_entry) { lockdep_assert_held(&tt_global_entry->list_lock); batadv_tt_global_size_dec(orig_entry->orig_node, tt_global_entry->common.vid); atomic_dec(&tt_global_entry->orig_list_count); /* requires holding tt_global_entry->list_lock and orig_entry->list * being part of a list */ hlist_del_rcu(&orig_entry->list); batadv_tt_orig_list_entry_put(orig_entry); } /* deletes the orig list of a tt_global_entry */ static void batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry) { struct hlist_head *head; struct hlist_node *safe; struct batadv_tt_orig_list_entry *orig_entry; spin_lock_bh(&tt_global_entry->list_lock); head = &tt_global_entry->orig_list; hlist_for_each_entry_safe(orig_entry, safe, head, list) _batadv_tt_global_del_orig_entry(tt_global_entry, orig_entry); spin_unlock_bh(&tt_global_entry->list_lock); } /** * batadv_tt_global_del_orig_node() - remove orig_node from a global tt entry * @bat_priv: the bat priv with all the soft interface information * @tt_global_entry: the global entry to remove the orig_node from * @orig_node: the originator announcing the client * @message: message to append to the log on deletion * * Remove the given orig_node and its corresponding orig_entry from the given * global tt entry. */ static void batadv_tt_global_del_orig_node(struct batadv_priv *bat_priv, struct batadv_tt_global_entry *tt_global_entry, struct batadv_orig_node *orig_node, const char *message) { struct hlist_head *head; struct hlist_node *safe; struct batadv_tt_orig_list_entry *orig_entry; unsigned short vid; spin_lock_bh(&tt_global_entry->list_lock); head = &tt_global_entry->orig_list; hlist_for_each_entry_safe(orig_entry, safe, head, list) { if (orig_entry->orig_node == orig_node) { vid = tt_global_entry->common.vid; batadv_dbg(BATADV_DBG_TT, bat_priv, "Deleting %pM from global tt entry %pM (vid: %d): %s\n", orig_node->orig, tt_global_entry->common.addr, batadv_print_vid(vid), message); _batadv_tt_global_del_orig_entry(tt_global_entry, orig_entry); } } spin_unlock_bh(&tt_global_entry->list_lock); } /* If the client is to be deleted, we check if it is the last originator entry * within the tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the * timer, otherwise we simply remove the originator scheduled for deletion. */ static void batadv_tt_global_del_roaming(struct batadv_priv *bat_priv, struct batadv_tt_global_entry *tt_global_entry, struct batadv_orig_node *orig_node, const char *message) { bool last_entry = true; struct hlist_head *head; struct batadv_tt_orig_list_entry *orig_entry; /* no local entry exists, case 1: * Check if this is the last one or if other entries exist. */ rcu_read_lock(); head = &tt_global_entry->orig_list; hlist_for_each_entry_rcu(orig_entry, head, list) { if (orig_entry->orig_node != orig_node) { last_entry = false; break; } } rcu_read_unlock(); if (last_entry) { /* it's the last one, mark it for roaming. */ tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM; tt_global_entry->roam_at = jiffies; } else { /* there is another entry, we can simply delete this * one and can still use the other one.
*/ batadv_tt_global_del_orig_node(bat_priv, tt_global_entry, orig_node, message); } } /** * batadv_tt_global_del() - remove a client from the global table * @bat_priv: the bat priv with all the soft interface information * @orig_node: an originator serving this client * @addr: the mac address of the client * @vid: VLAN identifier * @message: a message explaining the reason for deleting the client to print * for debugging purpose * @roaming: true if the deletion has been triggered by a roaming event */ static void batadv_tt_global_del(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, const unsigned char *addr, unsigned short vid, const char *message, bool roaming) { struct batadv_tt_global_entry *tt_global_entry; struct batadv_tt_local_entry *local_entry = NULL; tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid); if (!tt_global_entry) goto out; if (!roaming) { batadv_tt_global_del_orig_node(bat_priv, tt_global_entry, orig_node, message); if (hlist_empty(&tt_global_entry->orig_list)) batadv_tt_global_free(bat_priv, tt_global_entry, message); goto out; } /* if we are deleting a global entry due to a roam * event, there are two possibilities: * 1) the client roamed from node A to node B => if there * is only one originator left for this client, we mark * it with BATADV_TT_CLIENT_ROAM, we start a timer and we * wait for node B to claim it. In case of timeout * the entry is purged. * * If there are other originators left, we directly delete * the originator. * 2) the client roamed to us => we can directly delete * the global entry, since it is useless now. */ local_entry = batadv_tt_local_hash_find(bat_priv, tt_global_entry->common.addr, vid); if (local_entry) { /* local entry exists, case 2: client roamed to us. */ batadv_tt_global_del_orig_list(tt_global_entry); batadv_tt_global_free(bat_priv, tt_global_entry, message); } else { /* no local entry exists, case 1: check for roaming */ batadv_tt_global_del_roaming(bat_priv, tt_global_entry, orig_node, message); } out: batadv_tt_global_entry_put(tt_global_entry); batadv_tt_local_entry_put(local_entry); } /** * batadv_tt_global_del_orig() - remove all the TT global entries belonging to * the given originator matching the provided vid * @bat_priv: the bat priv with all the soft interface information * @orig_node: the originator owning the entries to remove * @match_vid: the VLAN identifier to match. 
If negative all the entries will be * removed * @message: debug message to print as "reason" */ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, s32 match_vid, const char *message) { struct batadv_tt_global_entry *tt_global; struct batadv_tt_common_entry *tt_common_entry; u32 i; struct batadv_hashtable *hash = bat_priv->tt.global_hash; struct hlist_node *safe; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ unsigned short vid; if (!hash) return; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common_entry, safe, head, hash_entry) { /* remove only matching entries */ if (match_vid >= 0 && tt_common_entry->vid != match_vid) continue; tt_global = container_of(tt_common_entry, struct batadv_tt_global_entry, common); batadv_tt_global_del_orig_node(bat_priv, tt_global, orig_node, message); if (hlist_empty(&tt_global->orig_list)) { vid = tt_global->common.vid; batadv_dbg(BATADV_DBG_TT, bat_priv, "Deleting global tt entry %pM (vid: %d): %s\n", tt_global->common.addr, batadv_print_vid(vid), message); hlist_del_rcu(&tt_common_entry->hash_entry); batadv_tt_global_entry_put(tt_global); } } spin_unlock_bh(list_lock); } clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized); } static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global, char **msg) { bool purge = false; unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT; unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT; if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) && batadv_has_timed_out(tt_global->roam_at, roam_timeout)) { purge = true; *msg = "Roaming timeout\n"; } if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) && batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) { purge = true; *msg = "Temporary client timeout\n"; } return purge; } static void batadv_tt_global_purge(struct batadv_priv *bat_priv) { struct batadv_hashtable *hash = bat_priv->tt.global_hash; struct hlist_head *head; struct hlist_node *node_tmp; spinlock_t *list_lock; /* protects write access to the hash lists */ u32 i; char *msg = NULL; struct batadv_tt_common_entry *tt_common; struct batadv_tt_global_entry *tt_global; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common, node_tmp, head, hash_entry) { tt_global = container_of(tt_common, struct batadv_tt_global_entry, common); if (!batadv_tt_global_to_purge(tt_global, &msg)) continue; batadv_dbg(BATADV_DBG_TT, bat_priv, "Deleting global tt entry %pM (vid: %d): %s\n", tt_global->common.addr, batadv_print_vid(tt_global->common.vid), msg); hlist_del_rcu(&tt_common->hash_entry); batadv_tt_global_entry_put(tt_global); } spin_unlock_bh(list_lock); } } static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) { struct batadv_hashtable *hash; spinlock_t *list_lock; /* protects write access to the hash lists */ struct batadv_tt_common_entry *tt_common_entry; struct batadv_tt_global_entry *tt_global; struct hlist_node *node_tmp; struct hlist_head *head; u32 i; if (!bat_priv->tt.global_hash) return; hash = bat_priv->tt.global_hash; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common_entry, node_tmp, head, hash_entry) { hlist_del_rcu(&tt_common_entry->hash_entry); tt_global 
= container_of(tt_common_entry, struct batadv_tt_global_entry, common); batadv_tt_global_entry_put(tt_global); } spin_unlock_bh(list_lock); } batadv_hash_destroy(hash); bat_priv->tt.global_hash = NULL; } static bool _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry, struct batadv_tt_global_entry *tt_global_entry) { if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI && tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI) return true; /* check if the two clients are marked as isolated */ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_ISOLA && tt_global_entry->common.flags & BATADV_TT_CLIENT_ISOLA) return true; return false; } /** * batadv_transtable_search() - get the mesh destination for a given client * @bat_priv: the bat priv with all the soft interface information * @src: mac address of the source client * @addr: mac address of the destination client * @vid: VLAN identifier * * Return: a pointer to the originator that was selected as destination in the * mesh for contacting the client 'addr', NULL otherwise. * In case of multiple originators serving the same client, the function returns * the best one (best in terms of metric towards the destination node). * * If the two clients are AP isolated the function returns NULL. */ struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv, const u8 *src, const u8 *addr, unsigned short vid) { struct batadv_tt_local_entry *tt_local_entry = NULL; struct batadv_tt_global_entry *tt_global_entry = NULL; struct batadv_orig_node *orig_node = NULL; struct batadv_tt_orig_list_entry *best_entry; if (src && batadv_vlan_ap_isola_get(bat_priv, vid)) { tt_local_entry = batadv_tt_local_hash_find(bat_priv, src, vid); if (!tt_local_entry || (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)) goto out; } tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid); if (!tt_global_entry) goto out; /* check whether the clients should not communicate due to AP * isolation */ if (tt_local_entry && _batadv_is_ap_isolated(tt_local_entry, tt_global_entry)) goto out; rcu_read_lock(); best_entry = batadv_transtable_best_orig(bat_priv, tt_global_entry); /* found anything? */ if (best_entry) orig_node = best_entry->orig_node; if (orig_node && !kref_get_unless_zero(&orig_node->refcount)) orig_node = NULL; rcu_read_unlock(); out: batadv_tt_global_entry_put(tt_global_entry); batadv_tt_local_entry_put(tt_local_entry); return orig_node; } /** * batadv_tt_global_crc() - calculates the checksum of the local table belonging * to the given orig_node * @bat_priv: the bat priv with all the soft interface information * @orig_node: originator for which the CRC should be computed * @vid: VLAN identifier for which the CRC32 has to be computed * * This function computes the checksum for the global table corresponding to a * specific originator. In particular, the checksum is computed as follows: For * each client connected to the originator the CRC32C of the MAC address and the * VID is computed and then all the CRC32Cs of the various clients are xor'ed * together. * * The idea behind is that CRC32C should be used as much as possible in order to * produce a unique hash of the table, but since the order which is used to feed * the CRC32C function affects the result and since every node in the network * probably sorts the clients differently, the hash function cannot be directly * computed over the entire table. 
Hence the CRC32C is used only on * the single client entry, while all the results are then xor'ed together * because the XOR operation can combine them all while trying to reduce the * noise as much as possible. * * Return: the checksum of the global table of a given originator. */ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, unsigned short vid) { struct batadv_hashtable *hash = bat_priv->tt.global_hash; struct batadv_tt_orig_list_entry *tt_orig; struct batadv_tt_common_entry *tt_common; struct batadv_tt_global_entry *tt_global; struct hlist_head *head; u32 i, crc_tmp, crc = 0; u8 flags; __be16 tmp_vid; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(tt_common, head, hash_entry) { tt_global = container_of(tt_common, struct batadv_tt_global_entry, common); /* compute the CRC only for entries belonging to the * VLAN identified by the vid passed as parameter */ if (tt_common->vid != vid) continue; /* Roaming clients are in the global table for * consistency only. They don't have to be * taken into account while computing the * global crc */ if (tt_common->flags & BATADV_TT_CLIENT_ROAM) continue; /* Temporary clients have not been announced yet, so * they have to be skipped while computing the global * crc */ if (tt_common->flags & BATADV_TT_CLIENT_TEMP) continue; /* find out if this global entry is announced by this * originator */ tt_orig = batadv_tt_global_orig_entry_find(tt_global, orig_node); if (!tt_orig) continue; /* use network order to read the VID: this ensures that * every node reads the bytes in the same order. */ tmp_vid = htons(tt_common->vid); crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid)); /* compute the CRC on flags that have to be kept in sync * among nodes */ flags = tt_orig->flags; crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags)); crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN); batadv_tt_orig_list_entry_put(tt_orig); } rcu_read_unlock(); } return crc; } /** * batadv_tt_local_crc() - calculates the checksum of the local table * @bat_priv: the bat priv with all the soft interface information * @vid: VLAN identifier for which the CRC32 has to be computed * * For details about the computation, please refer to the documentation for * batadv_tt_global_crc(). * * Return: the checksum of the local table */ static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv, unsigned short vid) { struct batadv_hashtable *hash = bat_priv->tt.local_hash; struct batadv_tt_common_entry *tt_common; struct hlist_head *head; u32 i, crc_tmp, crc = 0; u8 flags; __be16 tmp_vid; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(tt_common, head, hash_entry) { /* compute the CRC only for entries belonging to the * VLAN identified by vid */ if (tt_common->vid != vid) continue; /* not yet committed clients have not to be taken into * account while computing the CRC */ if (tt_common->flags & BATADV_TT_CLIENT_NEW) continue; /* use network order to read the VID: this ensures that * every node reads the bytes in the same order. 
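 * The per-client contribution is therefore
 * crc32c(crc32c(crc32c(0, vid), sync_flags), mac) and all contributions are
 * xor'ed into the final checksum, mirroring batadv_tt_global_crc().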
*/ tmp_vid = htons(tt_common->vid); crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid)); /* compute the CRC on flags that have to be kept in sync * among nodes */ flags = tt_common->flags & BATADV_TT_SYNC_MASK; crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags)); crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN); } rcu_read_unlock(); } return crc; } /** * batadv_tt_req_node_release() - free tt_req node entry * @ref: kref pointer of the tt req_node entry */ static void batadv_tt_req_node_release(struct kref *ref) { struct batadv_tt_req_node *tt_req_node; tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount); kmem_cache_free(batadv_tt_req_cache, tt_req_node); } /** * batadv_tt_req_node_put() - decrement the tt_req_node refcounter and * possibly release it * @tt_req_node: tt_req_node to be free'd */ static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node) { if (!tt_req_node) return; kref_put(&tt_req_node->refcount, batadv_tt_req_node_release); } static void batadv_tt_req_list_free(struct batadv_priv *bat_priv) { struct batadv_tt_req_node *node; struct hlist_node *safe; spin_lock_bh(&bat_priv->tt.req_list_lock); hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { hlist_del_init(&node->list); batadv_tt_req_node_put(node); } spin_unlock_bh(&bat_priv->tt.req_list_lock); } static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, const void *tt_buff, u16 tt_buff_len) { /* Replace the old buffer only if I received something in the * last OGM (the OGM could carry no changes) */ spin_lock_bh(&orig_node->tt_buff_lock); if (tt_buff_len > 0) { kfree(orig_node->tt_buff); orig_node->tt_buff_len = 0; orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC); if (orig_node->tt_buff) { memcpy(orig_node->tt_buff, tt_buff, tt_buff_len); orig_node->tt_buff_len = tt_buff_len; } } spin_unlock_bh(&orig_node->tt_buff_lock); } static void batadv_tt_req_purge(struct batadv_priv *bat_priv) { struct batadv_tt_req_node *node; struct hlist_node *safe; spin_lock_bh(&bat_priv->tt.req_list_lock); hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { if (batadv_has_timed_out(node->issued_at, BATADV_TT_REQUEST_TIMEOUT)) { hlist_del_init(&node->list); batadv_tt_req_node_put(node); } } spin_unlock_bh(&bat_priv->tt.req_list_lock); } /** * batadv_tt_req_node_new() - search and possibly create a tt_req_node object * @bat_priv: the bat priv with all the soft interface information * @orig_node: orig node this request is being issued for * * Return: the pointer to the new tt_req_node struct if no request * has already been issued for this orig_node, NULL otherwise. 
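 * The node is returned with an extra reference (one held by the req_list and
 * one for the caller), so the caller is expected to drop its reference with
 * batadv_tt_req_node_put() once it is done with it.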
*/ static struct batadv_tt_req_node * batadv_tt_req_node_new(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL; spin_lock_bh(&bat_priv->tt.req_list_lock); hlist_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) { if (batadv_compare_eth(tt_req_node_tmp, orig_node) && !batadv_has_timed_out(tt_req_node_tmp->issued_at, BATADV_TT_REQUEST_TIMEOUT)) goto unlock; } tt_req_node = kmem_cache_alloc(batadv_tt_req_cache, GFP_ATOMIC); if (!tt_req_node) goto unlock; kref_init(&tt_req_node->refcount); ether_addr_copy(tt_req_node->addr, orig_node->orig); tt_req_node->issued_at = jiffies; kref_get(&tt_req_node->refcount); hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list); unlock: spin_unlock_bh(&bat_priv->tt.req_list_lock); return tt_req_node; } /** * batadv_tt_local_valid() - verify local tt entry and get flags * @entry_ptr: the local tt entry to be checked * @data_ptr: not used but definition required to satisfy the callback prototype * @flags: a pointer to store the TT flags for this client in * * Checks the validity of the given local TT entry. If it is valid, the provided * flags pointer is updated. * * Return: true if the entry is valid, false otherwise. */ static bool batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr, u8 *flags) { const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW) return false; if (flags) *flags = tt_common_entry->flags; return true; } /** * batadv_tt_global_valid() - verify global tt entry and get flags * @entry_ptr: the global tt entry to be checked * @data_ptr: an orig_node object (may be NULL) * @flags: a pointer to store the TT flags for this client in * * Checks the validity of the given global TT entry. If it is valid, the provided * flags pointer is updated either with the common (summed) TT flags if data_ptr * is NULL or with the specific, per-originator TT flags otherwise. * * Return: true if the entry is valid, false otherwise. */ static bool batadv_tt_global_valid(const void *entry_ptr, const void *data_ptr, u8 *flags) { const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; const struct batadv_tt_global_entry *tt_global_entry; const struct batadv_orig_node *orig_node = data_ptr; if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM || tt_common_entry->flags & BATADV_TT_CLIENT_TEMP) return false; tt_global_entry = container_of(tt_common_entry, struct batadv_tt_global_entry, common); return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node, flags); } /** * batadv_tt_tvlv_generate() - fill the tvlv buff with the tt entries from the * specified tt hash * @bat_priv: the bat priv with all the soft interface information * @hash: hash table containing the tt entries * @tt_len: expected tvlv tt data buffer length in number of bytes * @tvlv_buff: pointer to the buffer to fill with the TT data * @valid_cb: function to filter tt change entries and to return TT flags * @cb_data: data passed to the filter function as argument * * Fills the tvlv buff with the tt entries from the specified hash. If valid_cb * is not provided then this becomes a no-op.
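 * At most batadv_tt_entries(@tt_len) entries are copied into @tvlv_buff;
 * entries rejected by @valid_cb are skipped.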
*/ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, struct batadv_hashtable *hash, void *tvlv_buff, u16 tt_len, bool (*valid_cb)(const void *, const void *, u8 *flags), void *cb_data) { struct batadv_tt_common_entry *tt_common_entry; struct batadv_tvlv_tt_change *tt_change; struct hlist_head *head; u16 tt_tot, tt_num_entries = 0; u8 flags; bool ret; u32 i; tt_tot = batadv_tt_entries(tt_len); tt_change = tvlv_buff; if (!valid_cb) return; rcu_read_lock(); for (i = 0; i < hash->size; i++) { head = &hash->table[i]; hlist_for_each_entry_rcu(tt_common_entry, head, hash_entry) { if (tt_tot == tt_num_entries) break; ret = valid_cb(tt_common_entry, cb_data, &flags); if (!ret) continue; ether_addr_copy(tt_change->addr, tt_common_entry->addr); tt_change->flags = flags; tt_change->vid = htons(tt_common_entry->vid); memset(tt_change->reserved, 0, sizeof(tt_change->reserved)); tt_num_entries++; tt_change++; } } rcu_read_unlock(); } /** * batadv_tt_global_check_crc() - check if all the CRCs are correct * @orig_node: originator for which the CRCs have to be checked * @tt_vlan: pointer to the first tvlv VLAN entry * @num_vlan: number of tvlv VLAN entries * * Return: true if all the received CRCs match the locally stored ones, false * otherwise */ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node, struct batadv_tvlv_tt_vlan_data *tt_vlan, u16 num_vlan) { struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp; struct batadv_orig_node_vlan *vlan; int i, orig_num_vlan; u32 crc; /* check if each received CRC matches the locally stored one */ for (i = 0; i < num_vlan; i++) { tt_vlan_tmp = tt_vlan + i; /* if orig_node is a backbone node for this VLAN, don't check * the CRC as we ignore all the global entries over it */ if (batadv_bla_is_backbone_gw_orig(orig_node->bat_priv, orig_node->orig, ntohs(tt_vlan_tmp->vid))) continue; vlan = batadv_orig_node_vlan_get(orig_node, ntohs(tt_vlan_tmp->vid)); if (!vlan) return false; crc = vlan->tt.crc; batadv_orig_node_vlan_put(vlan); if (crc != ntohl(tt_vlan_tmp->crc)) return false; } /* check if any excess VLANs exist locally for the originator * which are not mentioned in the TVLV from the originator. 
*/ rcu_read_lock(); orig_num_vlan = 0; hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) orig_num_vlan++; rcu_read_unlock(); if (orig_num_vlan > num_vlan) return false; return true; } /** * batadv_tt_local_update_crc() - update all the local CRCs * @bat_priv: the bat priv with all the soft interface information */ static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv) { struct batadv_softif_vlan *vlan; /* recompute the global CRC for each VLAN */ rcu_read_lock(); hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) { vlan->tt.crc = batadv_tt_local_crc(bat_priv, vlan->vid); } rcu_read_unlock(); } /** * batadv_tt_global_update_crc() - update all the global CRCs for this orig_node * @bat_priv: the bat priv with all the soft interface information * @orig_node: the orig_node for which the CRCs have to be updated */ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct batadv_orig_node_vlan *vlan; u32 crc; /* recompute the global CRC for each VLAN */ rcu_read_lock(); hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) { /* if orig_node is a backbone node for this VLAN, don't compute * the CRC as we ignore all the global entries over it */ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vlan->vid)) continue; crc = batadv_tt_global_crc(bat_priv, orig_node, vlan->vid); vlan->tt.crc = crc; } rcu_read_unlock(); } /** * batadv_send_tt_request() - send a TT Request message to a given node * @bat_priv: the bat priv with all the soft interface information * @dst_orig_node: the destination of the message * @ttvn: the version number that the source of the message is looking for * @tt_vlan: pointer to the first tvlv VLAN object to request * @num_vlan: number of tvlv VLAN entries * @full_table: ask for the entire translation table if true, while only for the * last TT diff otherwise * * Return: true if the TT Request was sent, false otherwise */ static bool batadv_send_tt_request(struct batadv_priv *bat_priv, struct batadv_orig_node *dst_orig_node, u8 ttvn, struct batadv_tvlv_tt_vlan_data *tt_vlan, u16 num_vlan, bool full_table) { struct batadv_tvlv_tt_data *tvlv_tt_data = NULL; struct batadv_tt_req_node *tt_req_node = NULL; struct batadv_tvlv_tt_vlan_data *tt_vlan_req; struct batadv_hard_iface *primary_if; bool ret = false; int i, size; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto out; /* The new tt_req will be issued only if I'm not waiting for a * reply from the same orig_node yet */ tt_req_node = batadv_tt_req_node_new(bat_priv, dst_orig_node); if (!tt_req_node) goto out; size = sizeof(*tvlv_tt_data) + sizeof(*tt_vlan_req) * num_vlan; tvlv_tt_data = kzalloc(size, GFP_ATOMIC); if (!tvlv_tt_data) goto out; tvlv_tt_data->flags = BATADV_TT_REQUEST; tvlv_tt_data->ttvn = ttvn; tvlv_tt_data->num_vlan = htons(num_vlan); /* send all the CRCs within the request. This is needed by intermediate * nodes to ensure they have the correct table before replying */ tt_vlan_req = (struct batadv_tvlv_tt_vlan_data *)(tvlv_tt_data + 1); for (i = 0; i < num_vlan; i++) { tt_vlan_req->vid = tt_vlan->vid; tt_vlan_req->crc = tt_vlan->crc; tt_vlan_req++; tt_vlan++; } if (full_table) tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE; batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending TT_REQUEST to %pM [%c]\n", dst_orig_node->orig, full_table ? 
'F' : '.'); batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX); batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr, dst_orig_node->orig, BATADV_TVLV_TT, 1, tvlv_tt_data, size); ret = true; out: batadv_hardif_put(primary_if); if (ret && tt_req_node) { spin_lock_bh(&bat_priv->tt.req_list_lock); if (!hlist_unhashed(&tt_req_node->list)) { hlist_del_init(&tt_req_node->list); batadv_tt_req_node_put(tt_req_node); } spin_unlock_bh(&bat_priv->tt.req_list_lock); } batadv_tt_req_node_put(tt_req_node); kfree(tvlv_tt_data); return ret; } /** * batadv_send_other_tt_response() - send reply to tt request concerning another * node's translation table * @bat_priv: the bat priv with all the soft interface information * @tt_data: tt data containing the tt request information * @req_src: mac address of tt request sender * @req_dst: mac address of tt request recipient * * Return: true if tt request reply was sent, false otherwise. */ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv, struct batadv_tvlv_tt_data *tt_data, u8 *req_src, u8 *req_dst) { struct batadv_orig_node *req_dst_orig_node; struct batadv_orig_node *res_dst_orig_node = NULL; struct batadv_tvlv_tt_change *tt_change; struct batadv_tvlv_tt_data *tvlv_tt_data = NULL; struct batadv_tvlv_tt_vlan_data *tt_vlan; bool ret = false, full_table; u8 orig_ttvn, req_ttvn; u16 tvlv_len; s32 tt_len; batadv_dbg(BATADV_DBG_TT, bat_priv, "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n", req_src, tt_data->ttvn, req_dst, ((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.')); /* Let's get the orig node of the REAL destination */ req_dst_orig_node = batadv_orig_hash_find(bat_priv, req_dst); if (!req_dst_orig_node) goto out; res_dst_orig_node = batadv_orig_hash_find(bat_priv, req_src); if (!res_dst_orig_node) goto out; orig_ttvn = (u8)atomic_read(&req_dst_orig_node->last_ttvn); req_ttvn = tt_data->ttvn; tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1); /* this node doesn't have the requested data */ if (orig_ttvn != req_ttvn || !batadv_tt_global_check_crc(req_dst_orig_node, tt_vlan, ntohs(tt_data->num_vlan))) goto out; /* If the full table has been explicitly requested */ if (tt_data->flags & BATADV_TT_FULL_TABLE || !req_dst_orig_node->tt_buff) full_table = true; else full_table = false; /* TT fragmentation hasn't been implemented yet, so send as many * TT entries fit a single packet as possible only */ if (!full_table) { spin_lock_bh(&req_dst_orig_node->tt_buff_lock); tt_len = req_dst_orig_node->tt_buff_len; tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node, &tvlv_tt_data, &tt_change, &tt_len); if (!tt_len) goto unlock; /* Copy the last orig_node's OGM buffer */ memcpy(tt_change, req_dst_orig_node->tt_buff, req_dst_orig_node->tt_buff_len); spin_unlock_bh(&req_dst_orig_node->tt_buff_lock); } else { /* allocate the tvlv, put the tt_data and all the tt_vlan_data * in the initial part */ tt_len = -1; tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node, &tvlv_tt_data, &tt_change, &tt_len); if (!tt_len) goto out; /* fill the rest of the tvlv with the real TT entries */ batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash, tt_change, tt_len, batadv_tt_global_valid, req_dst_orig_node); } /* Don't send the response, if larger than fragmented packet. 
*/ tt_len = sizeof(struct batadv_unicast_tvlv_packet) + tvlv_len; if (tt_len > atomic_read(&bat_priv->packet_size_max)) { net_ratelimited_function(batadv_info, bat_priv->soft_iface, "Ignoring TT_REQUEST from %pM; Response size exceeds max packet size.\n", res_dst_orig_node->orig); goto out; } tvlv_tt_data->flags = BATADV_TT_RESPONSE; tvlv_tt_data->ttvn = req_ttvn; if (full_table) tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE; batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending TT_RESPONSE %pM for %pM [%c] (ttvn: %u)\n", res_dst_orig_node->orig, req_dst_orig_node->orig, full_table ? 'F' : '.', req_ttvn); batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX); batadv_tvlv_unicast_send(bat_priv, req_dst_orig_node->orig, req_src, BATADV_TVLV_TT, 1, tvlv_tt_data, tvlv_len); ret = true; goto out; unlock: spin_unlock_bh(&req_dst_orig_node->tt_buff_lock); out: batadv_orig_node_put(res_dst_orig_node); batadv_orig_node_put(req_dst_orig_node); kfree(tvlv_tt_data); return ret; } /** * batadv_send_my_tt_response() - send reply to tt request concerning this * node's translation table * @bat_priv: the bat priv with all the soft interface information * @tt_data: tt data containing the tt request information * @req_src: mac address of tt request sender * * Return: true if tt request reply was sent, false otherwise. */ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv, struct batadv_tvlv_tt_data *tt_data, u8 *req_src) { struct batadv_tvlv_tt_data *tvlv_tt_data = NULL; struct batadv_hard_iface *primary_if = NULL; struct batadv_tvlv_tt_change *tt_change; struct batadv_orig_node *orig_node; u8 my_ttvn, req_ttvn; u16 tvlv_len; bool full_table; s32 tt_len; batadv_dbg(BATADV_DBG_TT, bat_priv, "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n", req_src, tt_data->ttvn, ((tt_data->flags & BATADV_TT_FULL_TABLE) ? 
'F' : '.')); spin_lock_bh(&bat_priv->tt.commit_lock); my_ttvn = (u8)atomic_read(&bat_priv->tt.vn); req_ttvn = tt_data->ttvn; orig_node = batadv_orig_hash_find(bat_priv, req_src); if (!orig_node) goto out; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto out; /* If the full table has been explicitly requested or the gap * is too big send the whole local translation table */ if (tt_data->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn || !bat_priv->tt.last_changeset) full_table = true; else full_table = false; /* TT fragmentation hasn't been implemented yet, so send as many * TT entries fit a single packet as possible only */ if (!full_table) { spin_lock_bh(&bat_priv->tt.last_changeset_lock); tt_len = bat_priv->tt.last_changeset_len; tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tvlv_tt_data, &tt_change, &tt_len); if (!tt_len || !tvlv_len) goto unlock; /* Copy the last orig_node's OGM buffer */ memcpy(tt_change, bat_priv->tt.last_changeset, bat_priv->tt.last_changeset_len); spin_unlock_bh(&bat_priv->tt.last_changeset_lock); } else { req_ttvn = (u8)atomic_read(&bat_priv->tt.vn); /* allocate the tvlv, put the tt_data and all the tt_vlan_data * in the initial part */ tt_len = -1; tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tvlv_tt_data, &tt_change, &tt_len); if (!tt_len || !tvlv_len) goto out; /* fill the rest of the tvlv with the real TT entries */ batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash, tt_change, tt_len, batadv_tt_local_valid, NULL); } tvlv_tt_data->flags = BATADV_TT_RESPONSE; tvlv_tt_data->ttvn = req_ttvn; if (full_table) tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE; batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending TT_RESPONSE to %pM [%c] (ttvn: %u)\n", orig_node->orig, full_table ? 'F' : '.', req_ttvn); batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX); batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr, req_src, BATADV_TVLV_TT, 1, tvlv_tt_data, tvlv_len); goto out; unlock: spin_unlock_bh(&bat_priv->tt.last_changeset_lock); out: spin_unlock_bh(&bat_priv->tt.commit_lock); batadv_orig_node_put(orig_node); batadv_hardif_put(primary_if); kfree(tvlv_tt_data); /* The packet was for this host, so it doesn't need to be re-routed */ return true; } /** * batadv_send_tt_response() - send reply to tt request * @bat_priv: the bat priv with all the soft interface information * @tt_data: tt data containing the tt request information * @req_src: mac address of tt request sender * @req_dst: mac address of tt request recipient * * Return: true if tt request reply was sent, false otherwise. 
*/ static bool batadv_send_tt_response(struct batadv_priv *bat_priv, struct batadv_tvlv_tt_data *tt_data, u8 *req_src, u8 *req_dst) { if (batadv_is_my_mac(bat_priv, req_dst)) return batadv_send_my_tt_response(bat_priv, tt_data, req_src); return batadv_send_other_tt_response(bat_priv, tt_data, req_src, req_dst); } static void _batadv_tt_update_changes(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, struct batadv_tvlv_tt_change *tt_change, u16 tt_num_changes, u8 ttvn) { int i; int roams; for (i = 0; i < tt_num_changes; i++) { if ((tt_change + i)->flags & BATADV_TT_CLIENT_DEL) { roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM; batadv_tt_global_del(bat_priv, orig_node, (tt_change + i)->addr, ntohs((tt_change + i)->vid), "tt removed by changes", roams); } else { if (!batadv_tt_global_add(bat_priv, orig_node, (tt_change + i)->addr, ntohs((tt_change + i)->vid), (tt_change + i)->flags, ttvn)) /* In case of problem while storing a * global_entry, we stop the updating * procedure without committing the * ttvn change. This will avoid to send * corrupted data on tt_request */ return; } } set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized); } static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv, struct batadv_tvlv_tt_change *tt_change, u8 ttvn, u8 *resp_src, u16 num_entries) { struct batadv_orig_node *orig_node; orig_node = batadv_orig_hash_find(bat_priv, resp_src); if (!orig_node) goto out; /* Purge the old table first.. */ batadv_tt_global_del_orig(bat_priv, orig_node, -1, "Received full table"); _batadv_tt_update_changes(bat_priv, orig_node, tt_change, num_entries, ttvn); spin_lock_bh(&orig_node->tt_buff_lock); kfree(orig_node->tt_buff); orig_node->tt_buff_len = 0; orig_node->tt_buff = NULL; spin_unlock_bh(&orig_node->tt_buff_lock); atomic_set(&orig_node->last_ttvn, ttvn); out: batadv_orig_node_put(orig_node); } static void batadv_tt_update_changes(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, u16 tt_num_changes, u8 ttvn, struct batadv_tvlv_tt_change *tt_change) { _batadv_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes, ttvn); batadv_tt_save_orig_buffer(bat_priv, orig_node, tt_change, batadv_tt_len(tt_num_changes)); atomic_set(&orig_node->last_ttvn, ttvn); } /** * batadv_is_my_client() - check if a client is served by the local node * @bat_priv: the bat priv with all the soft interface information * @addr: the mac address of the client to check * @vid: VLAN identifier * * Return: true if the client is served by this node, false otherwise. 
*/ bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid) { struct batadv_tt_local_entry *tt_local_entry; bool ret = false; tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); if (!tt_local_entry) goto out; /* Check if the client has been logically deleted (but is kept for * consistency purpose) */ if ((tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING) || (tt_local_entry->common.flags & BATADV_TT_CLIENT_ROAM)) goto out; ret = true; out: batadv_tt_local_entry_put(tt_local_entry); return ret; } /** * batadv_handle_tt_response() - process incoming tt reply * @bat_priv: the bat priv with all the soft interface information * @tt_data: tt data containing the tt request information * @resp_src: mac address of tt reply sender * @num_entries: number of tt change entries appended to the tt data */ static void batadv_handle_tt_response(struct batadv_priv *bat_priv, struct batadv_tvlv_tt_data *tt_data, u8 *resp_src, u16 num_entries) { struct batadv_tt_req_node *node; struct hlist_node *safe; struct batadv_orig_node *orig_node = NULL; struct batadv_tvlv_tt_change *tt_change; u8 *tvlv_ptr = (u8 *)tt_data; u16 change_offset; batadv_dbg(BATADV_DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n", resp_src, tt_data->ttvn, num_entries, ((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.')); orig_node = batadv_orig_hash_find(bat_priv, resp_src); if (!orig_node) goto out; spin_lock_bh(&orig_node->tt_lock); change_offset = sizeof(struct batadv_tvlv_tt_vlan_data); change_offset *= ntohs(tt_data->num_vlan); change_offset += sizeof(*tt_data); tvlv_ptr += change_offset; tt_change = (struct batadv_tvlv_tt_change *)tvlv_ptr; if (tt_data->flags & BATADV_TT_FULL_TABLE) { batadv_tt_fill_gtable(bat_priv, tt_change, tt_data->ttvn, resp_src, num_entries); } else { batadv_tt_update_changes(bat_priv, orig_node, num_entries, tt_data->ttvn, tt_change); } /* Recalculate the CRC for this orig_node and store it */ batadv_tt_global_update_crc(bat_priv, orig_node); spin_unlock_bh(&orig_node->tt_lock); /* Delete the tt_req_node from pending tt_requests list */ spin_lock_bh(&bat_priv->tt.req_list_lock); hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { if (!batadv_compare_eth(node->addr, resp_src)) continue; hlist_del_init(&node->list); batadv_tt_req_node_put(node); } spin_unlock_bh(&bat_priv->tt.req_list_lock); out: batadv_orig_node_put(orig_node); } static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv) { struct batadv_tt_roam_node *node, *safe; spin_lock_bh(&bat_priv->tt.roam_list_lock); list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) { list_del(&node->list); kmem_cache_free(batadv_tt_roam_cache, node); } spin_unlock_bh(&bat_priv->tt.roam_list_lock); } static void batadv_tt_roam_purge(struct batadv_priv *bat_priv) { struct batadv_tt_roam_node *node, *safe; spin_lock_bh(&bat_priv->tt.roam_list_lock); list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) { if (!batadv_has_timed_out(node->first_time, BATADV_ROAMING_MAX_TIME)) continue; list_del(&node->list); kmem_cache_free(batadv_tt_roam_cache, node); } spin_unlock_bh(&bat_priv->tt.roam_list_lock); } /** * batadv_tt_check_roam_count() - check if a client has roamed too frequently * @bat_priv: the bat priv with all the soft interface information * @client: mac address of the roaming client * * This function checks whether the client already reached the * maximum number of possible roaming phases. 
In this case the ROAMING_ADV * will not be sent. * * Return: true if the ROAMING_ADV can be sent, false otherwise */ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client) { struct batadv_tt_roam_node *tt_roam_node; bool ret = false; spin_lock_bh(&bat_priv->tt.roam_list_lock); /* The new tt_req will be issued only if I'm not waiting for a * reply from the same orig_node yet */ list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) { if (!batadv_compare_eth(tt_roam_node->addr, client)) continue; if (batadv_has_timed_out(tt_roam_node->first_time, BATADV_ROAMING_MAX_TIME)) continue; if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter)) /* Sorry, you roamed too many times! */ goto unlock; ret = true; break; } if (!ret) { tt_roam_node = kmem_cache_alloc(batadv_tt_roam_cache, GFP_ATOMIC); if (!tt_roam_node) goto unlock; tt_roam_node->first_time = jiffies; atomic_set(&tt_roam_node->counter, BATADV_ROAMING_MAX_COUNT - 1); ether_addr_copy(tt_roam_node->addr, client); list_add(&tt_roam_node->list, &bat_priv->tt.roam_list); ret = true; } unlock: spin_unlock_bh(&bat_priv->tt.roam_list_lock); return ret; } /** * batadv_send_roam_adv() - send a roaming advertisement message * @bat_priv: the bat priv with all the soft interface information * @client: mac address of the roaming client * @vid: VLAN identifier * @orig_node: message destination * * Send a ROAMING_ADV message to the node which was previously serving this * client. This is done to inform the node that from now on all traffic destined * for this particular roamed client has to be forwarded to the sender of the * roaming message. */ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client, unsigned short vid, struct batadv_orig_node *orig_node) { struct batadv_hard_iface *primary_if; struct batadv_tvlv_roam_adv tvlv_roam; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto out; /* before going on we have to check whether the client has * already roamed to us too many times */ if (!batadv_tt_check_roam_count(bat_priv, client)) goto out; batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending ROAMING_ADV to %pM (client %pM, vid: %d)\n", orig_node->orig, client, batadv_print_vid(vid)); batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX); memcpy(tvlv_roam.client, client, sizeof(tvlv_roam.client)); tvlv_roam.vid = htons(vid); batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr, orig_node->orig, BATADV_TVLV_ROAM, 1, &tvlv_roam, sizeof(tvlv_roam)); out: batadv_hardif_put(primary_if); } static void batadv_tt_purge(struct work_struct *work) { struct delayed_work *delayed_work; struct batadv_priv_tt *priv_tt; struct batadv_priv *bat_priv; delayed_work = to_delayed_work(work); priv_tt = container_of(delayed_work, struct batadv_priv_tt, work); bat_priv = container_of(priv_tt, struct batadv_priv, tt); batadv_tt_local_purge(bat_priv, BATADV_TT_LOCAL_TIMEOUT); batadv_tt_global_purge(bat_priv); batadv_tt_req_purge(bat_priv); batadv_tt_roam_purge(bat_priv); queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work, msecs_to_jiffies(BATADV_TT_WORK_PERIOD)); } /** * batadv_tt_free() - Free translation table of soft interface * @bat_priv: the bat priv with all the soft interface information */ void batadv_tt_free(struct batadv_priv *bat_priv) { batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_ROAM, 1); batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1); batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1); cancel_delayed_work_sync(&bat_priv->tt.work); 
batadv_tt_local_table_free(bat_priv); batadv_tt_global_table_free(bat_priv); batadv_tt_req_list_free(bat_priv); batadv_tt_changes_list_free(bat_priv); batadv_tt_roam_list_free(bat_priv); kfree(bat_priv->tt.last_changeset); } /** * batadv_tt_local_set_flags() - set or unset the specified flags on the local * table and possibly count them in the TT size * @bat_priv: the bat priv with all the soft interface information * @flags: the flag to switch * @enable: whether to set or unset the flag * @count: whether to increase the TT size by the number of changed entries */ static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv, u16 flags, bool enable, bool count) { struct batadv_hashtable *hash = bat_priv->tt.local_hash; struct batadv_tt_common_entry *tt_common_entry; struct hlist_head *head; u32 i; if (!hash) return; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(tt_common_entry, head, hash_entry) { if (enable) { if ((tt_common_entry->flags & flags) == flags) continue; tt_common_entry->flags |= flags; } else { if (!(tt_common_entry->flags & flags)) continue; tt_common_entry->flags &= ~flags; } if (!count) continue; batadv_tt_local_size_inc(bat_priv, tt_common_entry->vid); } rcu_read_unlock(); } } /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) { struct batadv_hashtable *hash = bat_priv->tt.local_hash; struct batadv_tt_common_entry *tt_common; struct batadv_tt_local_entry *tt_local; struct hlist_node *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ u32 i; if (!hash) return; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); hlist_for_each_entry_safe(tt_common, node_tmp, head, hash_entry) { if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING)) continue; batadv_dbg(BATADV_DBG_TT, bat_priv, "Deleting local tt entry (%pM, vid: %d): pending\n", tt_common->addr, batadv_print_vid(tt_common->vid)); batadv_tt_local_size_dec(bat_priv, tt_common->vid); hlist_del_rcu(&tt_common->hash_entry); tt_local = container_of(tt_common, struct batadv_tt_local_entry, common); batadv_tt_local_entry_put(tt_local); } spin_unlock_bh(list_lock); } } /** * batadv_tt_local_commit_changes_nolock() - commit all pending local tt changes * which have been queued in the time since the last commit * @bat_priv: the bat priv with all the soft interface information * * Caller must hold tt->commit_lock. 
*/ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv) { lockdep_assert_held(&bat_priv->tt.commit_lock); if (atomic_read(&bat_priv->tt.local_changes) < 1) { if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt)) batadv_tt_tvlv_container_update(bat_priv); return; } batadv_tt_local_set_flags(bat_priv, BATADV_TT_CLIENT_NEW, false, true); batadv_tt_local_purge_pending_clients(bat_priv); batadv_tt_local_update_crc(bat_priv); /* Increment the TTVN only once per OGM interval */ atomic_inc(&bat_priv->tt.vn); batadv_dbg(BATADV_DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n", (u8)atomic_read(&bat_priv->tt.vn)); /* reset the sending counter */ atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX); batadv_tt_tvlv_container_update(bat_priv); } /** * batadv_tt_local_commit_changes() - commit all pending local tt changes which * have been queued in the time since the last commit * @bat_priv: the bat priv with all the soft interface information */ void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv) { spin_lock_bh(&bat_priv->tt.commit_lock); batadv_tt_local_commit_changes_nolock(bat_priv); spin_unlock_bh(&bat_priv->tt.commit_lock); } /** * batadv_is_ap_isolated() - Check if packet from upper layer should be dropped * @bat_priv: the bat priv with all the soft interface information * @src: source mac address of packet * @dst: destination mac address of packet * @vid: vlan id of packet * * Return: true when src+dst(+vid) pair should be isolated, false otherwise */ bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst, unsigned short vid) { struct batadv_tt_local_entry *tt_local_entry; struct batadv_tt_global_entry *tt_global_entry; struct batadv_softif_vlan *vlan; bool ret = false; vlan = batadv_softif_vlan_get(bat_priv, vid); if (!vlan) return false; if (!atomic_read(&vlan->ap_isolation)) goto vlan_put; tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst, vid); if (!tt_local_entry) goto vlan_put; tt_global_entry = batadv_tt_global_hash_find(bat_priv, src, vid); if (!tt_global_entry) goto local_entry_put; if (_batadv_is_ap_isolated(tt_local_entry, tt_global_entry)) ret = true; batadv_tt_global_entry_put(tt_global_entry); local_entry_put: batadv_tt_local_entry_put(tt_local_entry); vlan_put: batadv_softif_vlan_put(vlan); return ret; } /** * batadv_tt_update_orig() - update global translation table with new tt * information received via ogms * @bat_priv: the bat priv with all the soft interface information * @orig_node: the orig_node of the ogm * @tt_buff: pointer to the first tvlv VLAN entry * @tt_num_vlan: number of tvlv VLAN entries * @tt_change: pointer to the first entry in the TT buffer * @tt_num_changes: number of tt changes inside the tt buffer * @ttvn: translation table version number of this changeset */ static void batadv_tt_update_orig(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, const void *tt_buff, u16 tt_num_vlan, struct batadv_tvlv_tt_change *tt_change, u16 tt_num_changes, u8 ttvn) { u8 orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn); struct batadv_tvlv_tt_vlan_data *tt_vlan; bool full_table = true; bool has_tt_init; tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff; has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized); /* orig table not initialised AND first diff is in the OGM OR the ttvn * increased by one -> we can apply the attached changes */ if ((!has_tt_init && ttvn == 1) || ttvn - orig_ttvn == 1) { /* the OGM could not contain the 
changes due to their size or * because they have already been sent BATADV_TT_OGM_APPEND_MAX * times. * In this case send a tt request */ if (!tt_num_changes) { full_table = false; goto request_table; } spin_lock_bh(&orig_node->tt_lock); batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn, tt_change); /* Even if we received the precomputed crc with the OGM, we * prefer to recompute it to spot any possible inconsistency * in the global table */ batadv_tt_global_update_crc(bat_priv, orig_node); spin_unlock_bh(&orig_node->tt_lock); /* The ttvn alone is not enough to guarantee consistency * because a single value could represent different states * (due to the wrap around). Thus a node has to check whether * the resulting table (after applying the changes) is still * consistent or not. E.g. a node could disconnect while its * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case * checking the CRC value is mandatory to detect the * inconsistency */ if (!batadv_tt_global_check_crc(orig_node, tt_vlan, tt_num_vlan)) goto request_table; } else { /* if we missed more than one change or our tables are not * in sync anymore -> request fresh tt data */ if (!has_tt_init || ttvn != orig_ttvn || !batadv_tt_global_check_crc(orig_node, tt_vlan, tt_num_vlan)) { request_table: batadv_dbg(BATADV_DBG_TT, bat_priv, "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u num_changes: %u)\n", orig_node->orig, ttvn, orig_ttvn, tt_num_changes); batadv_send_tt_request(bat_priv, orig_node, ttvn, tt_vlan, tt_num_vlan, full_table); return; } } } /** * batadv_tt_global_client_is_roaming() - check if a client is marked as roaming * @bat_priv: the bat priv with all the soft interface information * @addr: the mac address of the client to check * @vid: VLAN identifier * * Return: true if we know that the client has moved from its old originator * to another one. This entry is still kept for consistency purposes and will be * deleted later by a DEL or because of timeout */ bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv, u8 *addr, unsigned short vid) { struct batadv_tt_global_entry *tt_global_entry; bool ret = false; tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid); if (!tt_global_entry) goto out; ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM; batadv_tt_global_entry_put(tt_global_entry); out: return ret; } /** * batadv_tt_local_client_is_roaming() - tells whether the client is roaming * @bat_priv: the bat priv with all the soft interface information * @addr: the mac address of the local client to query * @vid: VLAN identifier * * Return: true if the local client is known to be roaming (it is not served by * this node anymore) or not. 
If yes, the client is still present in the table * to keep the latter consistent with the node TTVN */ bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv, u8 *addr, unsigned short vid) { struct batadv_tt_local_entry *tt_local_entry; bool ret = false; tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); if (!tt_local_entry) goto out; ret = tt_local_entry->common.flags & BATADV_TT_CLIENT_ROAM; batadv_tt_local_entry_put(tt_local_entry); out: return ret; } /** * batadv_tt_add_temporary_global_entry() - Add temporary entry to global TT * @bat_priv: the bat priv with all the soft interface information * @orig_node: orig node which the temporary entry should be associated with * @addr: mac address of the client * @vid: VLAN id of the new temporary global translation table * * Return: true when temporary tt entry could be added, false otherwise */ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, const unsigned char *addr, unsigned short vid) { /* ignore loop detect macs, they are not supposed to be in the tt local * data as well. */ if (batadv_bla_is_loopdetect_mac(addr)) return false; if (!batadv_tt_global_add(bat_priv, orig_node, addr, vid, BATADV_TT_CLIENT_TEMP, atomic_read(&orig_node->last_ttvn))) return false; batadv_dbg(BATADV_DBG_TT, bat_priv, "Added temporary global client (addr: %pM, vid: %d, orig: %pM)\n", addr, batadv_print_vid(vid), orig_node->orig); return true; } /** * batadv_tt_local_resize_to_mtu() - resize the local translation table fit the * maximum packet size that can be transported through the mesh * @soft_iface: netdev struct of the mesh interface * * Remove entries older than 'timeout' and half timeout if more entries need * to be removed. */ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface) { struct batadv_priv *bat_priv = netdev_priv(soft_iface); int packet_size_max = atomic_read(&bat_priv->packet_size_max); int table_size, timeout = BATADV_TT_LOCAL_TIMEOUT / 2; bool reduced = false; spin_lock_bh(&bat_priv->tt.commit_lock); while (timeout) { table_size = batadv_tt_local_table_transmit_size(bat_priv); if (packet_size_max >= table_size) break; batadv_tt_local_purge(bat_priv, timeout); batadv_tt_local_purge_pending_clients(bat_priv); timeout /= 2; reduced = true; net_ratelimited_function(batadv_info, soft_iface, "Forced to purge local tt entries to fit new maximum fragment MTU (%i)\n", packet_size_max); } /* commit these changes immediately, to avoid synchronization problem * with the TTVN */ if (reduced) batadv_tt_local_commit_changes_nolock(bat_priv); spin_unlock_bh(&bat_priv->tt.commit_lock); } /** * batadv_tt_tvlv_ogm_handler_v1() - process incoming tt tvlv container * @bat_priv: the bat priv with all the soft interface information * @orig: the orig_node of the ogm * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags) * @tvlv_value: tvlv buffer containing the gateway data * @tvlv_value_len: tvlv buffer length */ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, struct batadv_orig_node *orig, u8 flags, void *tvlv_value, u16 tvlv_value_len) { struct batadv_tvlv_tt_vlan_data *tt_vlan; struct batadv_tvlv_tt_change *tt_change; struct batadv_tvlv_tt_data *tt_data; u16 num_entries, num_vlan; if (tvlv_value_len < sizeof(*tt_data)) return; tt_data = tvlv_value; tvlv_value_len -= sizeof(*tt_data); num_vlan = ntohs(tt_data->num_vlan); if (tvlv_value_len < sizeof(*tt_vlan) * num_vlan) return; tt_vlan = (struct batadv_tvlv_tt_vlan_data 
*)(tt_data + 1); tt_change = (struct batadv_tvlv_tt_change *)(tt_vlan + num_vlan); tvlv_value_len -= sizeof(*tt_vlan) * num_vlan; num_entries = batadv_tt_entries(tvlv_value_len); batadv_tt_update_orig(bat_priv, orig, tt_vlan, num_vlan, tt_change, num_entries, tt_data->ttvn); } /** * batadv_tt_tvlv_unicast_handler_v1() - process incoming (unicast) tt tvlv * container * @bat_priv: the bat priv with all the soft interface information * @src: mac address of tt tvlv sender * @dst: mac address of tt tvlv recipient * @tvlv_value: tvlv buffer containing the tt data * @tvlv_value_len: tvlv buffer length * * Return: NET_RX_DROP if the tt tvlv is to be re-routed, NET_RX_SUCCESS * otherwise. */ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv, u8 *src, u8 *dst, void *tvlv_value, u16 tvlv_value_len) { struct batadv_tvlv_tt_data *tt_data; u16 tt_vlan_len, tt_num_entries; char tt_flag; bool ret; if (tvlv_value_len < sizeof(*tt_data)) return NET_RX_SUCCESS; tt_data = tvlv_value; tvlv_value_len -= sizeof(*tt_data); tt_vlan_len = sizeof(struct batadv_tvlv_tt_vlan_data); tt_vlan_len *= ntohs(tt_data->num_vlan); if (tvlv_value_len < tt_vlan_len) return NET_RX_SUCCESS; tvlv_value_len -= tt_vlan_len; tt_num_entries = batadv_tt_entries(tvlv_value_len); switch (tt_data->flags & BATADV_TT_DATA_TYPE_MASK) { case BATADV_TT_REQUEST: batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX); /* If this node cannot provide a TT response the tt_request is * forwarded */ ret = batadv_send_tt_response(bat_priv, tt_data, src, dst); if (!ret) { if (tt_data->flags & BATADV_TT_FULL_TABLE) tt_flag = 'F'; else tt_flag = '.'; batadv_dbg(BATADV_DBG_TT, bat_priv, "Routing TT_REQUEST to %pM [%c]\n", dst, tt_flag); /* tvlv API will re-route the packet */ return NET_RX_DROP; } break; case BATADV_TT_RESPONSE: batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX); if (batadv_is_my_mac(bat_priv, dst)) { batadv_handle_tt_response(bat_priv, tt_data, src, tt_num_entries); return NET_RX_SUCCESS; } if (tt_data->flags & BATADV_TT_FULL_TABLE) tt_flag = 'F'; else tt_flag = '.'; batadv_dbg(BATADV_DBG_TT, bat_priv, "Routing TT_RESPONSE to %pM [%c]\n", dst, tt_flag); /* tvlv API will re-route the packet */ return NET_RX_DROP; } return NET_RX_SUCCESS; } /** * batadv_roam_tvlv_unicast_handler_v1() - process incoming tt roam tvlv * container * @bat_priv: the bat priv with all the soft interface information * @src: mac address of tt tvlv sender * @dst: mac address of tt tvlv recipient * @tvlv_value: tvlv buffer containing the tt data * @tvlv_value_len: tvlv buffer length * * Return: NET_RX_DROP if the tt roam tvlv is to be re-routed, NET_RX_SUCCESS * otherwise. */ static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv, u8 *src, u8 *dst, void *tvlv_value, u16 tvlv_value_len) { struct batadv_tvlv_roam_adv *roaming_adv; struct batadv_orig_node *orig_node = NULL; /* If this node is not the intended recipient of the * roaming advertisement the packet is forwarded * (the tvlv API will re-route the packet). 
*/ if (!batadv_is_my_mac(bat_priv, dst)) return NET_RX_DROP; if (tvlv_value_len < sizeof(*roaming_adv)) goto out; orig_node = batadv_orig_hash_find(bat_priv, src); if (!orig_node) goto out; batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX); roaming_adv = tvlv_value; batadv_dbg(BATADV_DBG_TT, bat_priv, "Received ROAMING_ADV from %pM (client %pM)\n", src, roaming_adv->client); batadv_tt_global_add(bat_priv, orig_node, roaming_adv->client, ntohs(roaming_adv->vid), BATADV_TT_CLIENT_ROAM, atomic_read(&orig_node->last_ttvn) + 1); out: batadv_orig_node_put(orig_node); return NET_RX_SUCCESS; } /** * batadv_tt_init() - initialise the translation table internals * @bat_priv: the bat priv with all the soft interface information * * Return: 0 on success or negative error number in case of failure. */ int batadv_tt_init(struct batadv_priv *bat_priv) { int ret; /* synchronized flags must be remote */ BUILD_BUG_ON(!(BATADV_TT_SYNC_MASK & BATADV_TT_REMOTE_MASK)); ret = batadv_tt_local_init(bat_priv); if (ret < 0) return ret; ret = batadv_tt_global_init(bat_priv); if (ret < 0) { batadv_tt_local_table_free(bat_priv); return ret; } batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1, batadv_tt_tvlv_unicast_handler_v1, NULL, BATADV_TVLV_TT, 1, BATADV_NO_FLAGS); batadv_tvlv_handler_register(bat_priv, NULL, batadv_roam_tvlv_unicast_handler_v1, NULL, BATADV_TVLV_ROAM, 1, BATADV_NO_FLAGS); INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge); queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work, msecs_to_jiffies(BATADV_TT_WORK_PERIOD)); return 1; } /** * batadv_tt_global_is_isolated() - check if a client is marked as isolated * @bat_priv: the bat priv with all the soft interface information * @addr: the mac address of the client * @vid: the identifier of the VLAN where this client is connected * * Return: true if the client is marked with the TT_CLIENT_ISOLA flag, false * otherwise */ bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid) { struct batadv_tt_global_entry *tt; bool ret; tt = batadv_tt_global_hash_find(bat_priv, addr, vid); if (!tt) return false; ret = tt->common.flags & BATADV_TT_CLIENT_ISOLA; batadv_tt_global_entry_put(tt); return ret; } /** * batadv_tt_cache_init() - Initialize tt memory object cache * * Return: 0 on success or negative error number in case of failure. 
*/ int __init batadv_tt_cache_init(void) { size_t tl_size = sizeof(struct batadv_tt_local_entry); size_t tg_size = sizeof(struct batadv_tt_global_entry); size_t tt_orig_size = sizeof(struct batadv_tt_orig_list_entry); size_t tt_change_size = sizeof(struct batadv_tt_change_node); size_t tt_req_size = sizeof(struct batadv_tt_req_node); size_t tt_roam_size = sizeof(struct batadv_tt_roam_node); batadv_tl_cache = kmem_cache_create("batadv_tl_cache", tl_size, 0, SLAB_HWCACHE_ALIGN, NULL); if (!batadv_tl_cache) return -ENOMEM; batadv_tg_cache = kmem_cache_create("batadv_tg_cache", tg_size, 0, SLAB_HWCACHE_ALIGN, NULL); if (!batadv_tg_cache) goto err_tt_tl_destroy; batadv_tt_orig_cache = kmem_cache_create("batadv_tt_orig_cache", tt_orig_size, 0, SLAB_HWCACHE_ALIGN, NULL); if (!batadv_tt_orig_cache) goto err_tt_tg_destroy; batadv_tt_change_cache = kmem_cache_create("batadv_tt_change_cache", tt_change_size, 0, SLAB_HWCACHE_ALIGN, NULL); if (!batadv_tt_change_cache) goto err_tt_orig_destroy; batadv_tt_req_cache = kmem_cache_create("batadv_tt_req_cache", tt_req_size, 0, SLAB_HWCACHE_ALIGN, NULL); if (!batadv_tt_req_cache) goto err_tt_change_destroy; batadv_tt_roam_cache = kmem_cache_create("batadv_tt_roam_cache", tt_roam_size, 0, SLAB_HWCACHE_ALIGN, NULL); if (!batadv_tt_roam_cache) goto err_tt_req_destroy; return 0; err_tt_req_destroy: kmem_cache_destroy(batadv_tt_req_cache); batadv_tt_req_cache = NULL; err_tt_change_destroy: kmem_cache_destroy(batadv_tt_change_cache); batadv_tt_change_cache = NULL; err_tt_orig_destroy: kmem_cache_destroy(batadv_tt_orig_cache); batadv_tt_orig_cache = NULL; err_tt_tg_destroy: kmem_cache_destroy(batadv_tg_cache); batadv_tg_cache = NULL; err_tt_tl_destroy: kmem_cache_destroy(batadv_tl_cache); batadv_tl_cache = NULL; return -ENOMEM; } /** * batadv_tt_cache_destroy() - Destroy tt memory object cache */ void batadv_tt_cache_destroy(void) { kmem_cache_destroy(batadv_tl_cache); kmem_cache_destroy(batadv_tg_cache); kmem_cache_destroy(batadv_tt_orig_cache); kmem_cache_destroy(batadv_tt_change_cache); kmem_cache_destroy(batadv_tt_req_cache); kmem_cache_destroy(batadv_tt_roam_cache); }
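The request/response handlers above all walk the same TT TVLV layout: a fixed tt_data header, immediately followed by num_vlan per-VLAN CRC descriptors, immediately followed by the packed TT change entries. The standalone sketch below only illustrates that pointer arithmetic (as done with (tt_data + 1) and (tt_vlan + num_vlan) in batadv_handle_tt_response()); the example_* structs are simplified stand-ins assumed for illustration and are not the batman-adv packet header definitions.

#include <stdint.h>
#include <arpa/inet.h>	/* ntohs() */

/* Simplified, illustrative layouts (assumption, not the kernel's packet.h). */
struct example_tt_vlan_data { uint32_t crc; uint16_t vid; uint16_t reserved; } __attribute__((packed));
struct example_tt_change { uint8_t flags; uint8_t reserved[3]; uint8_t addr[6]; uint16_t vid; } __attribute__((packed));
struct example_tt_data { uint8_t flags; uint8_t ttvn; uint16_t num_vlan; } __attribute__((packed));

/* Skip the fixed header and the per-VLAN descriptors to reach the first
 * TT change entry, mirroring the offset computation in the handlers above.
 */
static struct example_tt_change *
example_first_tt_change(struct example_tt_data *tt_data)
{
	struct example_tt_vlan_data *tt_vlan;

	tt_vlan = (struct example_tt_vlan_data *)(tt_data + 1);
	return (struct example_tt_change *)(tt_vlan + ntohs(tt_data->num_vlan));
}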
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MIGRATE_H #define _LINUX_MIGRATE_H #include <linux/mm.h> #include <linux/mempolicy.h> #include <linux/migrate_mode.h> #include <linux/hugetlb.h> typedef struct folio *new_folio_t(struct folio *folio, unsigned long private); typedef void free_folio_t(struct folio *folio, unsigned long private); struct migration_target_control; /* * Return values from address_space_operations.migratepage(): * - negative errno on page migration failure; * - zero on page migration success; */ #define MIGRATEPAGE_SUCCESS 0 #define MIGRATEPAGE_UNMAP 1 /** * struct movable_operations - Driver page migration * @isolate_page: * The VM calls this function to prepare the page to be moved. The page * is locked and the driver should not unlock it. The driver should * return ``true`` if the page is movable and ``false`` if it is not * currently movable. After this function returns, the VM uses the * page->lru field, so the driver must preserve any information which * is usually stored here. * * @migrate_page: * After isolation, the VM calls this function with the isolated * @src page. The driver should copy the contents of the * @src page to the @dst page and set up the fields of @dst page. * Both pages are locked. * If page migration is successful, the driver should call * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS. * If the driver cannot migrate the page at the moment, it can return * -EAGAIN. The VM interprets this as a temporary migration failure and * will retry it later. Any other error value is a permanent migration * failure and migration will not be retried. * The driver shouldn't touch the @src->lru field while in the * migrate_page() function. It may write to @dst->lru. * * @putback_page: * If migration fails on the isolated page, the VM informs the driver * that the page is no longer a candidate for migration by calling * this function. The driver should put the isolated page back into * its own data structure. 
*/ struct movable_operations { bool (*isolate_page)(struct page *, isolate_mode_t); int (*migrate_page)(struct page *dst, struct page *src, enum migrate_mode); void (*putback_page)(struct page *); }; /* Defined in mm/debug.c: */ extern const char *migrate_reason_names[MR_TYPES]; #ifdef CONFIG_MIGRATION void putback_movable_pages(struct list_head *l); int migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode); int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free, unsigned long private, enum migrate_mode mode, int reason, unsigned int *ret_succeeded); struct folio *alloc_migration_target(struct folio *src, unsigned long private); bool isolate_movable_page(struct page *page, isolate_mode_t mode); int migrate_huge_page_move_mapping(struct address_space *mapping, struct folio *dst, struct folio *src); void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl) __releases(ptl); void folio_migrate_flags(struct folio *newfolio, struct folio *folio); int folio_migrate_mapping(struct address_space *mapping, struct folio *newfolio, struct folio *folio, int extra_count); #else static inline void putback_movable_pages(struct list_head *l) {} static inline int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free, unsigned long private, enum migrate_mode mode, int reason, unsigned int *ret_succeeded) { return -ENOSYS; } static inline struct folio *alloc_migration_target(struct folio *src, unsigned long private) { return NULL; } static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode) { return false; } static inline int migrate_huge_page_move_mapping(struct address_space *mapping, struct folio *dst, struct folio *src) { return -ENOSYS; } #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_COMPACTION bool PageMovable(struct page *page); void __SetPageMovable(struct page *page, const struct movable_operations *ops); void __ClearPageMovable(struct page *page); #else static inline bool PageMovable(struct page *page) { return false; } static inline void __SetPageMovable(struct page *page, const struct movable_operations *ops) { } static inline void __ClearPageMovable(struct page *page) { } #endif static inline bool folio_test_movable(struct folio *folio) { return PageMovable(&folio->page); } static inline const struct movable_operations *folio_movable_ops(struct folio *folio) { VM_BUG_ON(!__folio_test_movable(folio)); return (const struct movable_operations *) ((unsigned long)folio->mapping - PAGE_MAPPING_MOVABLE); } static inline const struct movable_operations *page_movable_ops(struct page *page) { VM_BUG_ON(!__PageMovable(page)); return (const struct movable_operations *) ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE); } #ifdef CONFIG_NUMA_BALANCING int migrate_misplaced_folio_prepare(struct folio *folio, struct vm_area_struct *vma, int node); int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node); #else static inline int migrate_misplaced_folio_prepare(struct folio *folio, struct vm_area_struct *vma, int node) { return -EAGAIN; /* can't migrate now */ } static inline int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node) { return -EAGAIN; /* can't migrate now */ } #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_MIGRATION /* * Watch out for PAE architecture, which has an unsigned long, and might not * have enough bits to store all physical address and flags. So far we have * enough room for all our flags. 
*/ #define MIGRATE_PFN_VALID (1UL << 0) #define MIGRATE_PFN_MIGRATE (1UL << 1) #define MIGRATE_PFN_WRITE (1UL << 3) #define MIGRATE_PFN_SHIFT 6 static inline struct page *migrate_pfn_to_page(unsigned long mpfn) { if (!(mpfn & MIGRATE_PFN_VALID)) return NULL; return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT); } static inline unsigned long migrate_pfn(unsigned long pfn) { return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID; } enum migrate_vma_direction { MIGRATE_VMA_SELECT_SYSTEM = 1 << 0, MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1, MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2, }; struct migrate_vma { struct vm_area_struct *vma; /* * Both src and dst array must be big enough for * (end - start) >> PAGE_SHIFT entries. * * The src array must not be modified by the caller after * migrate_vma_setup(), and must not change the dst array after * migrate_vma_pages() returns. */ unsigned long *dst; unsigned long *src; unsigned long cpages; unsigned long npages; unsigned long start; unsigned long end; /* * Set to the owner value also stored in page->pgmap->owner for * migrating out of device private memory. The flags also need to * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE. * The caller should always set this field when using mmu notifier * callbacks to avoid device MMU invalidations for device private * pages that are not being migrated. */ void *pgmap_owner; unsigned long flags; /* * Set to vmf->page if this is being called to migrate a page as part of * a migrate_to_ram() callback. */ struct page *fault_page; }; int migrate_vma_setup(struct migrate_vma *args); void migrate_vma_pages(struct migrate_vma *migrate); void migrate_vma_finalize(struct migrate_vma *migrate); int migrate_device_range(unsigned long *src_pfns, unsigned long start, unsigned long npages); void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns, unsigned long npages); void migrate_device_finalize(unsigned long *src_pfns, unsigned long *dst_pfns, unsigned long npages); #endif /* CONFIG_MIGRATION */ #endif /* _LINUX_MIGRATE_H */
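The MIGRATE_PFN_* encoding above packs a pfn plus a few status bits into a single unsigned long for the src/dst arrays of struct migrate_vma. Below is a minimal userspace sketch of that bit layout; the EX_ copies of the macros and the main() harness are illustrative assumptions, and pfn_to_page() is omitted since it needs the kernel's memory map.

#include <assert.h>
#include <stdio.h>

#define EX_MIGRATE_PFN_VALID   (1UL << 0)
#define EX_MIGRATE_PFN_MIGRATE (1UL << 1)
#define EX_MIGRATE_PFN_WRITE   (1UL << 3)
#define EX_MIGRATE_PFN_SHIFT   6

/* Same encoding as migrate_pfn(): pfn in the high bits, flags in the low ones. */
static unsigned long ex_migrate_pfn(unsigned long pfn)
{
	return (pfn << EX_MIGRATE_PFN_SHIFT) | EX_MIGRATE_PFN_VALID;
}

int main(void)
{
	unsigned long mpfn;

	/* Mark pfn 0x1234 as valid, selected for migration and writable. */
	mpfn = ex_migrate_pfn(0x1234) | EX_MIGRATE_PFN_MIGRATE | EX_MIGRATE_PFN_WRITE;

	assert(mpfn & EX_MIGRATE_PFN_VALID);
	assert((mpfn >> EX_MIGRATE_PFN_SHIFT) == 0x1234);
	printf("pfn=0x%lx migrate=%d write=%d\n",
	       mpfn >> EX_MIGRATE_PFN_SHIFT,
	       (int)!!(mpfn & EX_MIGRATE_PFN_MIGRATE),
	       (int)!!(mpfn & EX_MIGRATE_PFN_WRITE));
	return 0;
}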
/* * linux/drivers/video/console/fbcon.h -- Low level frame buffer based console driver * * Copyright (C) 1997 Geert Uytterhoeven * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #ifndef _VIDEO_FBCON_H #define _VIDEO_FBCON_H #include <linux/types.h> #include <linux/vt_buffer.h> #include <linux/vt_kern.h> #include <linux/workqueue.h> #include <asm/io.h> /* * This is the interface between the low-level console driver and the * low-level frame buffer device */ struct fbcon_display { /* Filled in by the low-level console driver */ const u_char *fontdata; int userfont; /* != 0 if fontdata kmalloc()ed */ #ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION u_short scrollmode; /* Scroll Method, use fb_scrollmode() */ #endif u_short inverse; /* != 0 text black on white as default */ short yscroll; /* Hardware scrolling */ int vrows; /* number of virtual rows */ int cursor_shape; int con_rotate; u32 xres_virtual; u32 yres_virtual; u32 height; u32 width; u32 bits_per_pixel; u32 grayscale; u32 nonstd; u32 accel_flags; u32 rotate; struct fb_bitfield red; struct fb_bitfield green; struct fb_bitfield blue; struct fb_bitfield transp; const struct fb_videomode *mode; }; struct fbcon_ops { void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy, int sx, int dy, int dx, int height, int width); void (*clear)(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width); void (*putcs)(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx, int fg, int bg); void (*clear_margins)(struct vc_data *vc, struct fb_info *info, int color, int bottom_only); void (*cursor)(struct vc_data *vc, struct fb_info *info, bool enable, int fg, int bg); int (*update_start)(struct fb_info *info); int (*rotate_font)(struct fb_info *info, struct vc_data *vc); struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */ struct delayed_work cursor_work; /* Cursor timer */ struct fb_cursor cursor_state; struct fbcon_display *p; struct fb_info *info; int currcon; /* Current VC. 
*/ int cur_blink_jiffies; int cursor_flash; int cursor_reset; int blank_state; int graphics; int save_graphics; /* for debug enter/leave */ bool initialized; int rotate; int cur_rotate; char *cursor_data; u8 *fontbuffer; u8 *fontdata; u8 *cursor_src; u32 cursor_size; u32 fd_size; }; /* * Attribute Decoding */ /* Color */ #define attr_fgcol(fgshift,s) \ (((s) >> (fgshift)) & 0x0f) #define attr_bgcol(bgshift,s) \ (((s) >> (bgshift)) & 0x0f) /* Monochrome */ #define attr_bold(s) \ ((s) & 0x200) #define attr_reverse(s) \ ((s) & 0x800) #define attr_underline(s) \ ((s) & 0x400) #define attr_blink(s) \ ((s) & 0x8000) static inline int mono_col(const struct fb_info *info) { __u32 max_len; max_len = max(info->var.green.length, info->var.red.length); max_len = max(info->var.blue.length, max_len); return (~(0xfff << max_len)) & 0xff; } static inline int attr_col_ec(int shift, struct vc_data *vc, struct fb_info *info, int is_fg) { int is_mono01; int col; int fg; int bg; if (!vc) return 0; if (vc->vc_can_do_color) return is_fg ? attr_fgcol(shift,vc->vc_video_erase_char) : attr_bgcol(shift,vc->vc_video_erase_char); if (!info) return 0; col = mono_col(info); is_mono01 = info->fix.visual == FB_VISUAL_MONO01; if (attr_reverse(vc->vc_video_erase_char)) { fg = is_mono01 ? col : 0; bg = is_mono01 ? 0 : col; } else { fg = is_mono01 ? 0 : col; bg = is_mono01 ? col : 0; } return is_fg ? fg : bg; } #define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0) #define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1) /* * Scroll Method */ /* There are several methods fbcon can use to move text around the screen: * * Operation Pan Wrap *--------------------------------------------- * SCROLL_MOVE copyarea No No * SCROLL_PAN_MOVE copyarea Yes No * SCROLL_WRAP_MOVE copyarea No Yes * SCROLL_REDRAW imageblit No No * SCROLL_PAN_REDRAW imageblit Yes No * SCROLL_WRAP_REDRAW imageblit No Yes * * (SCROLL_WRAP_REDRAW is not implemented yet) * * In general, fbcon will choose the best scrolling * method based on the rule below: * * Pan/Wrap > accel imageblit > accel copyarea > * soft imageblit > (soft copyarea) * * Exception to the rule: Pan + accel copyarea is * preferred over Pan + accel imageblit. * * The above is typical for PCI/AGP cards. Unless * overridden, fbcon will never use soft copyarea. * * If you need to override the above rule, set the * appropriate flags in fb_info->flags. For example, * to prefer copyarea over imageblit, set * FBINFO_READS_FAST. * * Other notes: * + use the hardware engine to move the text * (hw-accelerated copyarea() and fillrect()) * + use hardware-supported panning on a large virtual screen * + amifb can not only pan, but also wrap the display by N lines * (i.e. visible line i = physical line (i+N) % yres). * + read what's already rendered on the screen and * write it in a different place (this is cfb_copyarea()) * + re-render the text to the screen * * Whether to use wrapping or panning can only be figured out at * runtime (when we know whether our font height is a multiple * of the pan/wrap step) * */ #define SCROLL_MOVE 0x001 #define SCROLL_PAN_MOVE 0x002 #define SCROLL_WRAP_MOVE 0x003 #define SCROLL_REDRAW 0x004 #define SCROLL_PAN_REDRAW 0x005 static inline u_short fb_scrollmode(struct fbcon_display *fb) { #ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION return fb->scrollmode; #else /* hardcoded to SCROLL_REDRAW if acceleration was disabled. 
static inline u_short fb_scrollmode(struct fbcon_display *fb)
{
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
	return fb->scrollmode;
#else
	/* hardcoded to SCROLL_REDRAW if acceleration was disabled. */
	return SCROLL_REDRAW;
#endif
}

#ifdef CONFIG_FB_TILEBLITTING
extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
#endif
extern void fbcon_set_bitops(struct fbcon_ops *ops);
extern int soft_cursor(struct fb_info *info, struct fb_cursor *cursor);

#define FBCON_ATTRIBUTE_UNDERLINE 1
#define FBCON_ATTRIBUTE_REVERSE   2
#define FBCON_ATTRIBUTE_BOLD      4

static inline int real_y(struct fbcon_display *p, int ypos)
{
	int rows = p->vrows;

	ypos += p->yscroll;
	return ypos < rows ? ypos : ypos - rows;
}

static inline int get_attribute(struct fb_info *info, u16 c)
{
	int attribute = 0;

	if (fb_get_color_depth(&info->var, &info->fix) == 1) {
		if (attr_underline(c))
			attribute |= FBCON_ATTRIBUTE_UNDERLINE;
		if (attr_reverse(c))
			attribute |= FBCON_ATTRIBUTE_REVERSE;
		if (attr_bold(c))
			attribute |= FBCON_ATTRIBUTE_BOLD;
	}
	return attribute;
}

#define FBCON_SWAP(i,r,v) ({ \
	typeof(r) _r = (r); \
	typeof(v) _v = (v); \
	(void) (&_r == &_v); \
	(i == FB_ROTATE_UR || i == FB_ROTATE_UD) ? _r : _v; })

#ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION
extern void fbcon_set_rotate(struct fbcon_ops *ops);
#else
#define fbcon_set_rotate(x) do {} while(0)
#endif /* CONFIG_FRAMEBUFFER_CONSOLE_ROTATION */

#endif /* _VIDEO_FBCON_H */
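/*
 * Editor's illustration (not part of fbcon.h): FBCON_SWAP() selects between
 * two values depending on whether the console is rotated by 90/270 degrees.
 * fbcon_effective_yres() is a hypothetical helper showing the typical use;
 * only FBCON_SWAP(), FB_ROTATE_* and struct fb_info come from the real
 * headers.
 */
#if 0	/* illustrative sketch only */
static inline u32 fbcon_effective_yres(struct fb_info *info, int rotate)
{
	/* upright/upside-down keep yres; clockwise/counter-clockwise swap */
	return FBCON_SWAP(rotate, info->var.yres, info->var.xres);
}
#endif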
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(page_ref_set);
DECLARE_TRACEPOINT(page_ref_mod);
DECLARE_TRACEPOINT(page_ref_mod_and_test);
DECLARE_TRACEPOINT(page_ref_mod_and_return);
DECLARE_TRACEPOINT(page_ref_mod_unless);
DECLARE_TRACEPOINT(page_ref_freeze);
DECLARE_TRACEPOINT(page_ref_unfreeze);

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) tracepoint_enabled(t)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

static inline int page_ref_count(const struct page *page)
{
	return atomic_read(&page->_refcount);
}
/**
 * folio_ref_count - The reference count on this folio.
 * @folio: The folio.
 *
 * The refcount is usually incremented by calls to folio_get() and
 * decremented by calls to folio_put().  Some typical users of the
 * folio refcount:
 *
 * - Each reference from a page table
 * - The page cache
 * - Filesystem private data
 * - The LRU list
 * - Pipes
 * - Direct IO which references this page in the process address space
 *
 * Return: The number of references to this folio.
 */
static inline int folio_ref_count(const struct folio *folio)
{
	return page_ref_count(&folio->page);
}

static inline int page_count(const struct page *page)
{
	return folio_ref_count(page_folio(page));
}

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(page_ref_set))
		__page_ref_set(page, v);
}

static inline void folio_set_count(struct folio *folio, int v)
{
	set_page_count(&folio->page, v);
}

/*
 * Set up the page count before the page is freed into the page allocator
 * for the first time (boot or memory hotplug).
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void folio_ref_add(struct folio *folio, int nr)
{
	page_ref_add(&folio->page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -nr);
}

static inline void folio_ref_sub(struct folio *folio, int nr)
{
	page_ref_sub(&folio->page, nr);
}

static inline int folio_ref_sub_return(struct folio *folio, int nr)
{
	int ret = atomic_sub_return(nr, &folio->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(&folio->page, -nr, ret);
	return ret;
}

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void folio_ref_inc(struct folio *folio)
{
	page_ref_inc(&folio->page);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -1);
}

static inline void folio_ref_dec(struct folio *folio)
{
	page_ref_dec(&folio->page);
}

static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
{
	return page_ref_sub_and_test(&folio->page, nr);
}

static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

static inline int folio_ref_inc_return(struct folio *folio)
{
	return page_ref_inc_return(&folio->page);
}

static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

static inline int folio_ref_dec_and_test(struct folio *folio)
{
	return page_ref_dec_and_test(&folio->page);
}

static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

static inline int folio_ref_dec_return(struct folio *folio)
{
	return page_ref_dec_return(&folio->page);
}
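/*
 * Editor's illustration (not part of page_ref.h): the *_dec_and_test()
 * helpers above return true exactly when the count reaches zero, which is
 * the usual trigger for reclaim.  my_folio_put() and release_my_folio()
 * are hypothetical names; only folio_ref_dec_and_test() is real.
 */
#if 0	/* illustrative sketch only */
static inline void my_folio_put(struct folio *folio)
{
	if (folio_ref_dec_and_test(folio))	/* last reference dropped */
		release_my_folio(folio);	/* hypothetical reclaim hook */
}
#endif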
static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
	bool ret = false;

	rcu_read_lock();
	/* avoid writing to the vmemmap area being remapped */
	if (!page_is_fake_head(page) && page_ref_count(page) != u)
		ret = atomic_add_unless(&page->_refcount, nr, u);
	rcu_read_unlock();

	if (page_ref_tracepoint_active(page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}

static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
{
	return page_ref_add_unless(&folio->page, nr, u);
}

/**
 * folio_try_get - Attempt to increase the refcount on a folio.
 * @folio: The folio.
 *
 * If you do not already have a reference to a folio, you can attempt to
 * get one using this function.  It may fail if, for example, the folio
 * has been freed since you found a pointer to it, or it is frozen for
 * the purposes of splitting or migration.
 *
 * Return: True if the reference count was successfully incremented.
 */
static inline bool folio_try_get(struct folio *folio)
{
	return folio_ref_add_unless(folio, 1, 0);
}

static inline bool folio_ref_try_add(struct folio *folio, int count)
{
	return folio_ref_add_unless(folio, count, 0);
}

static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

static inline int folio_ref_freeze(struct folio *folio, int count)
{
	return page_ref_freeze(&folio->page, count);
}

static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set_release(&page->_refcount, count);
	if (page_ref_tracepoint_active(page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

static inline void folio_ref_unfreeze(struct folio *folio, int count)
{
	page_ref_unfreeze(&folio->page, count);
}

#endif
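/*
 * Editor's illustration (not part of page_ref.h): folio_try_get() is the
 * building block for speculative references, e.g. when a folio was found
 * under RCU and might be freed or frozen concurrently.  my_speculative_get()
 * and recheck_lookup() are hypothetical; folio_put() is declared in
 * <linux/mm.h>.  A minimal sketch, assuming the caller revalidates the
 * lookup after pinning:
 */
#if 0	/* illustrative sketch only */
static inline struct folio *my_speculative_get(struct folio *folio)
{
	if (!folio_try_get(folio))
		return NULL;			/* already freed or frozen */
	if (!recheck_lookup(folio)) {		/* hypothetical revalidation */
		folio_put(folio);
		return NULL;
	}
	return folio;
}
#endif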
// SPDX-License-Identifier: GPL-2.0 /* Multipath TCP * * Copyright (c) 2020, Red Hat, Inc. */ #define pr_fmt(fmt) "MPTCP: " fmt #include <linux/inet.h> #include <linux/kernel.h> #include <net/inet_common.h> #include <net/netns/generic.h> #include <net/mptcp.h> #include "protocol.h" #include "mib.h" #include "mptcp_pm_gen.h" static int pm_nl_pernet_id; struct mptcp_pm_add_entry { struct list_head list; struct mptcp_addr_info addr; u8 retrans_times; struct timer_list add_timer; struct mptcp_sock *sock; }; struct pm_nl_pernet { /* protects pernet updates */ spinlock_t lock; struct list_head local_addr_list; unsigned int addrs; unsigned int stale_loss_cnt; unsigned int add_addr_signal_max; unsigned int add_addr_accept_max; unsigned int local_addr_max; unsigned int subflows_max; unsigned int next_id; DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); }; #define MPTCP_PM_ADDR_MAX 8 #define ADD_ADDR_RETRANS_MAX 3 static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net) { return net_generic(net, pm_nl_pernet_id); } static struct pm_nl_pernet * pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk) { return pm_nl_get_pernet(sock_net((struct sock *)msk)); } bool mptcp_addresses_equal(const struct mptcp_addr_info *a, const struct mptcp_addr_info *b, bool use_port) { bool addr_equals = false; if (a->family == b->family) { if (a->family == AF_INET) addr_equals = a->addr.s_addr == b->addr.s_addr; #if IS_ENABLED(CONFIG_MPTCP_IPV6) else addr_equals = !ipv6_addr_cmp(&a->addr6, &b->addr6); } else if (a->family == AF_INET) { if (ipv6_addr_v4mapped(&b->addr6)) addr_equals = a->addr.s_addr == b->addr6.s6_addr32[3]; } else if (b->family == AF_INET) { if (ipv6_addr_v4mapped(&a->addr6)) addr_equals = a->addr6.s6_addr32[3] == b->addr.s_addr; #endif } if (!addr_equals) return false; if (!use_port) return true; return a->port == b->port; } void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr) { addr->family = skc->skc_family; addr->port = htons(skc->skc_num); if (addr->family == AF_INET) addr->addr.s_addr = skc->skc_rcv_saddr; #if IS_ENABLED(CONFIG_MPTCP_IPV6) else if (addr->family == AF_INET6) addr->addr6 = 
skc->skc_v6_rcv_saddr; #endif } static void remote_address(const struct sock_common *skc, struct mptcp_addr_info *addr) { addr->family = skc->skc_family; addr->port = skc->skc_dport; if (addr->family == AF_INET) addr->addr.s_addr = skc->skc_daddr; #if IS_ENABLED(CONFIG_MPTCP_IPV6) else if (addr->family == AF_INET6) addr->addr6 = skc->skc_v6_daddr; #endif } static bool lookup_subflow_by_saddr(const struct list_head *list, const struct mptcp_addr_info *saddr) { struct mptcp_subflow_context *subflow; struct mptcp_addr_info cur; struct sock_common *skc; list_for_each_entry(subflow, list, node) { skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow); mptcp_local_address(skc, &cur); if (mptcp_addresses_equal(&cur, saddr, saddr->port)) return true; } return false; } static bool lookup_subflow_by_daddr(const struct list_head *list, const struct mptcp_addr_info *daddr) { struct mptcp_subflow_context *subflow; struct mptcp_addr_info cur; list_for_each_entry(subflow, list, node) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); if (!((1 << inet_sk_state_load(ssk)) & (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV))) continue; remote_address((struct sock_common *)ssk, &cur); if (mptcp_addresses_equal(&cur, daddr, daddr->port)) return true; } return false; } static bool select_local_address(const struct pm_nl_pernet *pernet, const struct mptcp_sock *msk, struct mptcp_pm_addr_entry *new_entry) { struct mptcp_pm_addr_entry *entry; bool found = false; msk_owned_by_me(msk); rcu_read_lock(); list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) continue; if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap)) continue; *new_entry = *entry; found = true; break; } rcu_read_unlock(); return found; } static bool select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk, struct mptcp_pm_addr_entry *new_entry) { struct mptcp_pm_addr_entry *entry; bool found = false; rcu_read_lock(); /* do not keep any additional per socket state, just signal * the address list in order. * Note: removal from the local address list during the msk life-cycle * can lead to additional addresses not being announced. 
*/ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap)) continue; if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) continue; *new_entry = *entry; found = true; break; } rcu_read_unlock(); return found; } unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk) { const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); return READ_ONCE(pernet->add_addr_signal_max); } EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max); unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk) { struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); return READ_ONCE(pernet->add_addr_accept_max); } EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max); unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk) { struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); return READ_ONCE(pernet->subflows_max); } EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max); unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk) { struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); return READ_ONCE(pernet->local_addr_max); } EXPORT_SYMBOL_GPL(mptcp_pm_get_local_addr_max); bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk) { struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) || (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) { WRITE_ONCE(msk->pm.work_pending, false); return false; } return true; } struct mptcp_pm_add_entry * mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk, const struct mptcp_addr_info *addr) { struct mptcp_pm_add_entry *entry; lockdep_assert_held(&msk->pm.lock); list_for_each_entry(entry, &msk->pm.anno_list, list) { if (mptcp_addresses_equal(&entry->addr, addr, true)) return entry; } return NULL; } bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk) { struct mptcp_pm_add_entry *entry; struct mptcp_addr_info saddr; bool ret = false; mptcp_local_address((struct sock_common *)sk, &saddr); spin_lock_bh(&msk->pm.lock); list_for_each_entry(entry, &msk->pm.anno_list, list) { if (mptcp_addresses_equal(&entry->addr, &saddr, true)) { ret = true; goto out; } } out: spin_unlock_bh(&msk->pm.lock); return ret; } static void mptcp_pm_add_timer(struct timer_list *timer) { struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer); struct mptcp_sock *msk = entry->sock; struct sock *sk = (struct sock *)msk; pr_debug("msk=%p\n", msk); if (!msk) return; if (inet_sk_state_load(sk) == TCP_CLOSE) return; if (!entry->addr.id) return; if (mptcp_pm_should_add_signal_addr(msk)) { sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8); goto out; } spin_lock_bh(&msk->pm.lock); if (!mptcp_pm_should_add_signal_addr(msk)) { pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id); mptcp_pm_announce_addr(msk, &entry->addr, false); mptcp_pm_add_addr_send_ack(msk); entry->retrans_times++; } if (entry->retrans_times < ADD_ADDR_RETRANS_MAX) sk_reset_timer(sk, timer, jiffies + mptcp_get_add_addr_timeout(sock_net(sk))); spin_unlock_bh(&msk->pm.lock); if (entry->retrans_times == ADD_ADDR_RETRANS_MAX) mptcp_pm_subflow_established(msk); out: __sock_put(sk); } struct mptcp_pm_add_entry * mptcp_pm_del_add_timer(struct mptcp_sock *msk, const struct mptcp_addr_info *addr, bool check_id) { struct mptcp_pm_add_entry *entry; struct sock *sk = (struct sock *)msk; spin_lock_bh(&msk->pm.lock); entry = 
mptcp_lookup_anno_list_by_saddr(msk, addr); if (entry && (!check_id || entry->addr.id == addr->id)) entry->retrans_times = ADD_ADDR_RETRANS_MAX; spin_unlock_bh(&msk->pm.lock); if (entry && (!check_id || entry->addr.id == addr->id)) sk_stop_timer_sync(sk, &entry->add_timer); return entry; } bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, const struct mptcp_addr_info *addr) { struct mptcp_pm_add_entry *add_entry = NULL; struct sock *sk = (struct sock *)msk; struct net *net = sock_net(sk); lockdep_assert_held(&msk->pm.lock); add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr); if (add_entry) { if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk))) return false; sk_reset_timer(sk, &add_entry->add_timer, jiffies + mptcp_get_add_addr_timeout(net)); return true; } add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC); if (!add_entry) return false; list_add(&add_entry->list, &msk->pm.anno_list); add_entry->addr = *addr; add_entry->sock = msk; add_entry->retrans_times = 0; timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0); sk_reset_timer(sk, &add_entry->add_timer, jiffies + mptcp_get_add_addr_timeout(net)); return true; } void mptcp_pm_free_anno_list(struct mptcp_sock *msk) { struct mptcp_pm_add_entry *entry, *tmp; struct sock *sk = (struct sock *)msk; LIST_HEAD(free_list); pr_debug("msk=%p\n", msk); spin_lock_bh(&msk->pm.lock); list_splice_init(&msk->pm.anno_list, &free_list); spin_unlock_bh(&msk->pm.lock); list_for_each_entry_safe(entry, tmp, &free_list, list) { sk_stop_timer_sync(sk, &entry->add_timer); kfree(entry); } } /* Fill all the remote addresses into the array addrs[], * and return the array size. */ static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, struct mptcp_addr_info *local, bool fullmesh, struct mptcp_addr_info *addrs) { bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0); struct sock *sk = (struct sock *)msk, *ssk; struct mptcp_subflow_context *subflow; struct mptcp_addr_info remote = { 0 }; unsigned int subflows_max; int i = 0; subflows_max = mptcp_pm_get_subflows_max(msk); remote_address((struct sock_common *)sk, &remote); /* Non-fullmesh endpoint, fill in the single entry * corresponding to the primary MPC subflow remote address */ if (!fullmesh) { if (deny_id0) return 0; if (!mptcp_pm_addr_families_match(sk, local, &remote)) return 0; msk->pm.subflows++; addrs[i++] = remote; } else { DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); /* Forbid creation of new subflows matching existing * ones, possibly already created by incoming ADD_ADDR */ bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); mptcp_for_each_subflow(msk, subflow) if (READ_ONCE(subflow->local_id) == local->id) __set_bit(subflow->remote_id, unavail_id); mptcp_for_each_subflow(msk, subflow) { ssk = mptcp_subflow_tcp_sock(subflow); remote_address((struct sock_common *)ssk, &addrs[i]); addrs[i].id = READ_ONCE(subflow->remote_id); if (deny_id0 && !addrs[i].id) continue; if (test_bit(addrs[i].id, unavail_id)) continue; if (!mptcp_pm_addr_families_match(sk, local, &addrs[i])) continue; if (msk->pm.subflows < subflows_max) { /* forbid creating multiple address towards * this id */ __set_bit(addrs[i].id, unavail_id); msk->pm.subflows++; i++; } } } return i; } static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, bool prio, bool backup) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); bool slow; pr_debug("send ack for %s\n", prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? 
"add_addr" : "rm_addr")); slow = lock_sock_fast(ssk); if (prio) { subflow->send_mp_prio = 1; subflow->request_bkup = backup; } __mptcp_subflow_send_ack(ssk); unlock_sock_fast(ssk, slow); } static void mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, bool prio, bool backup) { spin_unlock_bh(&msk->pm.lock); __mptcp_pm_send_ack(msk, subflow, prio, backup); spin_lock_bh(&msk->pm.lock); } static struct mptcp_pm_addr_entry * __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id) { struct mptcp_pm_addr_entry *entry; list_for_each_entry(entry, &pernet->local_addr_list, list) { if (entry->addr.id == id) return entry; } return NULL; } static struct mptcp_pm_addr_entry * __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info) { struct mptcp_pm_addr_entry *entry; list_for_each_entry(entry, &pernet->local_addr_list, list) { if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) return entry; } return NULL; } static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) { struct sock *sk = (struct sock *)msk; struct mptcp_pm_addr_entry local; unsigned int add_addr_signal_max; bool signal_and_subflow = false; unsigned int local_addr_max; struct pm_nl_pernet *pernet; unsigned int subflows_max; pernet = pm_nl_get_pernet(sock_net(sk)); add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk); local_addr_max = mptcp_pm_get_local_addr_max(msk); subflows_max = mptcp_pm_get_subflows_max(msk); /* do lazy endpoint usage accounting for the MPC subflows */ if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first); struct mptcp_pm_addr_entry *entry; struct mptcp_addr_info mpc_addr; bool backup = false; mptcp_local_address((struct sock_common *)msk->first, &mpc_addr); rcu_read_lock(); entry = __lookup_addr(pernet, &mpc_addr); if (entry) { __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap); msk->mpc_endpoint_id = entry->addr.id; backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); } rcu_read_unlock(); if (backup) mptcp_pm_send_ack(msk, subflow, true, backup); msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED); } pr_debug("local %d:%d signal %d:%d subflows %d:%d\n", msk->pm.local_addr_used, local_addr_max, msk->pm.add_addr_signaled, add_addr_signal_max, msk->pm.subflows, subflows_max); /* check first for announce */ if (msk->pm.add_addr_signaled < add_addr_signal_max) { /* due to racing events on both ends we can reach here while * previous add address is still running: if we invoke now * mptcp_pm_announce_addr(), that will fail and the * corresponding id will be marked as used. * Instead let the PM machinery reschedule us when the * current address announce will be completed. */ if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL)) return; if (!select_signal_address(pernet, msk, &local)) goto subflow; /* If the alloc fails, we are on memory pressure, not worth * continuing, and trying to create subflows. 
*/ if (!mptcp_pm_alloc_anno_list(msk, &local.addr)) return; __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); msk->pm.add_addr_signaled++; /* Special case for ID0: set the correct ID */ if (local.addr.id == msk->mpc_endpoint_id) local.addr.id = 0; mptcp_pm_announce_addr(msk, &local.addr, false); mptcp_pm_nl_addr_send_ack(msk); if (local.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) signal_and_subflow = true; } subflow: /* check if should create a new subflow */ while (msk->pm.local_addr_used < local_addr_max && msk->pm.subflows < subflows_max) { struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX]; bool fullmesh; int i, nr; if (signal_and_subflow) signal_and_subflow = false; else if (!select_local_address(pernet, msk, &local)) break; fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH); __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); /* Special case for ID0: set the correct ID */ if (local.addr.id == msk->mpc_endpoint_id) local.addr.id = 0; else /* local_addr_used is not decr for ID 0 */ msk->pm.local_addr_used++; nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs); if (nr == 0) continue; spin_unlock_bh(&msk->pm.lock); for (i = 0; i < nr; i++) __mptcp_subflow_connect(sk, &local.addr, &addrs[i]); spin_lock_bh(&msk->pm.lock); } mptcp_pm_nl_check_work_pending(msk); } static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk) { mptcp_pm_create_subflow_or_signal_addr(msk); } static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk) { mptcp_pm_create_subflow_or_signal_addr(msk); } /* Fill all the local addresses into the array addrs[], * and return the array size. */ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk, struct mptcp_addr_info *remote, struct mptcp_addr_info *addrs) { struct sock *sk = (struct sock *)msk; struct mptcp_pm_addr_entry *entry; struct mptcp_addr_info mpc_addr; struct pm_nl_pernet *pernet; unsigned int subflows_max; int i = 0; pernet = pm_nl_get_pernet_from_msk(msk); subflows_max = mptcp_pm_get_subflows_max(msk); mptcp_local_address((struct sock_common *)msk, &mpc_addr); rcu_read_lock(); list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH)) continue; if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote)) continue; if (msk->pm.subflows < subflows_max) { msk->pm.subflows++; addrs[i] = entry->addr; /* Special case for ID0: set the correct ID */ if (mptcp_addresses_equal(&entry->addr, &mpc_addr, entry->addr.port)) addrs[i].id = 0; i++; } } rcu_read_unlock(); /* If the array is empty, fill in the single * 'IPADDRANY' local address */ if (!i) { struct mptcp_addr_info local; memset(&local, 0, sizeof(local)); local.family = #if IS_ENABLED(CONFIG_MPTCP_IPV6) remote->family == AF_INET6 && ipv6_addr_v4mapped(&remote->addr6) ? 
AF_INET : #endif remote->family; if (!mptcp_pm_addr_families_match(sk, &local, remote)) return 0; msk->pm.subflows++; addrs[i++] = local; } return i; } static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) { struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX]; struct sock *sk = (struct sock *)msk; unsigned int add_addr_accept_max; struct mptcp_addr_info remote; unsigned int subflows_max; bool sf_created = false; int i, nr; add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk); subflows_max = mptcp_pm_get_subflows_max(msk); pr_debug("accepted %d:%d remote family %d\n", msk->pm.add_addr_accepted, add_addr_accept_max, msk->pm.remote.family); remote = msk->pm.remote; mptcp_pm_announce_addr(msk, &remote, true); mptcp_pm_nl_addr_send_ack(msk); if (lookup_subflow_by_daddr(&msk->conn_list, &remote)) return; /* pick id 0 port, if none is provided the remote address */ if (!remote.port) remote.port = sk->sk_dport; /* connect to the specified remote address, using whatever * local address the routing configuration will pick. */ nr = fill_local_addresses_vec(msk, &remote, addrs); if (nr == 0) return; spin_unlock_bh(&msk->pm.lock); for (i = 0; i < nr; i++) if (__mptcp_subflow_connect(sk, &addrs[i], &remote) == 0) sf_created = true; spin_lock_bh(&msk->pm.lock); if (sf_created) { /* add_addr_accepted is not decr for ID 0 */ if (remote.id) msk->pm.add_addr_accepted++; if (msk->pm.add_addr_accepted >= add_addr_accept_max || msk->pm.subflows >= subflows_max) WRITE_ONCE(msk->pm.accept_addr, false); } } bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk, const struct mptcp_addr_info *remote) { struct mptcp_addr_info mpc_remote; remote_address((struct sock_common *)msk, &mpc_remote); return mptcp_addresses_equal(&mpc_remote, remote, remote->port); } void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk) { struct mptcp_subflow_context *subflow; msk_owned_by_me(msk); lockdep_assert_held(&msk->pm.lock); if (!mptcp_pm_should_add_signal(msk) && !mptcp_pm_should_rm_signal(msk)) return; mptcp_for_each_subflow(msk, subflow) { if (__mptcp_subflow_active(subflow)) { mptcp_pm_send_ack(msk, subflow, false, false); break; } } } int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, struct mptcp_addr_info *addr, struct mptcp_addr_info *rem, u8 bkup) { struct mptcp_subflow_context *subflow; pr_debug("bkup=%d\n", bkup); mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); struct mptcp_addr_info local, remote; mptcp_local_address((struct sock_common *)ssk, &local); if (!mptcp_addresses_equal(&local, addr, addr->port)) continue; if (rem && rem->family != AF_UNSPEC) { remote_address((struct sock_common *)ssk, &remote); if (!mptcp_addresses_equal(&remote, rem, rem->port)) continue; } __mptcp_pm_send_ack(msk, subflow, true, bkup); return 0; } return -EINVAL; } static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list, enum linux_mptcp_mib_field rm_type) { struct mptcp_subflow_context *subflow, *tmp; struct sock *sk = (struct sock *)msk; u8 i; pr_debug("%s rm_list_nr %d\n", rm_type == MPTCP_MIB_RMADDR ? 
"address" : "subflow", rm_list->nr); msk_owned_by_me(msk); if (sk->sk_state == TCP_LISTEN) return; if (!rm_list->nr) return; if (list_empty(&msk->conn_list)) return; for (i = 0; i < rm_list->nr; i++) { u8 rm_id = rm_list->ids[i]; bool removed = false; mptcp_for_each_subflow_safe(msk, subflow, tmp) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); u8 remote_id = READ_ONCE(subflow->remote_id); int how = RCV_SHUTDOWN | SEND_SHUTDOWN; u8 id = subflow_get_local_id(subflow); if (inet_sk_state_load(ssk) == TCP_CLOSE) continue; if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id) continue; if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id) continue; pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n", rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", i, rm_id, id, remote_id, msk->mpc_endpoint_id); spin_unlock_bh(&msk->pm.lock); mptcp_subflow_shutdown(sk, ssk, how); /* the following takes care of updating the subflows counter */ mptcp_close_ssk(sk, ssk, subflow); spin_lock_bh(&msk->pm.lock); removed |= subflow->request_join; if (rm_type == MPTCP_MIB_RMSUBFLOW) __MPTCP_INC_STATS(sock_net(sk), rm_type); } if (rm_type == MPTCP_MIB_RMADDR) __MPTCP_INC_STATS(sock_net(sk), rm_type); if (!removed) continue; if (!mptcp_pm_is_kernel(msk)) continue; if (rm_type == MPTCP_MIB_RMADDR && rm_id && !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) { /* Note: if the subflow has been closed before, this * add_addr_accepted counter will not be decremented. */ if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk)) WRITE_ONCE(msk->pm.accept_addr, true); } } } static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk) { mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR); } static void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list) { mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW); } void mptcp_pm_nl_work(struct mptcp_sock *msk) { struct mptcp_pm_data *pm = &msk->pm; msk_owned_by_me(msk); if (!(pm->status & MPTCP_PM_WORK_MASK)) return; spin_lock_bh(&msk->pm.lock); pr_debug("msk=%p status=%x\n", msk, pm->status); if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) { pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED); mptcp_pm_nl_add_addr_received(msk); } if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) { pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK); mptcp_pm_nl_addr_send_ack(msk); } if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) { pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED); mptcp_pm_nl_rm_addr_received(msk); } if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) { pm->status &= ~BIT(MPTCP_PM_ESTABLISHED); mptcp_pm_nl_fully_established(msk); } if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) { pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED); mptcp_pm_nl_subflow_established(msk); } spin_unlock_bh(&msk->pm.lock); } static bool address_use_port(struct mptcp_pm_addr_entry *entry) { return (entry->flags & (MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) == MPTCP_PM_ADDR_FLAG_SIGNAL; } /* caller must ensure the RCU grace period is already elapsed */ static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry) { if (entry->lsk) sock_release(entry->lsk); kfree(entry); } static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, struct mptcp_pm_addr_entry *entry, bool needs_id) { struct mptcp_pm_addr_entry *cur, *del_entry = NULL; unsigned int addr_max; int ret = -EINVAL; spin_lock_bh(&pernet->lock); /* to keep the code simple, don't do IDR-like allocation 
for address ID, * just bail when we exceed limits */ if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID) pernet->next_id = 1; if (pernet->addrs >= MPTCP_PM_ADDR_MAX) { ret = -ERANGE; goto out; } if (test_bit(entry->addr.id, pernet->id_bitmap)) { ret = -EBUSY; goto out; } /* do not insert duplicate address, differentiate on port only * singled addresses */ if (!address_use_port(entry)) entry->addr.port = 0; list_for_each_entry(cur, &pernet->local_addr_list, list) { if (mptcp_addresses_equal(&cur->addr, &entry->addr, cur->addr.port || entry->addr.port)) { /* allow replacing the exiting endpoint only if such * endpoint is an implicit one and the user-space * did not provide an endpoint id */ if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)) { ret = -EEXIST; goto out; } if (entry->addr.id) goto out; pernet->addrs--; entry->addr.id = cur->addr.id; list_del_rcu(&cur->list); del_entry = cur; break; } } if (!entry->addr.id && needs_id) { find_next: entry->addr.id = find_next_zero_bit(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1, pernet->next_id); if (!entry->addr.id && pernet->next_id != 1) { pernet->next_id = 1; goto find_next; } } if (!entry->addr.id && needs_id) goto out; __set_bit(entry->addr.id, pernet->id_bitmap); if (entry->addr.id > pernet->next_id) pernet->next_id = entry->addr.id; if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) { addr_max = pernet->add_addr_signal_max; WRITE_ONCE(pernet->add_addr_signal_max, addr_max + 1); } if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { addr_max = pernet->local_addr_max; WRITE_ONCE(pernet->local_addr_max, addr_max + 1); } pernet->addrs++; if (!entry->addr.port) list_add_tail_rcu(&entry->list, &pernet->local_addr_list); else list_add_rcu(&entry->list, &pernet->local_addr_list); ret = entry->addr.id; out: spin_unlock_bh(&pernet->lock); /* just replaced an existing entry, free it */ if (del_entry) { synchronize_rcu(); __mptcp_pm_release_addr_entry(del_entry); } return ret; } static struct lock_class_key mptcp_slock_keys[2]; static struct lock_class_key mptcp_keys[2]; static int mptcp_pm_nl_create_listen_socket(struct sock *sk, struct mptcp_pm_addr_entry *entry) { bool is_ipv6 = sk->sk_family == AF_INET6; int addrlen = sizeof(struct sockaddr_in); struct sockaddr_storage addr; struct sock *newsk, *ssk; int backlog = 1024; int err; err = sock_create_kern(sock_net(sk), entry->addr.family, SOCK_STREAM, IPPROTO_MPTCP, &entry->lsk); if (err) return err; newsk = entry->lsk->sk; if (!newsk) return -EINVAL; /* The subflow socket lock is acquired in a nested to the msk one * in several places, even by the TCP stack, and this msk is a kernel * socket: lockdep complains. Instead of propagating the _nested * modifiers in several places, re-init the lock class for the msk * socket to an mptcp specific one. */ sock_lock_init_class_and_name(newsk, is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET", &mptcp_slock_keys[is_ipv6], is_ipv6 ? 
"msk_lock-AF_INET6" : "msk_lock-AF_INET", &mptcp_keys[is_ipv6]); lock_sock(newsk); ssk = __mptcp_nmpc_sk(mptcp_sk(newsk)); release_sock(newsk); if (IS_ERR(ssk)) return PTR_ERR(ssk); mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family); #if IS_ENABLED(CONFIG_MPTCP_IPV6) if (entry->addr.family == AF_INET6) addrlen = sizeof(struct sockaddr_in6); #endif if (ssk->sk_family == AF_INET) err = inet_bind_sk(ssk, (struct sockaddr *)&addr, addrlen); #if IS_ENABLED(CONFIG_MPTCP_IPV6) else if (ssk->sk_family == AF_INET6) err = inet6_bind_sk(ssk, (struct sockaddr *)&addr, addrlen); #endif if (err) return err; /* We don't use mptcp_set_state() here because it needs to be called * under the msk socket lock. For the moment, that will not bring * anything more than only calling inet_sk_state_store(), because the * old status is known (TCP_CLOSE). */ inet_sk_state_store(newsk, TCP_LISTEN); lock_sock(ssk); err = __inet_listen_sk(ssk, backlog); if (!err) mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED); release_sock(ssk); return err; } int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc) { struct mptcp_pm_addr_entry *entry; struct pm_nl_pernet *pernet; int ret = -1; pernet = pm_nl_get_pernet_from_msk(msk); rcu_read_lock(); list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) { ret = entry->addr.id; break; } } rcu_read_unlock(); if (ret >= 0) return ret; /* address not found, add to local list */ entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return -ENOMEM; entry->addr = *skc; entry->addr.id = 0; entry->addr.port = 0; entry->ifindex = 0; entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT; entry->lsk = NULL; ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true); if (ret < 0) kfree(entry); return ret; } bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc) { struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); struct mptcp_pm_addr_entry *entry; bool backup = false; rcu_read_lock(); list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) { backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); break; } } rcu_read_unlock(); return backup; } #define MPTCP_PM_CMD_GRP_OFFSET 0 #define MPTCP_PM_EV_GRP_OFFSET 1 static const struct genl_multicast_group mptcp_pm_mcgrps[] = { [MPTCP_PM_CMD_GRP_OFFSET] = { .name = MPTCP_PM_CMD_GRP_NAME, }, [MPTCP_PM_EV_GRP_OFFSET] = { .name = MPTCP_PM_EV_GRP_NAME, .flags = GENL_MCAST_CAP_NET_ADMIN, }, }; void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) { struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk); struct sock *sk = (struct sock *)msk; unsigned int active_max_loss_cnt; struct net *net = sock_net(sk); unsigned int stale_loss_cnt; bool slow; stale_loss_cnt = mptcp_stale_loss_cnt(net); if (subflow->stale || !stale_loss_cnt || subflow->stale_count <= stale_loss_cnt) return; /* look for another available subflow not in loss state */ active_max_loss_cnt = max_t(int, stale_loss_cnt - 1, 1); mptcp_for_each_subflow(msk, iter) { if (iter != subflow && mptcp_subflow_active(iter) && iter->stale_count < active_max_loss_cnt) { /* we have some alternatives, try to mark this subflow as idle ...*/ slow = lock_sock_fast(ssk); if (!tcp_rtx_and_write_queues_empty(ssk)) { subflow->stale = 1; __mptcp_retransmit_pending_data(sk); MPTCP_INC_STATS(net, MPTCP_MIB_SUBFLOWSTALE); } unlock_sock_fast(ssk, slow); /* always 
try to push the pending data regardless of re-injections: * we can possibly use backup subflows now, and subflow selection * is cheap under the msk socket lock */ __mptcp_push_pending(sk, 0); return; } } } static int mptcp_pm_family_to_addr(int family) { #if IS_ENABLED(CONFIG_MPTCP_IPV6) if (family == AF_INET6) return MPTCP_PM_ADDR_ATTR_ADDR6; #endif return MPTCP_PM_ADDR_ATTR_ADDR4; } static int mptcp_pm_parse_pm_addr_attr(struct nlattr *tb[], const struct nlattr *attr, struct genl_info *info, struct mptcp_addr_info *addr, bool require_family) { int err, addr_addr; if (!attr) { GENL_SET_ERR_MSG(info, "missing address info"); return -EINVAL; } /* no validation needed - was already done via nested policy */ err = nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr, mptcp_pm_address_nl_policy, info->extack); if (err) return err; if (tb[MPTCP_PM_ADDR_ATTR_ID]) addr->id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]); if (!tb[MPTCP_PM_ADDR_ATTR_FAMILY]) { if (!require_family) return 0; NL_SET_ERR_MSG_ATTR(info->extack, attr, "missing family"); return -EINVAL; } addr->family = nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_FAMILY]); if (addr->family != AF_INET #if IS_ENABLED(CONFIG_MPTCP_IPV6) && addr->family != AF_INET6 #endif ) { NL_SET_ERR_MSG_ATTR(info->extack, attr, "unknown address family"); return -EINVAL; } addr_addr = mptcp_pm_family_to_addr(addr->family); if (!tb[addr_addr]) { NL_SET_ERR_MSG_ATTR(info->extack, attr, "missing address data"); return -EINVAL; } #if IS_ENABLED(CONFIG_MPTCP_IPV6) if (addr->family == AF_INET6) addr->addr6 = nla_get_in6_addr(tb[addr_addr]); else #endif addr->addr.s_addr = nla_get_in_addr(tb[addr_addr]); if (tb[MPTCP_PM_ADDR_ATTR_PORT]) addr->port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT])); return 0; } int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info, struct mptcp_addr_info *addr) { struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; memset(addr, 0, sizeof(*addr)); return mptcp_pm_parse_pm_addr_attr(tb, attr, info, addr, true); } int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, bool require_family, struct mptcp_pm_addr_entry *entry) { struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; int err; memset(entry, 0, sizeof(*entry)); err = mptcp_pm_parse_pm_addr_attr(tb, attr, info, &entry->addr, require_family); if (err) return err; if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX]) { u32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]); entry->ifindex = val; } if (tb[MPTCP_PM_ADDR_ATTR_FLAGS]) entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]); if (tb[MPTCP_PM_ADDR_ATTR_PORT]) entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT])); return 0; } static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info) { return pm_nl_get_pernet(genl_info_net(info)); } static int mptcp_nl_add_subflow_or_signal_addr(struct net *net, struct mptcp_addr_info *addr) { struct mptcp_sock *msk; long s_slot = 0, s_num = 0; while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { struct sock *sk = (struct sock *)msk; struct mptcp_addr_info mpc_addr; if (!READ_ONCE(msk->fully_established) || mptcp_pm_is_userspace(msk)) goto next; /* if the endp linked to the init sf is re-added with a != ID */ mptcp_local_address((struct sock_common *)msk, &mpc_addr); lock_sock(sk); spin_lock_bh(&msk->pm.lock); if (mptcp_addresses_equal(addr, &mpc_addr, addr->port)) msk->mpc_endpoint_id = addr->id; mptcp_pm_create_subflow_or_signal_addr(msk); spin_unlock_bh(&msk->pm.lock); release_sock(sk); next: sock_put(sk); cond_resched(); } return 0; } static bool 
mptcp_pm_has_addr_attr_id(const struct nlattr *attr, struct genl_info *info) { struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr, mptcp_pm_address_nl_policy, info->extack) && tb[MPTCP_PM_ADDR_ATTR_ID]) return true; return false; } int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; struct pm_nl_pernet *pernet = genl_info_pm_nl(info); struct mptcp_pm_addr_entry addr, *entry; int ret; ret = mptcp_pm_parse_entry(attr, info, true, &addr); if (ret < 0) return ret; if (addr.addr.port && !address_use_port(&addr)) { GENL_SET_ERR_MSG(info, "flags must have signal and not subflow when using port"); return -EINVAL; } if (addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL && addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) { GENL_SET_ERR_MSG(info, "flags mustn't have both signal and fullmesh"); return -EINVAL; } if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) { GENL_SET_ERR_MSG(info, "can't create IMPLICIT endpoint"); return -EINVAL; } entry = kzalloc(sizeof(*entry), GFP_KERNEL_ACCOUNT); if (!entry) { GENL_SET_ERR_MSG(info, "can't allocate addr"); return -ENOMEM; } *entry = addr; if (entry->addr.port) { ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry); if (ret) { GENL_SET_ERR_MSG_FMT(info, "create listen socket error: %d", ret); goto out_free; } } ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, !mptcp_pm_has_addr_attr_id(attr, info)); if (ret < 0) { GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret); goto out_free; } mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr); return 0; out_free: __mptcp_pm_release_addr_entry(entry); return ret; } int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, u8 *flags, int *ifindex) { struct mptcp_pm_addr_entry *entry; struct sock *sk = (struct sock *)msk; struct net *net = sock_net(sk); /* No entries with ID 0 */ if (id == 0) return 0; rcu_read_lock(); entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id); if (entry) { *flags = entry->flags; *ifindex = entry->ifindex; } rcu_read_unlock(); return 0; } static bool remove_anno_list_by_saddr(struct mptcp_sock *msk, const struct mptcp_addr_info *addr) { struct mptcp_pm_add_entry *entry; entry = mptcp_pm_del_add_timer(msk, addr, false); if (entry) { list_del(&entry->list); kfree(entry); return true; } return false; } static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk, const struct mptcp_addr_info *addr) { return msk->mpc_endpoint_id == addr->id ? 0 : addr->id; } static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk, const struct mptcp_addr_info *addr, bool force) { struct mptcp_rm_list list = { .nr = 0 }; bool ret; list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); ret = remove_anno_list_by_saddr(msk, addr); if (ret || force) { spin_lock_bh(&msk->pm.lock); if (ret) { __set_bit(addr->id, msk->pm.id_avail_bitmap); msk->pm.add_addr_signaled--; } mptcp_pm_remove_addr(msk, &list); spin_unlock_bh(&msk->pm.lock); } return ret; } static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id) { /* If it was marked as used, and not ID 0, decrement local_addr_used */ if (!__test_and_set_bit(id ? 
: msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) && id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0)) msk->pm.local_addr_used--; } static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, const struct mptcp_pm_addr_entry *entry) { const struct mptcp_addr_info *addr = &entry->addr; struct mptcp_rm_list list = { .nr = 1 }; long s_slot = 0, s_num = 0; struct mptcp_sock *msk; pr_debug("remove_id=%d\n", addr->id); while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { struct sock *sk = (struct sock *)msk; bool remove_subflow; if (mptcp_pm_is_userspace(msk)) goto next; if (list_empty(&msk->conn_list)) { mptcp_pm_remove_anno_addr(msk, addr, false); goto next; } lock_sock(sk); remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr); mptcp_pm_remove_anno_addr(msk, addr, remove_subflow && !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)); list.ids[0] = mptcp_endp_get_local_id(msk, addr); if (remove_subflow) { spin_lock_bh(&msk->pm.lock); mptcp_pm_nl_rm_subflow_received(msk, &list); spin_unlock_bh(&msk->pm.lock); } if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { spin_lock_bh(&msk->pm.lock); __mark_subflow_endp_available(msk, list.ids[0]); spin_unlock_bh(&msk->pm.lock); } if (msk->mpc_endpoint_id == entry->addr.id) msk->mpc_endpoint_id = 0; release_sock(sk); next: sock_put(sk); cond_resched(); } return 0; } static int mptcp_nl_remove_id_zero_address(struct net *net, struct mptcp_addr_info *addr) { struct mptcp_rm_list list = { .nr = 0 }; long s_slot = 0, s_num = 0; struct mptcp_sock *msk; list.ids[list.nr++] = 0; while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { struct sock *sk = (struct sock *)msk; struct mptcp_addr_info msk_local; if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk)) goto next; mptcp_local_address((struct sock_common *)msk, &msk_local); if (!mptcp_addresses_equal(&msk_local, addr, addr->port)) goto next; lock_sock(sk); spin_lock_bh(&msk->pm.lock); mptcp_pm_remove_addr(msk, &list); mptcp_pm_nl_rm_subflow_received(msk, &list); __mark_subflow_endp_available(msk, 0); spin_unlock_bh(&msk->pm.lock); release_sock(sk); next: sock_put(sk); cond_resched(); } return 0; } int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; struct pm_nl_pernet *pernet = genl_info_pm_nl(info); struct mptcp_pm_addr_entry addr, *entry; unsigned int addr_max; int ret; ret = mptcp_pm_parse_entry(attr, info, false, &addr); if (ret < 0) return ret; /* the zero id address is special: the first address used by the msk * always gets such an id, so different subflows can have different zero * id addresses. Additionally zero id is not accounted for in id_bitmap. * Let's use an 'mptcp_rm_list' instead of the common remove code. 
*/ if (addr.addr.id == 0) return mptcp_nl_remove_id_zero_address(sock_net(skb->sk), &addr.addr); spin_lock_bh(&pernet->lock); entry = __lookup_addr_by_id(pernet, addr.addr.id); if (!entry) { GENL_SET_ERR_MSG(info, "address not found"); spin_unlock_bh(&pernet->lock); return -EINVAL; } if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) { addr_max = pernet->add_addr_signal_max; WRITE_ONCE(pernet->add_addr_signal_max, addr_max - 1); } if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { addr_max = pernet->local_addr_max; WRITE_ONCE(pernet->local_addr_max, addr_max - 1); } pernet->addrs--; list_del_rcu(&entry->list); __clear_bit(entry->addr.id, pernet->id_bitmap); spin_unlock_bh(&pernet->lock); mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry); synchronize_rcu(); __mptcp_pm_release_addr_entry(entry); return ret; } /* Called from the userspace PM only */ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list) { struct mptcp_rm_list alist = { .nr = 0 }; struct mptcp_pm_addr_entry *entry; int anno_nr = 0; list_for_each_entry(entry, rm_list, list) { if (alist.nr >= MPTCP_RM_IDS_MAX) break; /* only delete if either announced or matching a subflow */ if (remove_anno_list_by_saddr(msk, &entry->addr)) anno_nr++; else if (!lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) continue; alist.ids[alist.nr++] = entry->addr.id; } if (alist.nr) { spin_lock_bh(&msk->pm.lock); msk->pm.add_addr_signaled -= anno_nr; mptcp_pm_remove_addr(msk, &alist); spin_unlock_bh(&msk->pm.lock); } } /* Called from the in-kernel PM only */ static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, struct list_head *rm_list) { struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 }; struct mptcp_pm_addr_entry *entry; list_for_each_entry(entry, rm_list, list) { if (slist.nr < MPTCP_RM_IDS_MAX && lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); if (alist.nr < MPTCP_RM_IDS_MAX && remove_anno_list_by_saddr(msk, &entry->addr)) alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); } spin_lock_bh(&msk->pm.lock); if (alist.nr) { msk->pm.add_addr_signaled -= alist.nr; mptcp_pm_remove_addr(msk, &alist); } if (slist.nr) mptcp_pm_nl_rm_subflow_received(msk, &slist); /* Reset counters: maybe some subflows have been removed before */ bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); msk->pm.local_addr_used = 0; spin_unlock_bh(&msk->pm.lock); } static void mptcp_nl_remove_addrs_list(struct net *net, struct list_head *rm_list) { long s_slot = 0, s_num = 0; struct mptcp_sock *msk; if (list_empty(rm_list)) return; while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { struct sock *sk = (struct sock *)msk; if (!mptcp_pm_is_userspace(msk)) { lock_sock(sk); mptcp_pm_remove_addrs_and_subflows(msk, rm_list); release_sock(sk); } sock_put(sk); cond_resched(); } } /* caller must ensure the RCU grace period is already elapsed */ static void __flush_addrs(struct list_head *list) { while (!list_empty(list)) { struct mptcp_pm_addr_entry *cur; cur = list_entry(list->next, struct mptcp_pm_addr_entry, list); list_del_rcu(&cur->list); __mptcp_pm_release_addr_entry(cur); } } static void __reset_counters(struct pm_nl_pernet *pernet) { WRITE_ONCE(pernet->add_addr_signal_max, 0); WRITE_ONCE(pernet->add_addr_accept_max, 0); WRITE_ONCE(pernet->local_addr_max, 0); pernet->addrs = 0; } int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info) { struct pm_nl_pernet *pernet = 
genl_info_pm_nl(info); LIST_HEAD(free_list); spin_lock_bh(&pernet->lock); list_splice_init(&pernet->local_addr_list, &free_list); __reset_counters(pernet); pernet->next_id = 1; bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); spin_unlock_bh(&pernet->lock); mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list); synchronize_rcu(); __flush_addrs(&free_list); return 0; } int mptcp_nl_fill_addr(struct sk_buff *skb, struct mptcp_pm_addr_entry *entry) { struct mptcp_addr_info *addr = &entry->addr; struct nlattr *attr; attr = nla_nest_start(skb, MPTCP_PM_ATTR_ADDR); if (!attr) return -EMSGSIZE; if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_FAMILY, addr->family)) goto nla_put_failure; if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_PORT, ntohs(addr->port))) goto nla_put_failure; if (nla_put_u8(skb, MPTCP_PM_ADDR_ATTR_ID, addr->id)) goto nla_put_failure; if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->flags)) goto nla_put_failure; if (entry->ifindex && nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->ifindex)) goto nla_put_failure; if (addr->family == AF_INET && nla_put_in_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR4, addr->addr.s_addr)) goto nla_put_failure; #if IS_ENABLED(CONFIG_MPTCP_IPV6) else if (addr->family == AF_INET6 && nla_put_in6_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR6, &addr->addr6)) goto nla_put_failure; #endif nla_nest_end(skb, attr); return 0; nla_put_failure: nla_nest_cancel(skb, attr); return -EMSGSIZE; } int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info) { struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; struct pm_nl_pernet *pernet = genl_info_pm_nl(info); struct mptcp_pm_addr_entry addr, *entry; struct sk_buff *msg; void *reply; int ret; ret = mptcp_pm_parse_entry(attr, info, false, &addr); if (ret < 0) return ret; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0, info->genlhdr->cmd); if (!reply) { GENL_SET_ERR_MSG(info, "not enough space in Netlink message"); ret = -EMSGSIZE; goto fail; } spin_lock_bh(&pernet->lock); entry = __lookup_addr_by_id(pernet, addr.addr.id); if (!entry) { GENL_SET_ERR_MSG(info, "address not found"); ret = -EINVAL; goto unlock_fail; } ret = mptcp_nl_fill_addr(msg, entry); if (ret) goto unlock_fail; genlmsg_end(msg, reply); ret = genlmsg_reply(msg, info); spin_unlock_bh(&pernet->lock); return ret; unlock_fail: spin_unlock_bh(&pernet->lock); fail: nlmsg_free(msg); return ret; } int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info) { return mptcp_pm_get_addr(skb, info); } int mptcp_pm_nl_dump_addr(struct sk_buff *msg, struct netlink_callback *cb) { struct net *net = sock_net(msg->sk); struct mptcp_pm_addr_entry *entry; struct pm_nl_pernet *pernet; int id = cb->args[0]; void *hdr; int i; pernet = pm_nl_get_pernet(net); spin_lock_bh(&pernet->lock); for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) { if (test_bit(i, pernet->id_bitmap)) { entry = __lookup_addr_by_id(pernet, i); if (!entry) break; if (entry->addr.id <= id) continue; hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &mptcp_genl_family, NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR); if (!hdr) break; if (mptcp_nl_fill_addr(msg, entry) < 0) { genlmsg_cancel(msg, hdr); break; } id = entry->addr.id; genlmsg_end(msg, hdr); } } spin_unlock_bh(&pernet->lock); cb->args[0] = id; return msg->len; } int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { return mptcp_pm_dump_addr(msg, cb); } static int parse_limit(struct genl_info *info, int id, 
unsigned int *limit) { struct nlattr *attr = info->attrs[id]; if (!attr) return 0; *limit = nla_get_u32(attr); if (*limit > MPTCP_PM_ADDR_MAX) { GENL_SET_ERR_MSG(info, "limit greater than maximum"); return -EINVAL; } return 0; } int mptcp_pm_nl_set_limits_doit(struct sk_buff *skb, struct genl_info *info) { struct pm_nl_pernet *pernet = genl_info_pm_nl(info); unsigned int rcv_addrs, subflows; int ret; spin_lock_bh(&pernet->lock); rcv_addrs = pernet->add_addr_accept_max; ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs); if (ret) goto unlock; subflows = pernet->subflows_max; ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows); if (ret) goto unlock; WRITE_ONCE(pernet->add_addr_accept_max, rcv_addrs); WRITE_ONCE(pernet->subflows_max, subflows); unlock: spin_unlock_bh(&pernet->lock); return ret; } int mptcp_pm_nl_get_limits_doit(struct sk_buff *skb, struct genl_info *info) { struct pm_nl_pernet *pernet = genl_info_pm_nl(info); struct sk_buff *msg; void *reply; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0, MPTCP_PM_CMD_GET_LIMITS); if (!reply) goto fail; if (nla_put_u32(msg, MPTCP_PM_ATTR_RCV_ADD_ADDRS, READ_ONCE(pernet->add_addr_accept_max))) goto fail; if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS, READ_ONCE(pernet->subflows_max))) goto fail; genlmsg_end(msg, reply); return genlmsg_reply(msg, info); fail: GENL_SET_ERR_MSG(info, "not enough space in Netlink message"); nlmsg_free(msg); return -EMSGSIZE; } static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk, struct mptcp_addr_info *addr) { struct mptcp_rm_list list = { .nr = 0 }; list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); spin_lock_bh(&msk->pm.lock); mptcp_pm_nl_rm_subflow_received(msk, &list); __mark_subflow_endp_available(msk, list.ids[0]); mptcp_pm_create_subflow_or_signal_addr(msk); spin_unlock_bh(&msk->pm.lock); } static int mptcp_nl_set_flags(struct net *net, struct mptcp_addr_info *addr, u8 bkup, u8 changed) { long s_slot = 0, s_num = 0; struct mptcp_sock *msk; int ret = -EINVAL; while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { struct sock *sk = (struct sock *)msk; if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk)) goto next; lock_sock(sk); if (changed & MPTCP_PM_ADDR_FLAG_BACKUP) ret = mptcp_pm_nl_mp_prio_send_ack(msk, addr, NULL, bkup); if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH) mptcp_pm_nl_fullmesh(msk, addr); release_sock(sk); next: sock_put(sk); cond_resched(); } return ret; } int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info) { struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, }; struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR]; u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP | MPTCP_PM_ADDR_FLAG_FULLMESH; struct net *net = sock_net(skb->sk); struct mptcp_pm_addr_entry *entry; struct pm_nl_pernet *pernet; u8 lookup_by_id = 0; u8 bkup = 0; int ret; pernet = pm_nl_get_pernet(net); ret = mptcp_pm_parse_entry(attr, info, false, &addr); if (ret < 0) return ret; if (addr.addr.family == AF_UNSPEC) { lookup_by_id = 1; if (!addr.addr.id) { GENL_SET_ERR_MSG(info, "missing required inputs"); return -EOPNOTSUPP; } } if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP) bkup = 1; spin_lock_bh(&pernet->lock); entry = lookup_by_id ? 
__lookup_addr_by_id(pernet, addr.addr.id) : __lookup_addr(pernet, &addr.addr); if (!entry) { spin_unlock_bh(&pernet->lock); GENL_SET_ERR_MSG(info, "address not found"); return -EINVAL; } if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) && (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) { spin_unlock_bh(&pernet->lock); GENL_SET_ERR_MSG(info, "invalid addr flags"); return -EINVAL; } changed = (addr.flags ^ entry->flags) & mask; entry->flags = (entry->flags & ~mask) | (addr.flags & mask); addr = *entry; spin_unlock_bh(&pernet->lock); mptcp_nl_set_flags(net, &addr.addr, bkup, changed); return 0; } int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info) { return mptcp_pm_set_flags(skb, info); } static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp) { genlmsg_multicast_netns(&mptcp_genl_family, net, nlskb, 0, MPTCP_PM_EV_GRP_OFFSET, gfp); } bool mptcp_userspace_pm_active(const struct mptcp_sock *msk) { return genl_has_listeners(&mptcp_genl_family, sock_net((const struct sock *)msk), MPTCP_PM_EV_GRP_OFFSET); } static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk) { const struct inet_sock *issk = inet_sk(ssk); const struct mptcp_subflow_context *sf; if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family)) return -EMSGSIZE; switch (ssk->sk_family) { case AF_INET: if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr)) return -EMSGSIZE; if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, issk->inet_daddr)) return -EMSGSIZE; break; #if IS_ENABLED(CONFIG_MPTCP_IPV6) case AF_INET6: { const struct ipv6_pinfo *np = inet6_sk(ssk); if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr)) return -EMSGSIZE; if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &ssk->sk_v6_daddr)) return -EMSGSIZE; break; } #endif default: WARN_ON_ONCE(1); return -EMSGSIZE; } if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport)) return -EMSGSIZE; if (nla_put_be16(skb, MPTCP_ATTR_DPORT, issk->inet_dport)) return -EMSGSIZE; sf = mptcp_subflow_ctx(ssk); if (WARN_ON_ONCE(!sf)) return -EINVAL; if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, subflow_get_local_id(sf))) return -EMSGSIZE; if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id)) return -EMSGSIZE; return 0; } static int mptcp_event_put_token_and_ssk(struct sk_buff *skb, const struct mptcp_sock *msk, const struct sock *ssk) { const struct sock *sk = (const struct sock *)msk; const struct mptcp_subflow_context *sf; u8 sk_err; if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token))) return -EMSGSIZE; if (mptcp_event_add_subflow(skb, ssk)) return -EMSGSIZE; sf = mptcp_subflow_ctx(ssk); if (WARN_ON_ONCE(!sf)) return -EINVAL; if (nla_put_u8(skb, MPTCP_ATTR_BACKUP, sf->backup)) return -EMSGSIZE; if (ssk->sk_bound_dev_if && nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if)) return -EMSGSIZE; sk_err = READ_ONCE(ssk->sk_err); if (sk_err && sk->sk_state == TCP_ESTABLISHED && nla_put_u8(skb, MPTCP_ATTR_ERROR, sk_err)) return -EMSGSIZE; return 0; } static int mptcp_event_sub_established(struct sk_buff *skb, const struct mptcp_sock *msk, const struct sock *ssk) { return mptcp_event_put_token_and_ssk(skb, msk, ssk); } static int mptcp_event_sub_closed(struct sk_buff *skb, const struct mptcp_sock *msk, const struct sock *ssk) { const struct mptcp_subflow_context *sf; if (mptcp_event_put_token_and_ssk(skb, msk, ssk)) return -EMSGSIZE; sf = mptcp_subflow_ctx(ssk); if (!sf->reset_seen) return 0; if (nla_put_u32(skb, MPTCP_ATTR_RESET_REASON, sf->reset_reason)) return -EMSGSIZE; if (nla_put_u32(skb, MPTCP_ATTR_RESET_FLAGS, 
sf->reset_transient)) return -EMSGSIZE; return 0; } static int mptcp_event_created(struct sk_buff *skb, const struct mptcp_sock *msk, const struct sock *ssk) { int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)); if (err) return err; if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side))) return -EMSGSIZE; return mptcp_event_add_subflow(skb, ssk); } void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id) { struct net *net = sock_net((const struct sock *)msk); struct nlmsghdr *nlh; struct sk_buff *skb; if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET)) return; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return; nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, MPTCP_EVENT_REMOVED); if (!nlh) goto nla_put_failure; if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token))) goto nla_put_failure; if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, id)) goto nla_put_failure; genlmsg_end(skb, nlh); mptcp_nl_mcast_send(net, skb, GFP_ATOMIC); return; nla_put_failure: nlmsg_free(skb); } void mptcp_event_addr_announced(const struct sock *ssk, const struct mptcp_addr_info *info) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); struct mptcp_sock *msk = mptcp_sk(subflow->conn); struct net *net = sock_net(ssk); struct nlmsghdr *nlh; struct sk_buff *skb; if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET)) return; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return; nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, MPTCP_EVENT_ANNOUNCED); if (!nlh) goto nla_put_failure; if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token))) goto nla_put_failure; if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id)) goto nla_put_failure; if (nla_put_be16(skb, MPTCP_ATTR_DPORT, info->port == 0 ? 
inet_sk(ssk)->inet_dport : info->port)) goto nla_put_failure; switch (info->family) { case AF_INET: if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, info->addr.s_addr)) goto nla_put_failure; break; #if IS_ENABLED(CONFIG_MPTCP_IPV6) case AF_INET6: if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &info->addr6)) goto nla_put_failure; break; #endif default: WARN_ON_ONCE(1); goto nla_put_failure; } genlmsg_end(skb, nlh); mptcp_nl_mcast_send(net, skb, GFP_ATOMIC); return; nla_put_failure: nlmsg_free(skb); } void mptcp_event_pm_listener(const struct sock *ssk, enum mptcp_event_type event) { const struct inet_sock *issk = inet_sk(ssk); struct net *net = sock_net(ssk); struct nlmsghdr *nlh; struct sk_buff *skb; if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET)) return; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return; nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, event); if (!nlh) goto nla_put_failure; if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family)) goto nla_put_failure; if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport)) goto nla_put_failure; switch (ssk->sk_family) { case AF_INET: if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr)) goto nla_put_failure; break; #if IS_ENABLED(CONFIG_MPTCP_IPV6) case AF_INET6: { const struct ipv6_pinfo *np = inet6_sk(ssk); if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr)) goto nla_put_failure; break; } #endif default: WARN_ON_ONCE(1); goto nla_put_failure; } genlmsg_end(skb, nlh); mptcp_nl_mcast_send(net, skb, GFP_KERNEL); return; nla_put_failure: nlmsg_free(skb); } void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp) { struct net *net = sock_net((const struct sock *)msk); struct nlmsghdr *nlh; struct sk_buff *skb; if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET)) return; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!skb) return; nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, type); if (!nlh) goto nla_put_failure; switch (type) { case MPTCP_EVENT_UNSPEC: WARN_ON_ONCE(1); break; case MPTCP_EVENT_CREATED: case MPTCP_EVENT_ESTABLISHED: if (mptcp_event_created(skb, msk, ssk) < 0) goto nla_put_failure; break; case MPTCP_EVENT_CLOSED: if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)) < 0) goto nla_put_failure; break; case MPTCP_EVENT_ANNOUNCED: case MPTCP_EVENT_REMOVED: /* call mptcp_event_addr_announced()/removed instead */ WARN_ON_ONCE(1); break; case MPTCP_EVENT_SUB_ESTABLISHED: case MPTCP_EVENT_SUB_PRIORITY: if (mptcp_event_sub_established(skb, msk, ssk) < 0) goto nla_put_failure; break; case MPTCP_EVENT_SUB_CLOSED: if (mptcp_event_sub_closed(skb, msk, ssk) < 0) goto nla_put_failure; break; case MPTCP_EVENT_LISTENER_CREATED: case MPTCP_EVENT_LISTENER_CLOSED: break; } genlmsg_end(skb, nlh); mptcp_nl_mcast_send(net, skb, gfp); return; nla_put_failure: nlmsg_free(skb); } struct genl_family mptcp_genl_family __ro_after_init = { .name = MPTCP_PM_NAME, .version = MPTCP_PM_VER, .netnsok = true, .module = THIS_MODULE, .ops = mptcp_pm_nl_ops, .n_ops = ARRAY_SIZE(mptcp_pm_nl_ops), .resv_start_op = MPTCP_PM_CMD_SUBFLOW_DESTROY + 1, .mcgrps = mptcp_pm_mcgrps, .n_mcgrps = ARRAY_SIZE(mptcp_pm_mcgrps), }; static int __net_init pm_nl_init_net(struct net *net) { struct pm_nl_pernet *pernet = pm_nl_get_pernet(net); INIT_LIST_HEAD_RCU(&pernet->local_addr_list); /* Cit. 2 subflows ought to be enough for anybody. 
*/ pernet->subflows_max = 2; pernet->next_id = 1; pernet->stale_loss_cnt = 4; spin_lock_init(&pernet->lock); /* No need to initialize other pernet fields, the struct is zeroed at * allocation time. */ return 0; } static void __net_exit pm_nl_exit_net(struct list_head *net_list) { struct net *net; list_for_each_entry(net, net_list, exit_list) { struct pm_nl_pernet *pernet = pm_nl_get_pernet(net); /* net is removed from namespace list, can't race with * other modifiers, also netns core already waited for a * RCU grace period. */ __flush_addrs(&pernet->local_addr_list); } } static struct pernet_operations mptcp_pm_pernet_ops = { .init = pm_nl_init_net, .exit_batch = pm_nl_exit_net, .id = &pm_nl_pernet_id, .size = sizeof(struct pm_nl_pernet), }; void __init mptcp_pm_nl_init(void) { if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0) panic("Failed to register MPTCP PM pernet subsystem.\n"); if (genl_register_family(&mptcp_genl_family)) panic("Failed to register MPTCP PM netlink family\n"); }
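/*
 * Illustrative sketch (not kernel code): the in-kernel path manager above
 * reserves endpoint ID 0 for the address backing the initial subflow and
 * tracks the remaining IDs in an availability bitmap.  mptcp_endp_get_local_id()
 * maps the endpoint matching the initial subflow back to ID 0, and
 * __mark_subflow_endp_available() returns an ID to the pool while decrementing
 * local_addr_used.  The stand-alone toy below loosely mirrors only that
 * bookkeeping; the toy_* names and the MAX_ID value are stand-ins chosen for
 * the demo (MAX_ID plays the role of MPTCP_PM_MAX_ADDR_ID).
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ID 255				/* stand-in for MPTCP_PM_MAX_ADDR_ID */

struct toy_pm {
	bool id_avail[MAX_ID + 1];		/* stand-in for pm.id_avail_bitmap */
	unsigned int local_addr_used;
	unsigned char mpc_endpoint_id;		/* endpoint backing the initial subflow */
};

/* The endpoint matching the initial subflow is always advertised as ID 0. */
static unsigned int toy_get_local_id(const struct toy_pm *pm, unsigned char id)
{
	return pm->mpc_endpoint_id == id ? 0 : id;
}

/* Take an ID out of the pool when a subflow is created from it. */
static void toy_use_id(struct toy_pm *pm, unsigned char id)
{
	if (pm->id_avail[id]) {
		pm->id_avail[id] = false;
		if (id)				/* ID 0 is never accounted */
			pm->local_addr_used++;
	}
}

/* Return an ID to the pool when its endpoint or subflow goes away. */
static void toy_release_id(struct toy_pm *pm, unsigned char id)
{
	if (!pm->id_avail[id]) {
		pm->id_avail[id] = true;
		if (id && pm->local_addr_used)
			pm->local_addr_used--;
	}
}

int main(void)
{
	struct toy_pm pm = { .mpc_endpoint_id = 3 };

	for (int i = 0; i <= MAX_ID; i++)
		pm.id_avail[i] = true;

	toy_use_id(&pm, 5);
	printf("endpoint 3 advertised as ID %u\n", toy_get_local_id(&pm, 3));
	printf("local_addr_used=%u\n", pm.local_addr_used);
	toy_release_id(&pm, 5);
	printf("local_addr_used=%u\n", pm.local_addr_used);
	return 0;
}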
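/*
 * Illustrative sketch (not kernel code): mptcp_pm_remove_addrs_and_subflows()
 * above batches the IDs to withdraw into bounded 'mptcp_rm_list' arrays (one
 * for announcements, one for subflows) so the PM lock is taken once per batch.
 * The toy below shows only the bounded-batching idea; RM_IDS_MAX and the
 * toy_* names are illustrative stand-ins, the real bound is MPTCP_RM_IDS_MAX.
 */
#include <stdio.h>

#define RM_IDS_MAX 8				/* illustrative cap only */

struct toy_rm_list {
	unsigned char ids[RM_IDS_MAX];
	unsigned char nr;
};

/* Add an ID if the batch still has room; further IDs wait for a later pass. */
static int toy_rm_list_add(struct toy_rm_list *list, unsigned char id)
{
	if (list->nr >= RM_IDS_MAX)
		return -1;
	list->ids[list->nr++] = id;
	return 0;
}

int main(void)
{
	struct toy_rm_list anno = { .nr = 0 };

	for (unsigned char id = 1; id <= 10; id++)
		if (toy_rm_list_add(&anno, id))
			printf("ID %u deferred, batch full\n", id);

	printf("removing %u announced addresses in one signal\n",
	       (unsigned int)anno.nr);
	return 0;
}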
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

/*
 * Returns %true if the given DIO request should be attempted with DIO, or
 * %false if it should fall back to buffered I/O.
 *
 * DIO isn't well specified; when it's unsupported (either due to the request
 * being misaligned, or due to the file not supporting DIO at all), filesystems
 * either fall back to buffered I/O or return EINVAL.  For files that don't use
 * any special features like encryption or verity, ext4 has traditionally
 * returned EINVAL for misaligned DIO.  iomap_dio_rw() uses this convention too.
 * In this case, we should attempt the DIO, *not* fall back to buffered I/O.
 *
 * In contrast, in cases where DIO is unsupported due to ext4 features, ext4
 * traditionally falls back to buffered I/O.
 *
 * This function implements the traditional ext4 behavior in all these cases.
 */
static bool ext4_should_use_dio(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	u32 dio_align = ext4_dio_alignment(inode);

	if (dio_align == 0)
		return false;

	if (dio_align == 1)
		return true;

	return IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), dio_align);
}

static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	if (!ext4_should_use_dio(iocb, to)) {
		inode_unlock_shared(inode);
		/*
		 * Fallback to buffered I/O if the operation being performed on
		 * the inode is not supported by direct I/O. The IOCB_DIRECT
		 * flag needs to be cleared here in order to ensure that the
		 * direct I/O path within generic_file_read_iter() is not
		 * taken.
*/ iocb->ki_flags &= ~IOCB_DIRECT; return generic_file_read_iter(iocb, to); } ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0, NULL, 0); inode_unlock_shared(inode); file_accessed(iocb->ki_filp); return ret; } #ifdef CONFIG_FS_DAX static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct inode *inode = file_inode(iocb->ki_filp); ssize_t ret; if (iocb->ki_flags & IOCB_NOWAIT) { if (!inode_trylock_shared(inode)) return -EAGAIN; } else { inode_lock_shared(inode); } /* * Recheck under inode lock - at this point we are sure it cannot * change anymore */ if (!IS_DAX(inode)) { inode_unlock_shared(inode); /* Fallback to buffered IO in case we cannot support DAX */ return generic_file_read_iter(iocb, to); } ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops); inode_unlock_shared(inode); file_accessed(iocb->ki_filp); return ret; } #endif static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct inode *inode = file_inode(iocb->ki_filp); if (unlikely(ext4_forced_shutdown(inode->i_sb))) return -EIO; if (!iov_iter_count(to)) return 0; /* skip atime */ #ifdef CONFIG_FS_DAX if (IS_DAX(inode)) return ext4_dax_read_iter(iocb, to); #endif if (iocb->ki_flags & IOCB_DIRECT) return ext4_dio_read_iter(iocb, to); return generic_file_read_iter(iocb, to); } static ssize_t ext4_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct inode *inode = file_inode(in); if (unlikely(ext4_forced_shutdown(inode->i_sb))) return -EIO; return filemap_splice_read(in, ppos, pipe, len, flags); } /* * Called when an inode is released. Note that this is different * from ext4_file_open: open gets called at every open, but release * gets called only when /all/ the files are closed. */ static int ext4_release_file(struct inode *inode, struct file *filp) { if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) { ext4_alloc_da_blocks(inode); ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); } /* if we are the last writer on the inode, drop the block reservation */ if ((filp->f_mode & FMODE_WRITE) && (atomic_read(&inode->i_writecount) == 1) && !EXT4_I(inode)->i_reserved_data_blocks) { down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); up_write(&EXT4_I(inode)->i_data_sem); } if (is_dx(inode) && filp->private_data) ext4_htree_free_dir_info(filp->private_data); return 0; } /* * This tests whether the IO in question is block-aligned or not. * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they * are converted to written only after the IO is complete. Until they are * mapped, these blocks appear as holes, so dio_zero_block() will assume that * it needs to zero out portions of the start and/or end block. If 2 AIO * threads are at work on the same unwritten block, they must be synchronized * or one thread will zero the other's data, causing corruption. */ static bool ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos) { struct super_block *sb = inode->i_sb; unsigned long blockmask = sb->s_blocksize - 1; if ((pos | iov_iter_alignment(from)) & blockmask) return true; return false; } static bool ext4_extending_io(struct inode *inode, loff_t offset, size_t len) { if (offset + len > i_size_read(inode) || offset + len > EXT4_I(inode)->i_disksize) return true; return false; } /* Is IO overwriting allocated or initialized blocks? 
*/ static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len, bool *unwritten) { struct ext4_map_blocks map; unsigned int blkbits = inode->i_blkbits; int err, blklen; if (pos + len > i_size_read(inode)) return false; map.m_lblk = pos >> blkbits; map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits); blklen = map.m_len; err = ext4_map_blocks(NULL, inode, &map, 0); if (err != blklen) return false; /* * 'err==len' means that all of the blocks have been preallocated, * regardless of whether they have been initialized or not. We need to * check m_flags to distinguish the unwritten extents. */ *unwritten = !(map.m_flags & EXT4_MAP_MAPPED); return true; } static ssize_t ext4_generic_write_checks(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); ssize_t ret; if (unlikely(IS_IMMUTABLE(inode))) return -EPERM; ret = generic_write_checks(iocb, from); if (ret <= 0) return ret; /* * If we have encountered a bitmap-format file, the size limit * is smaller than s_maxbytes, which is for extent-mapped files. */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) return -EFBIG; iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos); } return iov_iter_count(from); } static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from) { ssize_t ret, count; count = ext4_generic_write_checks(iocb, from); if (count <= 0) return count; ret = file_modified(iocb->ki_filp); if (ret) return ret; return count; } static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, struct iov_iter *from) { ssize_t ret; struct inode *inode = file_inode(iocb->ki_filp); if (iocb->ki_flags & IOCB_NOWAIT) return -EOPNOTSUPP; inode_lock(inode); ret = ext4_write_checks(iocb, from); if (ret <= 0) goto out; ret = generic_perform_write(iocb, from); out: inode_unlock(inode); if (unlikely(ret <= 0)) return ret; return generic_write_sync(iocb, ret); } static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset, ssize_t count) { handle_t *handle; lockdep_assert_held_write(&inode->i_rwsem); handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); if (IS_ERR(handle)) return PTR_ERR(handle); if (ext4_update_inode_size(inode, offset + count)) { int ret = ext4_mark_inode_dirty(handle, inode); if (unlikely(ret)) { ext4_journal_stop(handle); return ret; } } if (inode->i_nlink) ext4_orphan_del(handle, inode); ext4_journal_stop(handle); return count; } /* * Clean up the inode after DIO or DAX extending write has completed and the * inode size has been updated using ext4_handle_inode_extension(). */ static void ext4_inode_extension_cleanup(struct inode *inode, ssize_t count) { lockdep_assert_held_write(&inode->i_rwsem); if (count < 0) { ext4_truncate_failed_write(inode); /* * If the truncate operation failed early, then the inode may * still be on the orphan list. In that case, we need to try * remove the inode from the in-memory linked list. */ if (inode->i_nlink) ext4_orphan_del(NULL, inode); return; } /* * If i_disksize got extended either due to writeback of delalloc * blocks or extending truncate while the DIO was running we could fail * to cleanup the orphan list in ext4_handle_inode_extension(). Do it * now. */ if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) { handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); if (IS_ERR(handle)) { /* * The write has successfully completed. 
Not much to * do with the error here so just cleanup the orphan * list and hope for the best. */ ext4_orphan_del(NULL, inode); return; } ext4_orphan_del(handle, inode); ext4_journal_stop(handle); } } static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error, unsigned int flags) { loff_t pos = iocb->ki_pos; struct inode *inode = file_inode(iocb->ki_filp); if (!error && size && flags & IOMAP_DIO_UNWRITTEN) error = ext4_convert_unwritten_extents(NULL, inode, pos, size); if (error) return error; /* * Note that EXT4_I(inode)->i_disksize can get extended up to * inode->i_size while the I/O was running due to writeback of delalloc * blocks. But the code in ext4_iomap_alloc() is careful to use * zeroed/unwritten extents if this is possible; thus we won't leave * uninitialized blocks in a file even if we didn't succeed in writing * as much as we intended. Also we can race with truncate or write * expanding the file so we have to be a bit careful here. */ if (pos + size <= READ_ONCE(EXT4_I(inode)->i_disksize) && pos + size <= i_size_read(inode)) return size; return ext4_handle_inode_extension(inode, pos, size); } static const struct iomap_dio_ops ext4_dio_write_ops = { .end_io = ext4_dio_write_end_io, }; /* * The intention here is to start with shared lock acquired then see if any * condition requires an exclusive inode lock. If yes, then we restart the * whole operation by releasing the shared lock and acquiring exclusive lock. * * - For unaligned_io we never take shared lock as it may cause data corruption * when two unaligned IO tries to modify the same block e.g. while zeroing. * * - For extending writes case we don't take the shared lock, since it requires * updating inode i_disksize and/or orphan handling with exclusive lock. * * - shared locking will only be true mostly with overwrites, including * initialized blocks and unwritten blocks. For overwrite unwritten blocks * we protect splitting extents by i_data_sem in ext4_inode_info, so we can * also release exclusive i_rwsem lock. * * - Otherwise we will switch to exclusive i_rwsem lock. */ static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from, bool *ilock_shared, bool *extend, bool *unwritten, int *dio_flags) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); loff_t offset; size_t count; ssize_t ret; bool overwrite, unaligned_io; restart: ret = ext4_generic_write_checks(iocb, from); if (ret <= 0) goto out; offset = iocb->ki_pos; count = ret; unaligned_io = ext4_unaligned_io(inode, from, offset); *extend = ext4_extending_io(inode, offset, count); overwrite = ext4_overwrite_io(inode, offset, count, unwritten); /* * Determine whether we need to upgrade to an exclusive lock. This is * required to change security info in file_modified(), for extending * I/O, any form of non-overwrite I/O, and unaligned I/O to unwritten * extents (as partial block zeroing may be required). * * Note that unaligned writes are allowed under shared lock so long as * they are pure overwrites. Otherwise, concurrent unaligned writes risk * data corruption due to partial block zeroing in the dio layer, and so * the I/O must occur exclusively. */ if (*ilock_shared && ((!IS_NOSEC(inode) || *extend || !overwrite || (unaligned_io && *unwritten)))) { if (iocb->ki_flags & IOCB_NOWAIT) { ret = -EAGAIN; goto out; } inode_unlock_shared(inode); *ilock_shared = false; inode_lock(inode); goto restart; } /* * Now that locking is settled, determine dio flags and exclusivity * requirements. 
We don't use DIO_OVERWRITE_ONLY because we enforce * behavior already. The inode lock is already held exclusive if the * write is non-overwrite or extending, so drain all outstanding dio and * set the force wait dio flag. */ if (!*ilock_shared && (unaligned_io || *extend)) { if (iocb->ki_flags & IOCB_NOWAIT) { ret = -EAGAIN; goto out; } if (unaligned_io && (!overwrite || *unwritten)) inode_dio_wait(inode); *dio_flags = IOMAP_DIO_FORCE_WAIT; } ret = file_modified(file); if (ret < 0) goto out; return count; out: if (*ilock_shared) inode_unlock_shared(inode); else inode_unlock(inode); return ret; } static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from) { ssize_t ret; handle_t *handle; struct inode *inode = file_inode(iocb->ki_filp); loff_t offset = iocb->ki_pos; size_t count = iov_iter_count(from); const struct iomap_ops *iomap_ops = &ext4_iomap_ops; bool extend = false, unwritten = false; bool ilock_shared = true; int dio_flags = 0; /* * Quick check here without any i_rwsem lock to see if it is extending * IO. A more reliable check is done in ext4_dio_write_checks() with * proper locking in place. */ if (offset + count > i_size_read(inode)) ilock_shared = false; if (iocb->ki_flags & IOCB_NOWAIT) { if (ilock_shared) { if (!inode_trylock_shared(inode)) return -EAGAIN; } else { if (!inode_trylock(inode)) return -EAGAIN; } } else { if (ilock_shared) inode_lock_shared(inode); else inode_lock(inode); } /* Fallback to buffered I/O if the inode does not support direct I/O. */ if (!ext4_should_use_dio(iocb, from)) { if (ilock_shared) inode_unlock_shared(inode); else inode_unlock(inode); return ext4_buffered_write_iter(iocb, from); } /* * Prevent inline data from being created since we are going to allocate * blocks for DIO. We know the inode does not currently have inline data * because ext4_should_use_dio() checked for it, but we have to clear * the state flag before the write checks because a lock cycle could * introduce races with other writers. */ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend, &unwritten, &dio_flags); if (ret <= 0) return ret; offset = iocb->ki_pos; count = ret; if (extend) { handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } ret = ext4_orphan_add(handle, inode); if (ret) { ext4_journal_stop(handle); goto out; } ext4_journal_stop(handle); } if (ilock_shared && !unwritten) iomap_ops = &ext4_iomap_overwrite_ops; ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops, dio_flags, NULL, 0); if (ret == -ENOTBLK) ret = 0; if (extend) { /* * We always perform extending DIO write synchronously so by * now the IO is completed and ext4_handle_inode_extension() * was called. Cleanup the inode in case of error or race with * writeback of delalloc blocks. */ WARN_ON_ONCE(ret == -EIOCBQUEUED); ext4_inode_extension_cleanup(inode, ret); } out: if (ilock_shared) inode_unlock_shared(inode); else inode_unlock(inode); if (ret >= 0 && iov_iter_count(from)) { ssize_t err; loff_t endbyte; offset = iocb->ki_pos; err = ext4_buffered_write_iter(iocb, from); if (err < 0) return err; /* * We need to ensure that the pages within the page cache for * the range covered by this I/O are written to disk and * invalidated. This is in attempt to preserve the expected * direct I/O semantics in the case we fallback to buffered I/O * to complete off the I/O request. 
*/ ret += err; endbyte = offset + err - 1; err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping, offset, endbyte); if (!err) invalidate_mapping_pages(iocb->ki_filp->f_mapping, offset >> PAGE_SHIFT, endbyte >> PAGE_SHIFT); } return ret; } #ifdef CONFIG_FS_DAX static ssize_t ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) { ssize_t ret; size_t count; loff_t offset; handle_t *handle; bool extend = false; struct inode *inode = file_inode(iocb->ki_filp); if (iocb->ki_flags & IOCB_NOWAIT) { if (!inode_trylock(inode)) return -EAGAIN; } else { inode_lock(inode); } ret = ext4_write_checks(iocb, from); if (ret <= 0) goto out; offset = iocb->ki_pos; count = iov_iter_count(from); if (offset + count > EXT4_I(inode)->i_disksize) { handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } ret = ext4_orphan_add(handle, inode); if (ret) { ext4_journal_stop(handle); goto out; } extend = true; ext4_journal_stop(handle); } ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops); if (extend) { ret = ext4_handle_inode_extension(inode, offset, ret); ext4_inode_extension_cleanup(inode, ret); } out: inode_unlock(inode); if (ret > 0) ret = generic_write_sync(iocb, ret); return ret; } #endif static ssize_t ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); if (unlikely(ext4_forced_shutdown(inode->i_sb))) return -EIO; #ifdef CONFIG_FS_DAX if (IS_DAX(inode)) return ext4_dax_write_iter(iocb, from); #endif if (iocb->ki_flags & IOCB_DIRECT) return ext4_dio_write_iter(iocb, from); else return ext4_buffered_write_iter(iocb, from); } #ifdef CONFIG_FS_DAX static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order) { int error = 0; vm_fault_t result; int retries = 0; handle_t *handle = NULL; struct inode *inode = file_inode(vmf->vma->vm_file); struct super_block *sb = inode->i_sb; /* * We have to distinguish real writes from writes which will result in a * COW page; COW writes should *not* poke the journal (the file will not * be changed). Doing so would cause unintended failures when mounted * read-only. * * We check for VM_SHARED rather than vmf->cow_page since the latter is * unset for order != 0 (i.e. only in do_cow_fault); for * other sizes, dax_iomap_fault will handle splitting / fallback so that * we eventually come back with a COW page. */ bool write = (vmf->flags & FAULT_FLAG_WRITE) && (vmf->vma->vm_flags & VM_SHARED); struct address_space *mapping = vmf->vma->vm_file->f_mapping; pfn_t pfn; if (write) { sb_start_pagefault(sb); file_update_time(vmf->vma->vm_file); filemap_invalidate_lock_shared(mapping); retry: handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE, EXT4_DATA_TRANS_BLOCKS(sb)); if (IS_ERR(handle)) { filemap_invalidate_unlock_shared(mapping); sb_end_pagefault(sb); return VM_FAULT_SIGBUS; } } else { filemap_invalidate_lock_shared(mapping); } result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops); if (write) { ext4_journal_stop(handle); if ((result & VM_FAULT_ERROR) && error == -ENOSPC && ext4_should_retry_alloc(sb, &retries)) goto retry; /* Handling synchronous page fault? 
*/ if (result & VM_FAULT_NEEDDSYNC) result = dax_finish_sync_fault(vmf, order, pfn); filemap_invalidate_unlock_shared(mapping); sb_end_pagefault(sb); } else { filemap_invalidate_unlock_shared(mapping); } return result; } static vm_fault_t ext4_dax_fault(struct vm_fault *vmf) { return ext4_dax_huge_fault(vmf, 0); } static const struct vm_operations_struct ext4_dax_vm_ops = { .fault = ext4_dax_fault, .huge_fault = ext4_dax_huge_fault, .page_mkwrite = ext4_dax_fault, .pfn_mkwrite = ext4_dax_fault, }; #else #define ext4_dax_vm_ops ext4_file_vm_ops #endif static const struct vm_operations_struct ext4_file_vm_ops = { .fault = filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = ext4_page_mkwrite, }; static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file->f_mapping->host; struct dax_device *dax_dev = EXT4_SB(inode->i_sb)->s_daxdev; if (unlikely(ext4_forced_shutdown(inode->i_sb))) return -EIO; /* * We don't support synchronous mappings for non-DAX files and * for DAX files if underneath dax_device is not synchronous. */ if (!daxdev_mapping_supported(vma, dax_dev)) return -EOPNOTSUPP; file_accessed(file); if (IS_DAX(file_inode(file))) { vma->vm_ops = &ext4_dax_vm_ops; vm_flags_set(vma, VM_HUGEPAGE); } else { vma->vm_ops = &ext4_file_vm_ops; } return 0; } static int ext4_sample_last_mounted(struct super_block *sb, struct vfsmount *mnt) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct path path; char buf[64], *cp; handle_t *handle; int err; if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED))) return 0; if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb)) return 0; ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED); /* * Sample where the filesystem has been mounted and * store it in the superblock for sysadmin convenience * when trying to sort through large numbers of block * devices or filesystem images. */ memset(buf, 0, sizeof(buf)); path.mnt = mnt; path.dentry = mnt->mnt_root; cp = d_path(&path, buf, sizeof(buf)); err = 0; if (IS_ERR(cp)) goto out; handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1); err = PTR_ERR(handle); if (IS_ERR(handle)) goto out; BUFFER_TRACE(sbi->s_sbh, "get_write_access"); err = ext4_journal_get_write_access(handle, sb, sbi->s_sbh, EXT4_JTR_NONE); if (err) goto out_journal; lock_buffer(sbi->s_sbh); strtomem_pad(sbi->s_es->s_last_mounted, cp, 0); ext4_superblock_csum_set(sb); unlock_buffer(sbi->s_sbh); ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); out_journal: ext4_journal_stop(handle); out: sb_end_intwrite(sb); return err; } static int ext4_file_open(struct inode *inode, struct file *filp) { int ret; if (unlikely(ext4_forced_shutdown(inode->i_sb))) return -EIO; ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt); if (ret) return ret; ret = fscrypt_file_open(inode, filp); if (ret) return ret; ret = fsverity_file_open(inode, filp); if (ret) return ret; /* * Set up the jbd2_inode if we are opening the inode for * writing and the journal is present */ if (filp->f_mode & FMODE_WRITE) { ret = ext4_inode_attach_jinode(inode); if (ret < 0) return ret; } filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT; return dquot_file_open(inode, filp); } /* * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values * by calling generic_file_llseek_size() with the appropriate maxbytes * value for each. 
*/ loff_t ext4_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; loff_t maxbytes; if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes; else maxbytes = inode->i_sb->s_maxbytes; switch (whence) { default: return generic_file_llseek_size(file, offset, whence, maxbytes, i_size_read(inode)); case SEEK_HOLE: inode_lock_shared(inode); offset = iomap_seek_hole(inode, offset, &ext4_iomap_report_ops); inode_unlock_shared(inode); break; case SEEK_DATA: inode_lock_shared(inode); offset = iomap_seek_data(inode, offset, &ext4_iomap_report_ops); inode_unlock_shared(inode); break; } if (offset < 0) return offset; return vfs_setpos(file, offset, maxbytes); } const struct file_operations ext4_file_operations = { .llseek = ext4_llseek, .read_iter = ext4_file_read_iter, .write_iter = ext4_file_write_iter, .iopoll = iocb_bio_iopoll, .unlocked_ioctl = ext4_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext4_compat_ioctl, #endif .mmap = ext4_file_mmap, .open = ext4_file_open, .release = ext4_release_file, .fsync = ext4_sync_file, .get_unmapped_area = thp_get_unmapped_area, .splice_read = ext4_file_splice_read, .splice_write = iter_file_splice_write, .fallocate = ext4_fallocate, .fop_flags = FOP_MMAP_SYNC | FOP_BUFFER_RASYNC | FOP_DIO_PARALLEL_WRITE, }; const struct inode_operations ext4_file_inode_operations = { .setattr = ext4_setattr, .getattr = ext4_file_getattr, .listxattr = ext4_listxattr, .get_inode_acl = ext4_get_acl, .set_acl = ext4_set_acl, .fiemap = ext4_fiemap, .fileattr_get = ext4_fileattr_get, .fileattr_set = ext4_fileattr_set, };
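/*
 * Illustrative sketch (not kernel code): ext4_should_use_dio() and
 * ext4_unaligned_io() above rely on the same trick - OR the file position with
 * the iterator's buffer/length alignment and test the result against the
 * alignment mask, so a single check catches a misaligned offset, buffer or
 * length.  The stand-alone helper below demonstrates only that check; the
 * toy_* names and the sample values are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

/* True if both the file offset and the buffer/length alignment are multiples
 * of 'align' (align must be a power of two, e.g. the filesystem block size). */
static bool toy_dio_aligned(unsigned long long pos, unsigned long iter_align,
			    unsigned long align)
{
	return ((pos | iter_align) & (align - 1)) == 0;
}

int main(void)
{
	/* 4096-byte blocks: aligned offset + aligned buffer -> DIO is fine */
	printf("%d\n", toy_dio_aligned(8192, 4096, 4096));		/* 1 */
	/* misaligned offset -> fall back to buffered I/O (or -EINVAL) */
	printf("%d\n", toy_dio_aligned(8192 + 512, 4096, 4096));	/* 0 */
	return 0;
}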
/*
 * Poly1305 authenticator algorithm, RFC7539
 *
 * Copyright (C) 2015 Martin Willi
 *
 * Based on public domain code by Andrew Moon and Daniel J. Bernstein.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>

static int crypto_poly1305_init(struct shash_desc *desc)
{
	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);

	poly1305_core_init(&dctx->h);
	dctx->buflen = 0;
	dctx->rset = 0;
	dctx->sset = false;

	return 0;
}

static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
					       const u8 *src, unsigned int srclen)
{
	if (!dctx->sset) {
		if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
			poly1305_core_setkey(&dctx->core_r, src);
			src += POLY1305_BLOCK_SIZE;
			srclen -= POLY1305_BLOCK_SIZE;
			dctx->rset = 2;
		}
		if (srclen >= POLY1305_BLOCK_SIZE) {
			dctx->s[0] = get_unaligned_le32(src +  0);
			dctx->s[1] = get_unaligned_le32(src +  4);
			dctx->s[2] = get_unaligned_le32(src +  8);
			dctx->s[3] = get_unaligned_le32(src + 12);
			src += POLY1305_BLOCK_SIZE;
			srclen -= POLY1305_BLOCK_SIZE;
			dctx->sset = true;
		}
	}
	return srclen;
}

static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
			    unsigned int srclen)
{
	unsigned int datalen;

	if (unlikely(!dctx->sset)) {
		datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
		src += srclen - datalen;
		srclen = datalen;
	}

	poly1305_core_blocks(&dctx->h, &dctx->core_r, src,
			     srclen / POLY1305_BLOCK_SIZE, 1);
}

static int crypto_poly1305_update(struct shash_desc *desc,
				  const u8 *src, unsigned int srclen)
{
	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
	unsigned int bytes;

	if (unlikely(dctx->buflen)) {
		bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
		memcpy(dctx->buf + dctx->buflen, src, bytes);
		src += bytes;
		srclen -= bytes;
		dctx->buflen += bytes;

		if (dctx->buflen == POLY1305_BLOCK_SIZE) {
			poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE);
			dctx->buflen = 0;
		}
	}

	if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
		poly1305_blocks(dctx, src, srclen);
		src += srclen - (srclen % POLY1305_BLOCK_SIZE);
		srclen %= POLY1305_BLOCK_SIZE;
	}

	if (unlikely(srclen)) {
		dctx->buflen = srclen;
		memcpy(dctx->buf, src, srclen);
	}

	return 0;
}

static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
{
	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);

	if (unlikely(!dctx->sset))
		return -ENOKEY;

	poly1305_final_generic(dctx, dst);
	return 0;
}

static struct shash_alg poly1305_alg = {
	.digestsize	= POLY1305_DIGEST_SIZE,
	.init		= crypto_poly1305_init,
	.update		= crypto_poly1305_update,
	.final		= crypto_poly1305_final,
	.descsize	= sizeof(struct poly1305_desc_ctx),
	.base		= {
		.cra_name		= "poly1305",
		.cra_driver_name	= "poly1305-generic",
		.cra_priority		= 100,
		.cra_blocksize		= POLY1305_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	},
};

static int __init poly1305_mod_init(void)
{
	return crypto_register_shash(&poly1305_alg);
}

static void __exit poly1305_mod_exit(void)
{
	crypto_unregister_shash(&poly1305_alg);
}

subsys_initcall(poly1305_mod_init);
module_exit(poly1305_mod_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
MODULE_DESCRIPTION("Poly1305 authenticator");
MODULE_ALIAS_CRYPTO("poly1305");
MODULE_ALIAS_CRYPTO("poly1305-generic");
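/*
 * Illustrative sketch (not part of this file): how a kernel-side caller could
 * drive the "poly1305" shash registered above.  Note the quirk visible in
 * crypto_poly1305_setdesckey(): the transform has no ->setkey(), so the
 * 32-byte one-time key (r || s) must be fed in-band as the first 32 bytes of
 * the update stream, before the message itself.  poly1305_demo() is a
 * hypothetical helper written under those assumptions, with error handling
 * trimmed to the essentials.
 */
#include <crypto/hash.h>
#include <crypto/poly1305.h>
#include <linux/err.h>

static int poly1305_demo(const u8 key[POLY1305_KEY_SIZE],
			 const u8 *msg, unsigned int len,
			 u8 mac[POLY1305_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("poly1305", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_init(desc);
		if (!err)	/* the key goes in-band, as the first 32 bytes */
			err = crypto_shash_update(desc, key, POLY1305_KEY_SIZE);
		if (!err)
			err = crypto_shash_update(desc, msg, len);
		if (!err)
			err = crypto_shash_final(desc, mac);
	}

	crypto_free_shash(tfm);
	return err;
}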
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Extension Header handling for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Andi Kleen		<ak@muc.de>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 */

/* Changes:
 *	yoshfuji		: ensure not to overrun while parsing
 *				  tlv options.
 *	Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
 *	YOSHIFUJI Hideaki @USAGI  Register inbound extension header
 *				  handlers as inet6_protocol{}.
*/ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/icmpv6.h> #include <linux/slab.h> #include <linux/export.h> #include <net/dst.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/rawv6.h> #include <net/ndisc.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/calipso.h> #if IS_ENABLED(CONFIG_IPV6_MIP6) #include <net/xfrm.h> #endif #include <linux/seg6.h> #include <net/seg6.h> #ifdef CONFIG_IPV6_SEG6_HMAC #include <net/seg6_hmac.h> #endif #include <net/rpl.h> #include <linux/ioam6.h> #include <linux/ioam6_genl.h> #include <net/ioam6.h> #include <net/dst_metadata.h> #include <linux/uaccess.h> /********************* Generic functions *********************/ /* An unknown option is detected, decide what to do */ static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff, bool disallow_unknowns) { if (disallow_unknowns) { /* If unknown TLVs are disallowed by configuration * then always silently drop packet. Note this also * means no ICMP parameter problem is sent which * could be a good property to mitigate a reflection DOS * attack. */ goto drop; } switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { case 0: /* ignore */ return true; case 1: /* drop packet */ break; case 3: /* Send ICMP if not a multicast address and drop packet */ /* Actually, it is redundant check. icmp_send will recheck in any case. */ if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) break; fallthrough; case 2: /* send ICMP PARM PROB regardless and drop packet */ icmpv6_param_prob_reason(skb, ICMPV6_UNK_OPTION, optoff, SKB_DROP_REASON_UNHANDLED_PROTO); return false; } drop: kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO); return false; } static bool ipv6_hop_ra(struct sk_buff *skb, int optoff); static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff); static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff); static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff); #if IS_ENABLED(CONFIG_IPV6_MIP6) static bool ipv6_dest_hao(struct sk_buff *skb, int optoff); #endif /* Parse tlv encoded option header (hop-by-hop or destination) */ static bool ip6_parse_tlv(bool hopbyhop, struct sk_buff *skb, int max_count) { int len = (skb_transport_header(skb)[1] + 1) << 3; const unsigned char *nh = skb_network_header(skb); int off = skb_network_header_len(skb); bool disallow_unknowns = false; int tlv_count = 0; int padlen = 0; if (unlikely(max_count < 0)) { disallow_unknowns = true; max_count = -max_count; } off += 2; len -= 2; while (len > 0) { int optlen, i; if (nh[off] == IPV6_TLV_PAD1) { padlen++; if (padlen > 7) goto bad; off++; len--; continue; } if (len < 2) goto bad; optlen = nh[off + 1] + 2; if (optlen > len) goto bad; if (nh[off] == IPV6_TLV_PADN) { /* RFC 2460 states that the purpose of PadN is * to align the containing header to multiples * of 8. 7 is therefore the highest valid value. * See also RFC 4942, Section 2.1.9.5. */ padlen += optlen; if (padlen > 7) goto bad; /* RFC 4942 recommends receiving hosts to * actively check PadN payload to contain * only zeroes. 
*/ for (i = 2; i < optlen; i++) { if (nh[off + i] != 0) goto bad; } } else { tlv_count++; if (tlv_count > max_count) goto bad; if (hopbyhop) { switch (nh[off]) { case IPV6_TLV_ROUTERALERT: if (!ipv6_hop_ra(skb, off)) return false; break; case IPV6_TLV_IOAM: if (!ipv6_hop_ioam(skb, off)) return false; nh = skb_network_header(skb); break; case IPV6_TLV_JUMBO: if (!ipv6_hop_jumbo(skb, off)) return false; break; case IPV6_TLV_CALIPSO: if (!ipv6_hop_calipso(skb, off)) return false; break; default: if (!ip6_tlvopt_unknown(skb, off, disallow_unknowns)) return false; break; } } else { switch (nh[off]) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_TLV_HAO: if (!ipv6_dest_hao(skb, off)) return false; break; #endif default: if (!ip6_tlvopt_unknown(skb, off, disallow_unknowns)) return false; break; } } padlen = 0; } off += optlen; len -= optlen; } if (len == 0) return true; bad: kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR); return false; } /***************************** Destination options header. *****************************/ #if IS_ENABLED(CONFIG_IPV6_MIP6) static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) { struct ipv6_destopt_hao *hao; struct inet6_skb_parm *opt = IP6CB(skb); struct ipv6hdr *ipv6h = ipv6_hdr(skb); SKB_DR(reason); int ret; if (opt->dsthao) { net_dbg_ratelimited("hao duplicated\n"); goto discard; } opt->dsthao = opt->dst1; opt->dst1 = 0; hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff); if (hao->length != 16) { net_dbg_ratelimited("hao invalid option length = %d\n", hao->length); SKB_DR_SET(reason, IP_INHDR); goto discard; } if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) { net_dbg_ratelimited("hao is not an unicast addr: %pI6\n", &hao->addr); SKB_DR_SET(reason, INVALID_PROTO); goto discard; } ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr, (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS); if (unlikely(ret < 0)) { SKB_DR_SET(reason, XFRM_POLICY); goto discard; } if (skb_cloned(skb)) { if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) goto discard; /* update all variable using below by copied skbuff */ hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff); ipv6h = ipv6_hdr(skb); } if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; swap(ipv6h->saddr, hao->addr); if (skb->tstamp == 0) __net_timestamp(skb); return true; discard: kfree_skb_reason(skb, reason); return false; } #endif static int ipv6_destopt_rcv(struct sk_buff *skb) { struct inet6_dev *idev = __in6_dev_get(skb->dev); struct inet6_skb_parm *opt = IP6CB(skb); #if IS_ENABLED(CONFIG_IPV6_MIP6) __u16 dstbuf; #endif struct dst_entry *dst = skb_dst(skb); struct net *net = dev_net(skb->dev); int extlen; if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + ((skb_transport_header(skb)[1] + 1) << 3)))) { __IP6_INC_STATS(dev_net(dst->dev), idev, IPSTATS_MIB_INHDRERRORS); fail_and_free: kfree_skb(skb); return -1; } extlen = (skb_transport_header(skb)[1] + 1) << 3; if (extlen > net->ipv6.sysctl.max_dst_opts_len) goto fail_and_free; opt->lastopt = opt->dst1 = skb_network_header_len(skb); #if IS_ENABLED(CONFIG_IPV6_MIP6) dstbuf = opt->dst1; #endif if (ip6_parse_tlv(false, skb, net->ipv6.sysctl.max_dst_opts_cnt)) { skb->transport_header += extlen; opt = IP6CB(skb); #if IS_ENABLED(CONFIG_IPV6_MIP6) opt->nhoff = dstbuf; #else opt->nhoff = opt->dst1; #endif return 1; } __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); return -1; } static void seg6_update_csum(struct sk_buff *skb) { struct ipv6_sr_hdr *hdr; 
struct in6_addr *addr; __be32 from, to; /* srh is at transport offset and seg_left is already decremented * but daddr is not yet updated with next segment */ hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb); addr = hdr->segments + hdr->segments_left; hdr->segments_left++; from = *(__be32 *)hdr; hdr->segments_left--; to = *(__be32 *)hdr; /* update skb csum with diff resulting from seg_left decrement */ update_csum_diff4(skb, from, to); /* compute csum diff between current and next segment and update */ update_csum_diff16(skb, (__be32 *)(&ipv6_hdr(skb)->daddr), (__be32 *)addr); } static int ipv6_srh_rcv(struct sk_buff *skb) { struct inet6_skb_parm *opt = IP6CB(skb); struct net *net = dev_net(skb->dev); struct ipv6_sr_hdr *hdr; struct inet6_dev *idev; struct in6_addr *addr; int accept_seg6; hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb); idev = __in6_dev_get(skb->dev); accept_seg6 = min(READ_ONCE(net->ipv6.devconf_all->seg6_enabled), READ_ONCE(idev->cnf.seg6_enabled)); if (!accept_seg6) { kfree_skb(skb); return -1; } #ifdef CONFIG_IPV6_SEG6_HMAC if (!seg6_hmac_validate_skb(skb)) { kfree_skb(skb); return -1; } #endif looped_back: if (hdr->segments_left == 0) { if (hdr->nexthdr == NEXTHDR_IPV6 || hdr->nexthdr == NEXTHDR_IPV4) { int offset = (hdr->hdrlen + 1) << 3; skb_postpull_rcsum(skb, skb_network_header(skb), skb_network_header_len(skb)); skb_pull(skb, offset); skb_postpull_rcsum(skb, skb_transport_header(skb), offset); skb_reset_network_header(skb); skb_reset_transport_header(skb); skb->encapsulation = 0; if (hdr->nexthdr == NEXTHDR_IPV4) skb->protocol = htons(ETH_P_IP); __skb_tunnel_rx(skb, skb->dev, net); netif_rx(skb); return -1; } opt->srcrt = skb_network_header_len(skb); opt->lastopt = opt->srcrt; skb->transport_header += (hdr->hdrlen + 1) << 3; opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb); return 1; } if (hdr->segments_left >= (hdr->hdrlen >> 1)) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ((&hdr->segments_left) - skb_network_header(skb))); return -1; } if (skb_cloned(skb)) { if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return -1; } hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb); } hdr->segments_left--; addr = hdr->segments + hdr->segments_left; skb_push(skb, sizeof(struct ipv6hdr)); if (skb->ip_summed == CHECKSUM_COMPLETE) seg6_update_csum(skb); ipv6_hdr(skb)->daddr = *addr; ip6_route_input(skb); if (skb_dst(skb)->error) { dst_input(skb); return -1; } if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) { if (ipv6_hdr(skb)->hop_limit <= 1) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); kfree_skb(skb); return -1; } ipv6_hdr(skb)->hop_limit--; skb_pull(skb, sizeof(struct ipv6hdr)); goto looped_back; } dst_input(skb); return -1; } static int ipv6_rpl_srh_rcv(struct sk_buff *skb) { struct ipv6_rpl_sr_hdr *hdr, *ohdr, *chdr; struct inet6_skb_parm *opt = IP6CB(skb); struct net *net = dev_net(skb->dev); struct inet6_dev *idev; struct ipv6hdr *oldhdr; unsigned char *buf; int accept_rpl_seg; int i, err; u64 n = 0; u32 r; idev = __in6_dev_get(skb->dev); accept_rpl_seg = net->ipv6.devconf_all->rpl_seg_enabled; if (accept_rpl_seg > idev->cnf.rpl_seg_enabled) accept_rpl_seg = idev->cnf.rpl_seg_enabled; if (!accept_rpl_seg) { kfree_skb(skb); return -1; } looped_back: hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb); if (hdr->segments_left == 0) { if 
(hdr->nexthdr == NEXTHDR_IPV6) { int offset = (hdr->hdrlen + 1) << 3; skb_postpull_rcsum(skb, skb_network_header(skb), skb_network_header_len(skb)); skb_pull(skb, offset); skb_postpull_rcsum(skb, skb_transport_header(skb), offset); skb_reset_network_header(skb); skb_reset_transport_header(skb); skb->encapsulation = 0; __skb_tunnel_rx(skb, skb->dev, net); netif_rx(skb); return -1; } opt->srcrt = skb_network_header_len(skb); opt->lastopt = opt->srcrt; skb->transport_header += (hdr->hdrlen + 1) << 3; opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb); return 1; } n = (hdr->hdrlen << 3) - hdr->pad - (16 - hdr->cmpre); r = do_div(n, (16 - hdr->cmpri)); /* checks if calculation was without remainder and n fits into * unsigned char which is segments_left field. Should not be * higher than that. */ if (r || (n + 1) > 255) { kfree_skb(skb); return -1; } if (hdr->segments_left > n + 1) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ((&hdr->segments_left) - skb_network_header(skb))); return -1; } hdr->segments_left--; i = n - hdr->segments_left; buf = kcalloc(struct_size(hdr, segments.addr, n + 2), 2, GFP_ATOMIC); if (unlikely(!buf)) { kfree_skb(skb); return -1; } ohdr = (struct ipv6_rpl_sr_hdr *)buf; ipv6_rpl_srh_decompress(ohdr, hdr, &ipv6_hdr(skb)->daddr, n); chdr = (struct ipv6_rpl_sr_hdr *)(buf + ((ohdr->hdrlen + 1) << 3)); if (ipv6_addr_is_multicast(&ohdr->rpl_segaddr[i])) { kfree_skb(skb); kfree(buf); return -1; } err = ipv6_chk_rpl_srh_loop(net, ohdr->rpl_segaddr, n + 1); if (err) { icmpv6_send(skb, ICMPV6_PARAMPROB, 0, 0); kfree_skb(skb); kfree(buf); return -1; } swap(ipv6_hdr(skb)->daddr, ohdr->rpl_segaddr[i]); ipv6_rpl_srh_compress(chdr, ohdr, &ipv6_hdr(skb)->daddr, n); oldhdr = ipv6_hdr(skb); skb_pull(skb, ((hdr->hdrlen + 1) << 3)); skb_postpull_rcsum(skb, oldhdr, sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3)); if (unlikely(!hdr->segments_left)) { if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0, GFP_ATOMIC)) { __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); kfree(buf); return -1; } oldhdr = ipv6_hdr(skb); } skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr)); skb_reset_network_header(skb); skb_mac_header_rebuild(skb); skb_set_transport_header(skb, sizeof(struct ipv6hdr)); memmove(ipv6_hdr(skb), oldhdr, sizeof(struct ipv6hdr)); memcpy(skb_transport_header(skb), chdr, (chdr->hdrlen + 1) << 3); ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); skb_postpush_rcsum(skb, ipv6_hdr(skb), sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3)); kfree(buf); ip6_route_input(skb); if (skb_dst(skb)->error) { dst_input(skb); return -1; } if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) { if (ipv6_hdr(skb)->hop_limit <= 1) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); kfree_skb(skb); return -1; } ipv6_hdr(skb)->hop_limit--; skb_pull(skb, sizeof(struct ipv6hdr)); goto looped_back; } dst_input(skb); return -1; } /******************************** Routing header. 
********************************/ /* called with rcu_read_lock() */ static int ipv6_rthdr_rcv(struct sk_buff *skb) { struct inet6_dev *idev = __in6_dev_get(skb->dev); struct inet6_skb_parm *opt = IP6CB(skb); struct in6_addr *addr = NULL; int n, i; struct ipv6_rt_hdr *hdr; struct rt0_hdr *rthdr; struct net *net = dev_net(skb->dev); int accept_source_route; accept_source_route = READ_ONCE(net->ipv6.devconf_all->accept_source_route); if (idev) accept_source_route = min(accept_source_route, READ_ONCE(idev->cnf.accept_source_route)); if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || !pskb_may_pull(skb, (skb_transport_offset(skb) + ((skb_transport_header(skb)[1] + 1) << 3)))) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb); if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) || skb->pkt_type != PACKET_HOST) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } switch (hdr->type) { case IPV6_SRCRT_TYPE_4: /* segment routing */ return ipv6_srh_rcv(skb); case IPV6_SRCRT_TYPE_3: /* rpl segment routing */ return ipv6_rpl_srh_rcv(skb); default: break; } looped_back: if (hdr->segments_left == 0) { switch (hdr->type) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_SRCRT_TYPE_2: /* Silently discard type 2 header unless it was * processed by own */ if (!addr) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } break; #endif default: break; } opt->lastopt = opt->srcrt = skb_network_header_len(skb); skb->transport_header += (hdr->hdrlen + 1) << 3; opt->dst0 = opt->dst1; opt->dst1 = 0; opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb); return 1; } switch (hdr->type) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_SRCRT_TYPE_2: if (accept_source_route < 0) goto unknown_rh; /* Silently discard invalid RTH type 2 */ if (hdr->hdrlen != 2 || hdr->segments_left != 1) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -1; } break; #endif default: goto unknown_rh; } /* * This is the routing header forwarding algorithm from * RFC 2460, page 16. */ n = hdr->hdrlen >> 1; if (hdr->segments_left > n) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ((&hdr->segments_left) - skb_network_header(skb))); return -1; } /* We are about to mangle packet header. Be careful! Do not damage packets queued somewhere. 
*/ if (skb_cloned(skb)) { /* the copy is a forwarded packet */ if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return -1; } hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb); } if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; i = n - --hdr->segments_left; rthdr = (struct rt0_hdr *) hdr; addr = rthdr->addr; addr += i - 1; switch (hdr->type) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_SRCRT_TYPE_2: if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, (xfrm_address_t *)&ipv6_hdr(skb)->saddr, IPPROTO_ROUTING) < 0) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } break; #endif default: break; } if (ipv6_addr_is_multicast(addr)) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS); kfree_skb(skb); return -1; } swap(*addr, ipv6_hdr(skb)->daddr); ip6_route_input(skb); if (skb_dst(skb)->error) { skb_push(skb, -skb_network_offset(skb)); dst_input(skb); return -1; } if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) { if (ipv6_hdr(skb)->hop_limit <= 1) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); kfree_skb(skb); return -1; } ipv6_hdr(skb)->hop_limit--; goto looped_back; } skb_push(skb, -skb_network_offset(skb)); dst_input(skb); return -1; unknown_rh: __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb_network_header(skb)); return -1; } static const struct inet6_protocol rthdr_protocol = { .handler = ipv6_rthdr_rcv, .flags = INET6_PROTO_NOPOLICY, }; static const struct inet6_protocol destopt_protocol = { .handler = ipv6_destopt_rcv, .flags = INET6_PROTO_NOPOLICY, }; static const struct inet6_protocol nodata_protocol = { .handler = dst_discard, .flags = INET6_PROTO_NOPOLICY, }; int __init ipv6_exthdrs_init(void) { int ret; ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING); if (ret) goto out; ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS); if (ret) goto out_rthdr; ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE); if (ret) goto out_destopt; out: return ret; out_destopt: inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS); out_rthdr: inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING); goto out; }; void ipv6_exthdrs_exit(void) { inet6_del_protocol(&nodata_protocol, IPPROTO_NONE); inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS); inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING); } /********************************** Hop-by-hop options. 
**********************************/ /* Router Alert as of RFC 2711 */ static bool ipv6_hop_ra(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); if (nh[optoff + 1] == 2) { IP6CB(skb)->flags |= IP6SKB_ROUTERALERT; memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra)); return true; } net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n", nh[optoff + 1]); kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR); return false; } /* IOAM */ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff) { struct ioam6_trace_hdr *trace; struct ioam6_namespace *ns; struct ioam6_hdr *hdr; /* Bad alignment (must be 4n-aligned) */ if (optoff & 3) goto drop; /* Ignore if IOAM is not enabled on ingress */ if (!READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_enabled)) goto ignore; /* Truncated Option header */ hdr = (struct ioam6_hdr *)(skb_network_header(skb) + optoff); if (hdr->opt_len < 2) goto drop; switch (hdr->type) { case IOAM6_TYPE_PREALLOC: /* Truncated Pre-allocated Trace header */ if (hdr->opt_len < 2 + sizeof(*trace)) goto drop; /* Malformed Pre-allocated Trace header */ trace = (struct ioam6_trace_hdr *)((u8 *)hdr + sizeof(*hdr)); if (hdr->opt_len < 2 + sizeof(*trace) + trace->remlen * 4) goto drop; /* Ignore if the IOAM namespace is unknown */ ns = ioam6_namespace(dev_net(skb->dev), trace->namespace_id); if (!ns) goto ignore; if (!skb_valid_dst(skb)) ip6_route_input(skb); /* About to mangle packet header */ if (skb_ensure_writable(skb, optoff + 2 + hdr->opt_len)) goto drop; /* Trace pointer may have changed */ trace = (struct ioam6_trace_hdr *)(skb_network_header(skb) + optoff + sizeof(*hdr)); ioam6_fill_trace_data(skb, ns, trace, true); ioam6_event(IOAM6_EVENT_TRACE, dev_net(skb->dev), GFP_ATOMIC, (void *)trace, hdr->opt_len - 2); break; default: break; } ignore: return true; drop: kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR); return false; } /* Jumbo payload */ static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); SKB_DR(reason); u32 pkt_len; if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", nh[optoff+1]); SKB_DR_SET(reason, IP_INHDR); goto drop; } pkt_len = ntohl(*(__be32 *)(nh + optoff + 2)); if (pkt_len <= IPV6_MAXPLEN) { icmpv6_param_prob_reason(skb, ICMPV6_HDR_FIELD, optoff + 2, SKB_DROP_REASON_IP_INHDR); return false; } if (ipv6_hdr(skb)->payload_len) { icmpv6_param_prob_reason(skb, ICMPV6_HDR_FIELD, optoff, SKB_DROP_REASON_IP_INHDR); return false; } if (pkt_len > skb->len - sizeof(struct ipv6hdr)) { SKB_DR_SET(reason, PKT_TOO_SMALL); goto drop; } if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) goto drop; IP6CB(skb)->flags |= IP6SKB_JUMBOGRAM; return true; drop: kfree_skb_reason(skb, reason); return false; } /* CALIPSO RFC 5570 */ static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff) { const unsigned char *nh = skb_network_header(skb); if (nh[optoff + 1] < 8) goto drop; if (nh[optoff + 6] * 4 + 8 > nh[optoff + 1]) goto drop; if (!calipso_validate(skb, nh + optoff)) goto drop; return true; drop: kfree_skb_reason(skb, SKB_DROP_REASON_IP_INHDR); return false; } int ipv6_parse_hopopts(struct sk_buff *skb) { struct inet6_skb_parm *opt = IP6CB(skb); struct net *net = dev_net(skb->dev); int extlen; /* * skb_network_header(skb) is equal to skb->data, and * skb_network_header_len(skb) is always equal to * sizeof(struct ipv6hdr) by definition of * hop-by-hop options. 
*/ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) || !pskb_may_pull(skb, (sizeof(struct ipv6hdr) + ((skb_transport_header(skb)[1] + 1) << 3)))) { fail_and_free: kfree_skb(skb); return -1; } extlen = (skb_transport_header(skb)[1] + 1) << 3; if (extlen > net->ipv6.sysctl.max_hbh_opts_len) goto fail_and_free; opt->flags |= IP6SKB_HOPBYHOP; if (ip6_parse_tlv(true, skb, net->ipv6.sysctl.max_hbh_opts_cnt)) { skb->transport_header += extlen; opt = IP6CB(skb); opt->nhoff = sizeof(struct ipv6hdr); return 1; } return -1; } /* * Creating outbound headers. * * "build" functions work when skb is filled from head to tail (datagram) * "push" functions work when headers are added from tail to head (tcp) * * In both cases we assume, that caller reserved enough room * for headers. */ static void ipv6_push_rthdr0(struct sk_buff *skb, u8 *proto, struct ipv6_rt_hdr *opt, struct in6_addr **addr_p, struct in6_addr *saddr) { struct rt0_hdr *phdr, *ihdr; int hops; ihdr = (struct rt0_hdr *) opt; phdr = skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3); memcpy(phdr, ihdr, sizeof(struct rt0_hdr)); hops = ihdr->rt_hdr.hdrlen >> 1; if (hops > 1) memcpy(phdr->addr, ihdr->addr + 1, (hops - 1) * sizeof(struct in6_addr)); phdr->addr[hops - 1] = **addr_p; *addr_p = ihdr->addr; phdr->rt_hdr.nexthdr = *proto; *proto = NEXTHDR_ROUTING; } static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto, struct ipv6_rt_hdr *opt, struct in6_addr **addr_p, struct in6_addr *saddr) { struct ipv6_sr_hdr *sr_phdr, *sr_ihdr; int plen, hops; sr_ihdr = (struct ipv6_sr_hdr *)opt; plen = (sr_ihdr->hdrlen + 1) << 3; sr_phdr = skb_push(skb, plen); memcpy(sr_phdr, sr_ihdr, sizeof(struct ipv6_sr_hdr)); hops = sr_ihdr->first_segment + 1; memcpy(sr_phdr->segments + 1, sr_ihdr->segments + 1, (hops - 1) * sizeof(struct in6_addr)); sr_phdr->segments[0] = **addr_p; *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left]; if (sr_ihdr->hdrlen > hops * 2) { int tlvs_offset, tlvs_length; tlvs_offset = (1 + hops * 2) << 3; tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3; memcpy((char *)sr_phdr + tlvs_offset, (char *)sr_ihdr + tlvs_offset, tlvs_length); } #ifdef CONFIG_IPV6_SEG6_HMAC if (sr_has_hmac(sr_phdr)) { struct net *net = NULL; if (skb->dev) net = dev_net(skb->dev); else if (skb->sk) net = sock_net(skb->sk); WARN_ON(!net); if (net) seg6_push_hmac(net, saddr, sr_phdr); } #endif sr_phdr->nexthdr = *proto; *proto = NEXTHDR_ROUTING; } static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto, struct ipv6_rt_hdr *opt, struct in6_addr **addr_p, struct in6_addr *saddr) { switch (opt->type) { case IPV6_SRCRT_TYPE_0: case IPV6_SRCRT_STRICT: case IPV6_SRCRT_TYPE_2: ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr); break; case IPV6_SRCRT_TYPE_4: ipv6_push_rthdr4(skb, proto, opt, addr_p, saddr); break; default: break; } } static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt) { struct ipv6_opt_hdr *h = skb_push(skb, ipv6_optlen(opt)); memcpy(h, opt, ipv6_optlen(opt)); h->nexthdr = *proto; *proto = type; } void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto, struct in6_addr **daddr, struct in6_addr *saddr) { if (opt->srcrt) { ipv6_push_rthdr(skb, proto, opt->srcrt, daddr, saddr); /* * IPV6_RTHDRDSTOPTS is ignored * unless IPV6_RTHDR is set (RFC3542). 
*/ if (opt->dst0opt) ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt); } if (opt->hopopt) ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt); } void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto) { if (opt->dst1opt) ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt); } EXPORT_SYMBOL(ipv6_push_frag_opts); struct ipv6_txoptions * ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt) { struct ipv6_txoptions *opt2; opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC); if (opt2) { long dif = (char *)opt2 - (char *)opt; memcpy(opt2, opt, opt->tot_len); if (opt2->hopopt) *((char **)&opt2->hopopt) += dif; if (opt2->dst0opt) *((char **)&opt2->dst0opt) += dif; if (opt2->dst1opt) *((char **)&opt2->dst1opt) += dif; if (opt2->srcrt) *((char **)&opt2->srcrt) += dif; refcount_set(&opt2->refcnt, 1); } return opt2; } EXPORT_SYMBOL_GPL(ipv6_dup_options); static void ipv6_renew_option(int renewtype, struct ipv6_opt_hdr **dest, struct ipv6_opt_hdr *old, struct ipv6_opt_hdr *new, int newtype, char **p) { struct ipv6_opt_hdr *src; src = (renewtype == newtype ? new : old); if (!src) return; memcpy(*p, src, ipv6_optlen(src)); *dest = (struct ipv6_opt_hdr *)*p; *p += CMSG_ALIGN(ipv6_optlen(*dest)); } /** * ipv6_renew_options - replace a specific ext hdr with a new one. * * @sk: sock from which to allocate memory * @opt: original options * @newtype: option type to replace in @opt * @newopt: new option of type @newtype to replace (user-mem) * * Returns a new set of options which is a copy of @opt with the * option type @newtype replaced with @newopt. * * @opt may be NULL, in which case a new set of options is returned * containing just @newopt. * * @newopt may be NULL, in which case the specified option type is * not copied into the new set of options. * * The new set of options is allocated from the socket option memory * buffer of @sk. */ struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, int newtype, struct ipv6_opt_hdr *newopt) { int tot_len = 0; char *p; struct ipv6_txoptions *opt2; if (opt) { if (newtype != IPV6_HOPOPTS && opt->hopopt) tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt)); if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt) tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt)); if (newtype != IPV6_RTHDR && opt->srcrt) tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt)); if (newtype != IPV6_DSTOPTS && opt->dst1opt) tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt)); } if (newopt) tot_len += CMSG_ALIGN(ipv6_optlen(newopt)); if (!tot_len) return NULL; tot_len += sizeof(*opt2); opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC); if (!opt2) return ERR_PTR(-ENOBUFS); memset(opt2, 0, tot_len); refcount_set(&opt2->refcnt, 1); opt2->tot_len = tot_len; p = (char *)(opt2 + 1); ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt, (opt ? opt->hopopt : NULL), newopt, newtype, &p); ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt, (opt ? opt->dst0opt : NULL), newopt, newtype, &p); ipv6_renew_option(IPV6_RTHDR, (struct ipv6_opt_hdr **)&opt2->srcrt, (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL), newopt, newtype, &p); ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt, (opt ? opt->dst1opt : NULL), newopt, newtype, &p); opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) + (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) + (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0); opt2->opt_flen = (opt2->dst1opt ? 
ipv6_optlen(opt2->dst1opt) : 0); return opt2; } struct ipv6_txoptions *__ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt) { /* * ignore the dest before srcrt unless srcrt is being included. * --yoshfuji */ if (opt->dst0opt && !opt->srcrt) { if (opt_space != opt) { memcpy(opt_space, opt, sizeof(*opt_space)); opt = opt_space; } opt->opt_nflen -= ipv6_optlen(opt->dst0opt); opt->dst0opt = NULL; } return opt; } EXPORT_SYMBOL_GPL(__ipv6_fixup_options); /** * fl6_update_dst - update flowi destination address with info given * by srcrt option, if any. * * @fl6: flowi6 for which daddr is to be updated * @opt: struct ipv6_txoptions in which to look for srcrt opt * @orig: copy of original daddr address if modified * * Returns NULL if no txoptions or no srcrt, otherwise returns orig * and initial value of fl6->daddr set in orig */ struct in6_addr *fl6_update_dst(struct flowi6 *fl6, const struct ipv6_txoptions *opt, struct in6_addr *orig) { if (!opt || !opt->srcrt) return NULL; *orig = fl6->daddr; switch (opt->srcrt->type) { case IPV6_SRCRT_TYPE_0: case IPV6_SRCRT_STRICT: case IPV6_SRCRT_TYPE_2: fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr; break; case IPV6_SRCRT_TYPE_4: { struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)opt->srcrt; fl6->daddr = srh->segments[srh->segments_left]; break; } default: return NULL; } return orig; } EXPORT_SYMBOL_GPL(fl6_update_dst);
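The TLV parser above keys its treatment of unrecognized options off the two high-order bits of the option type, as the switch in ip6_tlvopt_unknown() shows. The stand-alone sketch below (not part of exthdrs.c; the enum and helper names are hypothetical) illustrates that RFC 8200 encoding in plain user-space C:

/* Illustrative sketch only: mirrors the (type & 0xC0) >> 6 test above. */
#include <stdio.h>

enum tlv_unknown_action {
	TLV_SKIP = 0,             /* 00: skip the option, keep parsing            */
	TLV_DISCARD = 1,          /* 01: silently discard the packet              */
	TLV_DISCARD_ICMP = 2,     /* 10: discard, always send ICMP Param Problem  */
	TLV_DISCARD_ICMP_UNI = 3, /* 11: discard, ICMP only if dst not multicast  */
};

static enum tlv_unknown_action tlv_action(unsigned char opt_type)
{
	/* Same bit test as ip6_tlvopt_unknown(): top two bits of the type. */
	return (enum tlv_unknown_action)((opt_type & 0xC0) >> 6);
}

int main(void)
{
	/* Router Alert (0x05) -> 00 -> skip if unknown;
	 * Jumbo Payload (0xC2) -> 11 -> discard, ICMP unless multicast. */
	printf("0x05 -> %d\n", tlv_action(0x05));
	printf("0xC2 -> %d\n", tlv_action(0xC2));
	return 0;
}

These are the same outcomes the kernel switch produces: action 0 returns true and parsing continues, the other three drop the packet, with ICMP generation gated on the multicast check for action 3.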
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_VXLAN_H
#define __NET_VXLAN_H 1

#include <linux/if_vlan.h>
#include <linux/rhashtable-types.h>
#include <net/udp_tunnel.h>
#include <net/dst_metadata.h>
#include <net/rtnetlink.h>
#include <net/switchdev.h>
#include <net/nexthop.h>

#define IANA_VXLAN_UDP_PORT	4789
#define IANA_VXLAN_GPE_UDP_PORT	4790

/* VXLAN protocol (RFC 7348) header:
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |R|R|R|R|I|R|R|R|               Reserved                        |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                VXLAN Network Identifier (VNI) |   Reserved    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * I = VXLAN Network Identifier (VNI) present.
 */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};

/* VXLAN header flags.
*/ #define VXLAN_HF_VNI cpu_to_be32(BIT(27)) #define VXLAN_N_VID (1u << 24) #define VXLAN_VID_MASK (VXLAN_N_VID - 1) #define VXLAN_VNI_MASK cpu_to_be32(VXLAN_VID_MASK << 8) #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) #define VNI_HASH_BITS 10 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS) #define FDB_HASH_BITS 8 #define FDB_HASH_SIZE (1<<FDB_HASH_BITS) /* Remote checksum offload for VXLAN (VXLAN_F_REMCSUM_[RT]X): * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |R|R|R|R|I|R|R|R|R|R|C| Reserved | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | VXLAN Network Identifier (VNI) |O| Csum start | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * C = Remote checksum offload bit. When set indicates that the * remote checksum offload data is present. * * O = Offset bit. Indicates the checksum offset relative to * checksum start. * * Csum start = Checksum start divided by two. * * http://tools.ietf.org/html/draft-herbert-vxlan-rco */ /* VXLAN-RCO header flags. */ #define VXLAN_HF_RCO cpu_to_be32(BIT(21)) /* Remote checksum offload header option */ #define VXLAN_RCO_MASK cpu_to_be32(0x7f) /* Last byte of vni field */ #define VXLAN_RCO_UDP cpu_to_be32(0x80) /* Indicate UDP RCO (TCP when not set *) */ #define VXLAN_RCO_SHIFT 1 /* Left shift of start */ #define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1) #define VXLAN_MAX_REMCSUM_START (0x7f << VXLAN_RCO_SHIFT) /* * VXLAN Group Based Policy Extension (VXLAN_F_GBP): * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R| Group Policy ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | VXLAN Network Identifier (VNI) | Reserved | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * G = Group Policy ID present. * * D = Don't Learn bit. When set, this bit indicates that the egress * VTEP MUST NOT learn the source address of the encapsulated frame. * * A = Indicates that the group policy has already been applied to * this packet. Policies MUST NOT be applied by devices when the * A bit is set. * * https://tools.ietf.org/html/draft-smith-vxlan-group-policy */ struct vxlanhdr_gbp { u8 vx_flags; #ifdef __LITTLE_ENDIAN_BITFIELD u8 reserved_flags1:3, policy_applied:1, reserved_flags2:2, dont_learn:1, reserved_flags3:1; #elif defined(__BIG_ENDIAN_BITFIELD) u8 reserved_flags1:1, dont_learn:1, reserved_flags2:2, policy_applied:1, reserved_flags3:3; #else #error "Please fix <asm/byteorder.h>" #endif __be16 policy_id; __be32 vx_vni; }; /* VXLAN-GBP header flags. */ #define VXLAN_HF_GBP cpu_to_be32(BIT(31)) #define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | cpu_to_be32(0xFFFFFF)) /* skb->mark mapping * * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |R|R|R|R|R|R|R|R|R|D|R|R|A|R|R|R| Group Policy ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ #define VXLAN_GBP_DONT_LEARN (BIT(6) << 16) #define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16) #define VXLAN_GBP_ID_MASK (0xFFFF) #define VXLAN_GBP_MASK (VXLAN_GBP_DONT_LEARN | VXLAN_GBP_POLICY_APPLIED | \ VXLAN_GBP_ID_MASK) /* * VXLAN Generic Protocol Extension (VXLAN_F_GPE): * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |R|R|Ver|I|P|R|O| Reserved |Next Protocol | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | VXLAN Network Identifier (VNI) | Reserved | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Ver = Version. 
Indicates VXLAN GPE protocol version. * * P = Next Protocol Bit. The P bit is set to indicate that the * Next Protocol field is present. * * O = OAM Flag Bit. The O bit is set to indicate that the packet * is an OAM packet. * * Next Protocol = This 8 bit field indicates the protocol header * immediately following the VXLAN GPE header. * * https://tools.ietf.org/html/draft-ietf-nvo3-vxlan-gpe-01 */ struct vxlanhdr_gpe { #if defined(__LITTLE_ENDIAN_BITFIELD) u8 oam_flag:1, reserved_flags1:1, np_applied:1, instance_applied:1, version:2, reserved_flags2:2; #elif defined(__BIG_ENDIAN_BITFIELD) u8 reserved_flags2:2, version:2, instance_applied:1, np_applied:1, reserved_flags1:1, oam_flag:1; #endif u8 reserved_flags3; u8 reserved_flags4; u8 next_protocol; __be32 vx_vni; }; /* VXLAN-GPE header flags. */ #define VXLAN_HF_VER cpu_to_be32(BIT(29) | BIT(28)) #define VXLAN_HF_NP cpu_to_be32(BIT(26)) #define VXLAN_HF_OAM cpu_to_be32(BIT(24)) #define VXLAN_GPE_USED_BITS (VXLAN_HF_VER | VXLAN_HF_NP | VXLAN_HF_OAM | \ cpu_to_be32(0xff)) struct vxlan_metadata { u32 gbp; }; /* per UDP socket information */ struct vxlan_sock { struct hlist_node hlist; struct socket *sock; struct hlist_head vni_list[VNI_HASH_SIZE]; refcount_t refcnt; u32 flags; }; union vxlan_addr { struct sockaddr_in sin; struct sockaddr_in6 sin6; struct sockaddr sa; }; struct vxlan_rdst { union vxlan_addr remote_ip; __be16 remote_port; u8 offloaded:1; __be32 remote_vni; u32 remote_ifindex; struct net_device *remote_dev; struct list_head list; struct rcu_head rcu; struct dst_cache dst_cache; }; struct vxlan_config { union vxlan_addr remote_ip; union vxlan_addr saddr; __be32 vni; int remote_ifindex; int mtu; __be16 dst_port; u16 port_min; u16 port_max; u8 tos; u8 ttl; __be32 label; enum ifla_vxlan_label_policy label_policy; u32 flags; unsigned long age_interval; unsigned int addrmax; bool no_share; enum ifla_vxlan_df df; }; enum { VXLAN_VNI_STATS_RX, VXLAN_VNI_STATS_RX_DROPS, VXLAN_VNI_STATS_RX_ERRORS, VXLAN_VNI_STATS_TX, VXLAN_VNI_STATS_TX_DROPS, VXLAN_VNI_STATS_TX_ERRORS, }; struct vxlan_vni_stats { u64 rx_packets; u64 rx_bytes; u64 rx_drops; u64 rx_errors; u64 tx_packets; u64 tx_bytes; u64 tx_drops; u64 tx_errors; }; struct vxlan_vni_stats_pcpu { struct vxlan_vni_stats stats; struct u64_stats_sync syncp; }; struct vxlan_dev_node { struct hlist_node hlist; struct vxlan_dev *vxlan; }; struct vxlan_vni_node { struct rhash_head vnode; struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */ #if IS_ENABLED(CONFIG_IPV6) struct vxlan_dev_node hlist6; /* vni hash table for IPv6 socket */ #endif struct list_head vlist; __be32 vni; union vxlan_addr remote_ip; /* default remote ip for this vni */ struct vxlan_vni_stats_pcpu __percpu *stats; struct rcu_head rcu; }; struct vxlan_vni_group { struct rhashtable vni_hash; struct list_head vni_list; u32 num_vnis; }; /* Pseudo network device */ struct vxlan_dev { struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */ #if IS_ENABLED(CONFIG_IPV6) struct vxlan_dev_node hlist6; /* vni hash table for IPv6 socket */ #endif struct list_head next; /* vxlan's per namespace list */ struct vxlan_sock __rcu *vn4_sock; /* listening socket for IPv4 */ #if IS_ENABLED(CONFIG_IPV6) struct vxlan_sock __rcu *vn6_sock; /* listening socket for IPv6 */ #endif struct net_device *dev; struct net *net; /* netns for packet i/o */ struct vxlan_rdst default_dst; /* default destination */ struct timer_list age_timer; spinlock_t hash_lock[FDB_HASH_SIZE]; unsigned int addrcnt; struct gro_cells gro_cells; struct 
vxlan_config cfg; struct vxlan_vni_group __rcu *vnigrp; struct hlist_head fdb_head[FDB_HASH_SIZE]; struct rhashtable mdb_tbl; struct hlist_head mdb_list; unsigned int mdb_seq; }; #define VXLAN_F_LEARN 0x01 #define VXLAN_F_PROXY 0x02 #define VXLAN_F_RSC 0x04 #define VXLAN_F_L2MISS 0x08 #define VXLAN_F_L3MISS 0x10 #define VXLAN_F_IPV6 0x20 #define VXLAN_F_UDP_ZERO_CSUM_TX 0x40 #define VXLAN_F_UDP_ZERO_CSUM6_TX 0x80 #define VXLAN_F_UDP_ZERO_CSUM6_RX 0x100 #define VXLAN_F_REMCSUM_TX 0x200 #define VXLAN_F_REMCSUM_RX 0x400 #define VXLAN_F_GBP 0x800 #define VXLAN_F_REMCSUM_NOPARTIAL 0x1000 #define VXLAN_F_COLLECT_METADATA 0x2000 #define VXLAN_F_GPE 0x4000 #define VXLAN_F_IPV6_LINKLOCAL 0x8000 #define VXLAN_F_TTL_INHERIT 0x10000 #define VXLAN_F_VNIFILTER 0x20000 #define VXLAN_F_MDB 0x40000 #define VXLAN_F_LOCALBYPASS 0x80000 /* Flags that are used in the receive path. These flags must match in * order for a socket to be shareable */ #define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \ VXLAN_F_GPE | \ VXLAN_F_UDP_ZERO_CSUM6_RX | \ VXLAN_F_REMCSUM_RX | \ VXLAN_F_REMCSUM_NOPARTIAL | \ VXLAN_F_COLLECT_METADATA | \ VXLAN_F_VNIFILTER) /* Flags that can be set together with VXLAN_F_GPE. */ #define VXLAN_F_ALLOWED_GPE (VXLAN_F_GPE | \ VXLAN_F_IPV6 | \ VXLAN_F_IPV6_LINKLOCAL | \ VXLAN_F_UDP_ZERO_CSUM_TX | \ VXLAN_F_UDP_ZERO_CSUM6_TX | \ VXLAN_F_UDP_ZERO_CSUM6_RX | \ VXLAN_F_COLLECT_METADATA | \ VXLAN_F_VNIFILTER | \ VXLAN_F_LOCALBYPASS) struct net_device *vxlan_dev_create(struct net *net, const char *name, u8 name_assign_type, struct vxlan_config *conf); static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, netdev_features_t features) { u8 l4_hdr = 0; if (!skb->encapsulation) return features; switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): l4_hdr = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6): l4_hdr = ipv6_hdr(skb)->nexthdr; break; default: return features; } if ((l4_hdr == IPPROTO_UDP) && (skb->inner_protocol_type != ENCAP_TYPE_ETHER || skb->inner_protocol != htons(ETH_P_TEB) || (skb_inner_mac_header(skb) - skb_transport_header(skb) != sizeof(struct udphdr) + sizeof(struct vxlanhdr)) || (skb->ip_summed != CHECKSUM_NONE && !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto)))) return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); return features; } static inline int vxlan_headroom(u32 flags) { /* VXLAN: IP4/6 header + UDP + VXLAN + Ethernet header */ /* VXLAN-GPE: IP4/6 header + UDP + VXLAN */ return (flags & VXLAN_F_IPV6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr)) + sizeof(struct udphdr) + sizeof(struct vxlanhdr) + (flags & VXLAN_F_GPE ? 0 : ETH_HLEN); } static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb) { return (struct vxlanhdr *)(udp_hdr(skb) + 1); } static inline __be32 vxlan_vni(__be32 vni_field) { #if defined(__BIG_ENDIAN) return (__force __be32)((__force u32)vni_field >> 8); #else return (__force __be32)((__force u32)(vni_field & VXLAN_VNI_MASK) << 8); #endif } static inline __be32 vxlan_vni_field(__be32 vni) { #if defined(__BIG_ENDIAN) return (__force __be32)((__force u32)vni << 8); #else return (__force __be32)((__force u32)vni >> 8); #endif } static inline size_t vxlan_rco_start(__be32 vni_field) { return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT; } static inline size_t vxlan_rco_offset(__be32 vni_field) { return (vni_field & VXLAN_RCO_UDP) ? 
offsetof(struct udphdr, check) : offsetof(struct tcphdr, check); } static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset) { __be32 vni_field = cpu_to_be32(start >> VXLAN_RCO_SHIFT); if (offset == offsetof(struct udphdr, check)) vni_field |= VXLAN_RCO_UDP; return vni_field; } static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs) { return vs->sock->sk->sk_family; } #if IS_ENABLED(CONFIG_IPV6) static inline bool vxlan_addr_any(const union vxlan_addr *ipa) { if (ipa->sa.sa_family == AF_INET6) return ipv6_addr_any(&ipa->sin6.sin6_addr); else return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); } static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) { if (ipa->sa.sa_family == AF_INET6) return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr); else return ipv4_is_multicast(ipa->sin.sin_addr.s_addr); } #else /* !IS_ENABLED(CONFIG_IPV6) */ static inline bool vxlan_addr_any(const union vxlan_addr *ipa) { return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); } static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) { return ipv4_is_multicast(ipa->sin.sin_addr.s_addr); } #endif /* IS_ENABLED(CONFIG_IPV6) */ static inline bool netif_is_vxlan(const struct net_device *dev) { return dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "vxlan"); } struct switchdev_notifier_vxlan_fdb_info { struct switchdev_notifier_info info; /* must be first */ union vxlan_addr remote_ip; __be16 remote_port; __be32 remote_vni; u32 remote_ifindex; u8 eth_addr[ETH_ALEN]; __be32 vni; bool offloaded; bool added_by_user; }; #if IS_ENABLED(CONFIG_VXLAN) int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni, struct switchdev_notifier_vxlan_fdb_info *fdb_info); int vxlan_fdb_replay(const struct net_device *dev, __be32 vni, struct notifier_block *nb, struct netlink_ext_ack *extack); void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni); #else static inline int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni, struct switchdev_notifier_vxlan_fdb_info *fdb_info) { return -ENOENT; } static inline int vxlan_fdb_replay(const struct net_device *dev, __be32 vni, struct notifier_block *nb, struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } static inline void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni) { } #endif static inline void vxlan_flag_attr_error(int attrtype, struct netlink_ext_ack *extack) { #define VXLAN_FLAG(flg) \ case IFLA_VXLAN_##flg: \ NL_SET_ERR_MSG_MOD(extack, \ "cannot change " #flg " flag"); \ break switch (attrtype) { VXLAN_FLAG(TTL_INHERIT); VXLAN_FLAG(LEARNING); VXLAN_FLAG(PROXY); VXLAN_FLAG(RSC); VXLAN_FLAG(L2MISS); VXLAN_FLAG(L3MISS); VXLAN_FLAG(COLLECT_METADATA); VXLAN_FLAG(UDP_ZERO_CSUM6_TX); VXLAN_FLAG(UDP_ZERO_CSUM6_RX); VXLAN_FLAG(REMCSUM_TX); VXLAN_FLAG(REMCSUM_RX); VXLAN_FLAG(GBP); VXLAN_FLAG(GPE); VXLAN_FLAG(REMCSUM_NOPARTIAL); default: NL_SET_ERR_MSG_MOD(extack, \ "cannot change flag"); break; } #undef VXLAN_FLAG } static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh, u32 hash, struct vxlan_rdst *rdst) { struct fib_nh_common *nhc; nhc = nexthop_path_fdb_result(nh, hash >> 1); if (unlikely(!nhc)) return false; switch (nhc->nhc_gw_family) { case AF_INET: rdst->remote_ip.sin.sin_addr.s_addr = nhc->nhc_gw.ipv4; rdst->remote_ip.sa.sa_family = AF_INET; break; case AF_INET6: rdst->remote_ip.sin6.sin6_addr = nhc->nhc_gw.ipv6; rdst->remote_ip.sa.sa_family = AF_INET6; break; } return true; } static inline void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, const 
struct vxlan_metadata *md) { struct vxlanhdr_gbp *gbp; if (!md->gbp) return; gbp = (struct vxlanhdr_gbp *)vxh; vxh->vx_flags |= VXLAN_HF_GBP; if (md->gbp & VXLAN_GBP_DONT_LEARN) gbp->dont_learn = 1; if (md->gbp & VXLAN_GBP_POLICY_APPLIED) gbp->policy_applied = 1; gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK); } #endif
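As a companion to the vxlan_vni()/vxlan_vni_field() helpers above, here is a minimal user-space sketch (not from vxlan.h; the function names are made up for illustration) of how the 24-bit VNI occupies the upper three bytes of the network-order vx_vni word, with the low byte reserved. Unlike the kernel helpers, which keep the value in a __be32 encoding, this version returns a plain host-order integer:

/* Illustrative sketch only: VNI <-> vx_vni field conversion in user space. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Extract the 24-bit VNI from the vx_vni field as carried on the wire. */
static uint32_t vni_from_field(uint32_t vx_vni_net)
{
	return ntohl(vx_vni_net) >> 8;	/* drop the reserved low byte */
}

/* Build the on-wire vx_vni field from a host-order VNI. */
static uint32_t field_from_vni(uint32_t vni)
{
	return htonl(vni << 8);
}

int main(void)
{
	uint32_t vni = 42;
	uint32_t field = field_from_vni(vni);

	printf("vni=%u field=0x%08x back=%u\n",
	       (unsigned)vni, (unsigned)ntohl(field),
	       (unsigned)vni_from_field(field));
	return 0;
}

The round trip prints the original VNI, which is the invariant the kernel's endian-specific shift variants preserve.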
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 *  Copyright (C) 2005 Mike Isely <isely@pobox.com>
 */

#include "pvrusb2-context.h"
#include "pvrusb2-io.h"
#include "pvrusb2-ioread.h"
#include "pvrusb2-hdw.h"
#include "pvrusb2-debug.h"
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>

static struct pvr2_context *pvr2_context_exist_first;
static struct pvr2_context *pvr2_context_exist_last;
static struct pvr2_context *pvr2_context_notify_first;
static struct pvr2_context *pvr2_context_notify_last;
static DEFINE_MUTEX(pvr2_context_mutex);
static DECLARE_WAIT_QUEUE_HEAD(pvr2_context_sync_data);
static DECLARE_WAIT_QUEUE_HEAD(pvr2_context_cleanup_data);
static int pvr2_context_cleanup_flag;
static int pvr2_context_cleaned_flag;
static struct task_struct *pvr2_context_thread_ptr;

static void pvr2_context_set_notify(struct pvr2_context *mp, int fl)
{
	int signal_flag = 0;
	mutex_lock(&pvr2_context_mutex);
	if (fl) {
		if (!mp->notify_flag) {
			signal_flag = (pvr2_context_notify_first == NULL);
			mp->notify_prev = pvr2_context_notify_last;
			mp->notify_next = NULL;
			pvr2_context_notify_last = mp;
			if (mp->notify_prev) {
				mp->notify_prev->notify_next = mp;
			} else {
				pvr2_context_notify_first = mp;
			}
			mp->notify_flag = !0;
		}
	} else {
		if (mp->notify_flag) {
			mp->notify_flag = 0;
			if (mp->notify_next) {
				mp->notify_next->notify_prev = mp->notify_prev;
			} else {
				pvr2_context_notify_last = mp->notify_prev;
			}
			if (mp->notify_prev) {
				mp->notify_prev->notify_next = mp->notify_next;
			} else {
				pvr2_context_notify_first = mp->notify_next;
			}
		}
	}
	mutex_unlock(&pvr2_context_mutex);
	if (signal_flag)
		wake_up(&pvr2_context_sync_data);
}

static void pvr2_context_destroy(struct pvr2_context *mp)
{
	pvr2_trace(PVR2_TRACE_CTXT, "pvr2_context %p (destroy)", mp);
	pvr2_hdw_destroy(mp->hdw);
	pvr2_context_set_notify(mp, 0);
	mutex_lock(&pvr2_context_mutex);
	if (mp->exist_next) {
		mp->exist_next->exist_prev = mp->exist_prev;
	} else {
		pvr2_context_exist_last
= mp->exist_prev; } if (mp->exist_prev) { mp->exist_prev->exist_next = mp->exist_next; } else { pvr2_context_exist_first = mp->exist_next; } if (!pvr2_context_exist_first) { /* Trigger wakeup on control thread in case it is waiting for an exit condition. */ wake_up(&pvr2_context_sync_data); } mutex_unlock(&pvr2_context_mutex); kfree(mp); } static void pvr2_context_notify(void *ptr) { struct pvr2_context *mp = ptr; pvr2_context_set_notify(mp,!0); } static void pvr2_context_check(struct pvr2_context *mp) { struct pvr2_channel *ch1, *ch2; pvr2_trace(PVR2_TRACE_CTXT, "pvr2_context %p (notify)", mp); if (!mp->initialized_flag && !mp->disconnect_flag) { mp->initialized_flag = !0; pvr2_trace(PVR2_TRACE_CTXT, "pvr2_context %p (initialize)", mp); /* Finish hardware initialization */ if (pvr2_hdw_initialize(mp->hdw, pvr2_context_notify, mp)) { mp->video_stream.stream = pvr2_hdw_get_video_stream(mp->hdw); /* Trigger interface initialization. By doing this here initialization runs in our own safe and cozy thread context. */ if (mp->setup_func) mp->setup_func(mp); } else { pvr2_trace(PVR2_TRACE_CTXT, "pvr2_context %p (thread skipping setup)", mp); /* Even though initialization did not succeed, we're still going to continue anyway. We need to do this in order to await the expected disconnect (which we will detect in the normal course of operation). */ } } for (ch1 = mp->mc_first; ch1; ch1 = ch2) { ch2 = ch1->mc_next; if (ch1->check_func) ch1->check_func(ch1); } if (mp->disconnect_flag && !mp->mc_first) { /* Go away... */ pvr2_context_destroy(mp); return; } } static int pvr2_context_shutok(void) { return pvr2_context_cleanup_flag && (pvr2_context_exist_first == NULL); } static int pvr2_context_thread_func(void *foo) { struct pvr2_context *mp; pvr2_trace(PVR2_TRACE_CTXT,"pvr2_context thread start"); do { while ((mp = pvr2_context_notify_first) != NULL) { pvr2_context_set_notify(mp, 0); pvr2_context_check(mp); } wait_event_interruptible( pvr2_context_sync_data, ((pvr2_context_notify_first != NULL) || pvr2_context_shutok())); } while (!pvr2_context_shutok()); pvr2_context_cleaned_flag = !0; wake_up(&pvr2_context_cleanup_data); pvr2_trace(PVR2_TRACE_CTXT,"pvr2_context thread cleaned up"); wait_event_interruptible( pvr2_context_sync_data, kthread_should_stop()); pvr2_trace(PVR2_TRACE_CTXT,"pvr2_context thread end"); return 0; } int pvr2_context_global_init(void) { pvr2_context_thread_ptr = kthread_run(pvr2_context_thread_func, NULL, "pvrusb2-context"); return IS_ERR(pvr2_context_thread_ptr) ? 
-ENOMEM : 0; } void pvr2_context_global_done(void) { pvr2_context_cleanup_flag = !0; wake_up(&pvr2_context_sync_data); wait_event_interruptible( pvr2_context_cleanup_data, pvr2_context_cleaned_flag); kthread_stop(pvr2_context_thread_ptr); } struct pvr2_context *pvr2_context_create( struct usb_interface *intf, const struct usb_device_id *devid, void (*setup_func)(struct pvr2_context *)) { struct pvr2_context *mp = NULL; mp = kzalloc(sizeof(*mp),GFP_KERNEL); if (!mp) goto done; pvr2_trace(PVR2_TRACE_CTXT,"pvr2_context %p (create)",mp); mp->setup_func = setup_func; mutex_init(&mp->mutex); mutex_lock(&pvr2_context_mutex); mp->exist_prev = pvr2_context_exist_last; mp->exist_next = NULL; pvr2_context_exist_last = mp; if (mp->exist_prev) { mp->exist_prev->exist_next = mp; } else { pvr2_context_exist_first = mp; } mutex_unlock(&pvr2_context_mutex); mp->hdw = pvr2_hdw_create(intf,devid); if (!mp->hdw) { pvr2_context_destroy(mp); mp = NULL; goto done; } pvr2_context_set_notify(mp, !0); done: return mp; } static void pvr2_context_reset_input_limits(struct pvr2_context *mp) { unsigned int tmsk,mmsk; struct pvr2_channel *cp; struct pvr2_hdw *hdw = mp->hdw; mmsk = pvr2_hdw_get_input_available(hdw); tmsk = mmsk; for (cp = mp->mc_first; cp; cp = cp->mc_next) { if (!cp->input_mask) continue; tmsk &= cp->input_mask; } pvr2_hdw_set_input_allowed(hdw,mmsk,tmsk); pvr2_hdw_commit_ctl(hdw); } static void pvr2_context_enter(struct pvr2_context *mp) { mutex_lock(&mp->mutex); } static void pvr2_context_exit(struct pvr2_context *mp) { int destroy_flag = 0; if (!(mp->mc_first || !mp->disconnect_flag)) { destroy_flag = !0; } mutex_unlock(&mp->mutex); if (destroy_flag) pvr2_context_notify(mp); } void pvr2_context_disconnect(struct pvr2_context *mp) { pvr2_hdw_disconnect(mp->hdw); if (!pvr2_context_shutok()) pvr2_context_notify(mp); mp->disconnect_flag = !0; } void pvr2_channel_init(struct pvr2_channel *cp,struct pvr2_context *mp) { pvr2_context_enter(mp); cp->hdw = mp->hdw; cp->mc_head = mp; cp->mc_next = NULL; cp->mc_prev = mp->mc_last; if (mp->mc_last) { mp->mc_last->mc_next = cp; } else { mp->mc_first = cp; } mp->mc_last = cp; pvr2_context_exit(mp); } static void pvr2_channel_disclaim_stream(struct pvr2_channel *cp) { if (!cp->stream) return; pvr2_stream_kill(cp->stream->stream); cp->stream->user = NULL; cp->stream = NULL; } void pvr2_channel_done(struct pvr2_channel *cp) { struct pvr2_context *mp = cp->mc_head; pvr2_context_enter(mp); cp->input_mask = 0; pvr2_channel_disclaim_stream(cp); pvr2_context_reset_input_limits(mp); if (cp->mc_next) { cp->mc_next->mc_prev = cp->mc_prev; } else { mp->mc_last = cp->mc_prev; } if (cp->mc_prev) { cp->mc_prev->mc_next = cp->mc_next; } else { mp->mc_first = cp->mc_next; } cp->hdw = NULL; pvr2_context_exit(mp); } int pvr2_channel_limit_inputs(struct pvr2_channel *cp,unsigned int cmsk) { unsigned int tmsk,mmsk; int ret = 0; struct pvr2_channel *p2; struct pvr2_hdw *hdw = cp->hdw; mmsk = pvr2_hdw_get_input_available(hdw); cmsk &= mmsk; if (cmsk == cp->input_mask) { /* No change; nothing to do */ return 0; } pvr2_context_enter(cp->mc_head); do { if (!cmsk) { cp->input_mask = 0; pvr2_context_reset_input_limits(cp->mc_head); break; } tmsk = mmsk; for (p2 = cp->mc_head->mc_first; p2; p2 = p2->mc_next) { if (p2 == cp) continue; if (!p2->input_mask) continue; tmsk &= p2->input_mask; } if (!(tmsk & cmsk)) { ret = -EPERM; break; } tmsk &= cmsk; if ((ret = pvr2_hdw_set_input_allowed(hdw,mmsk,tmsk)) != 0) { /* Internal failure changing allowed list; probably should not happen, but react if it 
does. */ break; } cp->input_mask = cmsk; pvr2_hdw_commit_ctl(hdw); } while (0); pvr2_context_exit(cp->mc_head); return ret; } unsigned int pvr2_channel_get_limited_inputs(struct pvr2_channel *cp) { return cp->input_mask; } int pvr2_channel_claim_stream(struct pvr2_channel *cp, struct pvr2_context_stream *sp) { int code = 0; pvr2_context_enter(cp->mc_head); do { if (sp == cp->stream) break; if (sp && sp->user) { code = -EBUSY; break; } pvr2_channel_disclaim_stream(cp); if (!sp) break; sp->user = cp; cp->stream = sp; } while (0); pvr2_context_exit(cp->mc_head); return code; } // This is the marker for the real beginning of a legitimate mpeg2 stream. static char stream_sync_key[] = { 0x00, 0x00, 0x01, 0xba, }; struct pvr2_ioread *pvr2_channel_create_mpeg_stream( struct pvr2_context_stream *sp) { struct pvr2_ioread *cp; cp = pvr2_ioread_create(); if (!cp) return NULL; pvr2_ioread_setup(cp,sp->stream); pvr2_ioread_set_sync_key(cp,stream_sync_key,sizeof(stream_sync_key)); return cp; }
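The following is a minimal usage sketch, not part of the file above: it shows how a hypothetical in-tree client (for example a V4L2 or DVB front end) could combine the channel helpers implemented above. The my_open_state structure and my_attach_mpeg() are illustrative names, and the stream pointer is simply taken as an argument; only the pvr2_channel_*() and pvr2_channel_create_mpeg_stream() calls come from this file.

/* Sketch only: assumes the pvrusb2 context and ioread headers are available. */
#include "pvrusb2-context.h"
#include "pvrusb2-ioread.h"

struct my_open_state {
	struct pvr2_channel channel;
	struct pvr2_ioread *reader;
};

static int my_attach_mpeg(struct my_open_state *st,
			  struct pvr2_context *mp,
			  struct pvr2_context_stream *sp)
{
	int ret;

	/* Link this client into the context's channel list. */
	pvr2_channel_init(&st->channel, mp);

	/* Take exclusive ownership of the stream; this returns -EBUSY
	 * if another channel has already claimed it.
	 */
	ret = pvr2_channel_claim_stream(&st->channel, sp);
	if (ret) {
		pvr2_channel_done(&st->channel);
		return ret;
	}

	/* Wrap the stream in an ioread reader that synchronizes on the
	 * MPEG-2 pack start code (00 00 01 ba) configured above.
	 */
	st->reader = pvr2_channel_create_mpeg_stream(sp);
	if (!st->reader) {
		/* Treat a failed reader allocation as out of memory. */
		pvr2_channel_done(&st->channel);
		return -ENOMEM;
	}
	return 0;
}

Note that pvr2_channel_done() both disclaims any claimed stream and unlinks the channel, so the error paths above need only that single call.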
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com> * * Author: * Casey Schaufler <casey@schaufler-ca.com> */ #ifndef _SECURITY_SMACK_H #define _SECURITY_SMACK_H #include <linux/capability.h> #include <linux/spinlock.h> #include <linux/lsm_hooks.h> #include <linux/in.h> #if IS_ENABLED(CONFIG_IPV6) #include <linux/in6.h> #endif /* CONFIG_IPV6 */ #include <net/netlabel.h> #include <linux/list.h> #include <linux/rculist.h> #include <linux/lsm_audit.h> #include <linux/msg.h> /* * Use IPv6 port labeling if IPv6 is enabled and secmarks * are not being used. */ #if IS_ENABLED(CONFIG_IPV6) && !defined(CONFIG_SECURITY_SMACK_NETFILTER) #define SMACK_IPV6_PORT_LABELING 1 #endif #if IS_ENABLED(CONFIG_IPV6) && defined(CONFIG_SECURITY_SMACK_NETFILTER) #define SMACK_IPV6_SECMARK_LABELING 1 #endif /* * Smack labels were limited to 23 characters for a long time. */ #define SMK_LABELLEN 24 #define SMK_LONGLABEL 256 /* * This is the repository for labels seen so that it is * not necessary to keep allocating tiny chunks of memory * and so that they can be shared. * * Labels are never modified in place. Anytime a label * is imported (e.g. xattrset on a file) the list is checked * for it and it is added if it doesn't exist. The address * is passed out in either case. Entries are added, but * never deleted. * * Since labels are hanging around anyway it doesn't * hurt to maintain a secid for those awkward situations * where kernel components that ought to use LSM independent * interfaces don't.
The secid should go away when all of * these components have been repaired. * * The cipso value associated with the label gets stored here, too. * * Keep the access rules for this subject label here so that * the entire set of rules does not need to be examined every * time. */ struct smack_known { struct list_head list; struct hlist_node smk_hashed; char *smk_known; u32 smk_secid; struct netlbl_lsm_secattr smk_netlabel; /* on wire labels */ struct list_head smk_rules; /* access rules */ struct mutex smk_rules_lock; /* lock for rules */ }; /* * Maximum number of bytes for the levels in a CIPSO IP option. * Why 23? CIPSO is constrained to 30, so a 32 byte buffer is * bigger than can be used, and 24 is the next lower multiple * of 8, and there are too many issues if there isn't space set * aside for the terminating null byte. */ #define SMK_CIPSOLEN 24 struct superblock_smack { struct smack_known *smk_root; struct smack_known *smk_floor; struct smack_known *smk_hat; struct smack_known *smk_default; int smk_flags; }; /* * Superblock flags */ #define SMK_SB_INITIALIZED 0x01 #define SMK_SB_UNTRUSTED 0x02 struct socket_smack { struct smack_known *smk_out; /* outbound label */ struct smack_known *smk_in; /* inbound label */ struct smack_known *smk_packet; /* TCP peer label */ int smk_state; /* netlabel socket states */ }; #define SMK_NETLBL_UNSET 0 #define SMK_NETLBL_UNLABELED 1 #define SMK_NETLBL_LABELED 2 #define SMK_NETLBL_REQSKB 3 /* * Inode smack data */ struct inode_smack { struct smack_known *smk_inode; /* label of the fso */ struct smack_known *smk_task; /* label of the task */ struct smack_known *smk_mmap; /* label of the mmap domain */ int smk_flags; /* smack inode flags */ }; struct task_smack { struct smack_known *smk_task; /* label for access control */ struct smack_known *smk_forked; /* label when forked */ struct smack_known *smk_transmuted;/* label when transmuted */ struct list_head smk_rules; /* per task access rules */ struct mutex smk_rules_lock; /* lock for the rules */ struct list_head smk_relabel; /* transit allowed labels */ }; #define SMK_INODE_INSTANT 0x01 /* inode is instantiated */ #define SMK_INODE_TRANSMUTE 0x02 /* directory is transmuting */ #define SMK_INODE_CHANGED 0x04 /* smack was transmuted (unused) */ #define SMK_INODE_IMPURE 0x08 /* involved in an impure transaction */ /* * A label access rule. */ struct smack_rule { struct list_head list; struct smack_known *smk_subject; struct smack_known *smk_object; int smk_access; }; /* * An entry in the table identifying IPv4 hosts. */ struct smk_net4addr { struct list_head list; struct in_addr smk_host; /* network address */ struct in_addr smk_mask; /* network mask */ int smk_masks; /* mask size */ struct smack_known *smk_label; /* label */ }; /* * An entry in the table identifying IPv6 hosts. */ struct smk_net6addr { struct list_head list; struct in6_addr smk_host; /* network address */ struct in6_addr smk_mask; /* network mask */ int smk_masks; /* mask size */ struct smack_known *smk_label; /* label */ }; /* * An entry in the table identifying ports. 
*/ struct smk_port_label { struct list_head list; struct sock *smk_sock; /* socket initialized on */ unsigned short smk_port; /* the port number */ struct smack_known *smk_in; /* inbound label */ struct smack_known *smk_out; /* outgoing label */ short smk_sock_type; /* Socket type */ short smk_can_reuse; }; struct smack_known_list_elem { struct list_head list; struct smack_known *smk_label; }; enum { Opt_error = -1, Opt_fsdefault = 0, Opt_fsfloor = 1, Opt_fshat = 2, Opt_fsroot = 3, Opt_fstransmute = 4, }; #define SMACK_DELETE_OPTION "-DELETE" #define SMACK_CIPSO_OPTION "-CIPSO" /* * CIPSO defaults. */ #define SMACK_CIPSO_DOI_DEFAULT 3 /* Historical */ #define SMACK_CIPSO_DOI_INVALID -1 /* Not a DOI */ #define SMACK_CIPSO_DIRECT_DEFAULT 250 /* Arbitrary */ #define SMACK_CIPSO_MAPPED_DEFAULT 251 /* Also arbitrary */ #define SMACK_CIPSO_MAXLEVEL 255 /* CIPSO 2.2 standard */ /* * CIPSO 2.2 standard is 239, but Smack wants to use the * categories in a structured way that limits the value to * the bits in 23 bytes, hence the unusual number. */ #define SMACK_CIPSO_MAXCATNUM 184 /* 23 * 8 */ /* * Ptrace rules */ #define SMACK_PTRACE_DEFAULT 0 #define SMACK_PTRACE_EXACT 1 #define SMACK_PTRACE_DRACONIAN 2 #define SMACK_PTRACE_MAX SMACK_PTRACE_DRACONIAN /* * Flags for untraditional access modes. * It shouldn't be necessary to avoid conflicts with definitions * in fs.h, but do so anyway. */ #define MAY_TRANSMUTE 0x00001000 /* Controls directory labeling */ #define MAY_LOCK 0x00002000 /* Locks should be writes, but ... */ #define MAY_BRINGUP 0x00004000 /* Report use of this rule */ /* * The policy for delivering signals is configurable. * It is usually "write", but can be "append". */ #ifdef CONFIG_SECURITY_SMACK_APPEND_SIGNALS #define MAY_DELIVER MAY_APPEND /* Signal delivery requires append */ #else #define MAY_DELIVER MAY_WRITE /* Signal delivery requires write */ #endif #define SMACK_BRINGUP_ALLOW 1 /* Allow bringup mode */ #define SMACK_UNCONFINED_SUBJECT 2 /* Allow unconfined label */ #define SMACK_UNCONFINED_OBJECT 3 /* Allow unconfined label */ /* * Just to make the common cases easier to deal with */ #define MAY_ANYREAD (MAY_READ | MAY_EXEC) #define MAY_READWRITE (MAY_READ | MAY_WRITE) #define MAY_NOT 0 /* * Number of access types used by Smack (rwxatlb) */ #define SMK_NUM_ACCESS_TYPE 7 /* SMACK data */ struct smack_audit_data { const char *function; char *subject; char *object; char *request; int result; }; /* * Smack audit data; is empty if CONFIG_AUDIT not set * to save some stack */ struct smk_audit_info { #ifdef CONFIG_AUDIT struct common_audit_data a; struct smack_audit_data sad; #endif }; /* * These functions are in smack_access.c */ int smk_access_entry(char *, char *, struct list_head *); int smk_access(struct smack_known *, struct smack_known *, int, struct smk_audit_info *); int smk_tskacc(struct task_smack *, struct smack_known *, u32, struct smk_audit_info *); int smk_curacc(struct smack_known *, u32, struct smk_audit_info *); struct smack_known *smack_from_secid(const u32); char *smk_parse_smack(const char *string, int len); int smk_netlbl_mls(int, char *, struct netlbl_lsm_secattr *, int); struct smack_known *smk_import_entry(const char *, int); void smk_insert_entry(struct smack_known *skp); struct smack_known *smk_find_entry(const char *); bool smack_privileged(int cap); bool smack_privileged_cred(int cap, const struct cred *cred); void smk_destroy_label_list(struct list_head *list); int smack_populate_secattr(struct smack_known *skp); /* * Shared data. 
*/ extern int smack_enabled __initdata; extern int smack_cipso_direct; extern int smack_cipso_mapped; extern struct smack_known *smack_net_ambient; extern struct smack_known *smack_syslog_label; #ifdef CONFIG_SECURITY_SMACK_BRINGUP extern struct smack_known *smack_unconfined; #endif extern int smack_ptrace_rule; extern struct lsm_blob_sizes smack_blob_sizes; extern struct smack_known smack_known_floor; extern struct smack_known smack_known_hat; extern struct smack_known smack_known_huh; extern struct smack_known smack_known_star; extern struct smack_known smack_known_web; extern struct mutex smack_known_lock; extern struct list_head smack_known_list; extern struct list_head smk_net4addr_list; extern struct list_head smk_net6addr_list; extern struct mutex smack_onlycap_lock; extern struct list_head smack_onlycap_list; #define SMACK_HASH_SLOTS 16 extern struct hlist_head smack_known_hash[SMACK_HASH_SLOTS]; extern struct kmem_cache *smack_rule_cache; static inline struct task_smack *smack_cred(const struct cred *cred) { return cred->security + smack_blob_sizes.lbs_cred; } static inline struct smack_known **smack_file(const struct file *file) { return (struct smack_known **)(file->f_security + smack_blob_sizes.lbs_file); } static inline struct inode_smack *smack_inode(const struct inode *inode) { return inode->i_security + smack_blob_sizes.lbs_inode; } static inline struct smack_known **smack_msg_msg(const struct msg_msg *msg) { return msg->security + smack_blob_sizes.lbs_msg_msg; } static inline struct smack_known **smack_ipc(const struct kern_ipc_perm *ipc) { return ipc->security + smack_blob_sizes.lbs_ipc; } static inline struct superblock_smack *smack_superblock( const struct super_block *superblock) { return superblock->s_security + smack_blob_sizes.lbs_superblock; } /* * Is the directory transmuting? */ static inline int smk_inode_transmutable(const struct inode *isp) { struct inode_smack *sip = smack_inode(isp); return (sip->smk_flags & SMK_INODE_TRANSMUTE) != 0; } /* * Present a pointer to the smack label entry in an inode blob. */ static inline struct smack_known *smk_of_inode(const struct inode *isp) { struct inode_smack *sip = smack_inode(isp); return sip->smk_inode; } /* * Present a pointer to the smack label entry in an task blob. */ static inline struct smack_known *smk_of_task(const struct task_smack *tsp) { return tsp->smk_task; } static inline struct smack_known *smk_of_task_struct_obj( const struct task_struct *t) { struct smack_known *skp; const struct cred *cred; rcu_read_lock(); cred = __task_cred(t); skp = smk_of_task(smack_cred(cred)); rcu_read_unlock(); return skp; } /* * Present a pointer to the forked smack label entry in an task blob. */ static inline struct smack_known *smk_of_forked(const struct task_smack *tsp) { return tsp->smk_forked; } /* * Present a pointer to the smack label in the current task blob. 
*/ static inline struct smack_known *smk_of_current(void) { return smk_of_task(smack_cred(current_cred())); } /* * logging functions */ #define SMACK_AUDIT_DENIED 0x1 #define SMACK_AUDIT_ACCEPT 0x2 extern int log_policy; void smack_log(char *subject_label, char *object_label, int request, int result, struct smk_audit_info *auditdata); #ifdef CONFIG_AUDIT /* * some inline functions to set up audit data * they do nothing if CONFIG_AUDIT is not set * */ static inline void smk_ad_init(struct smk_audit_info *a, const char *func, char type) { memset(&a->sad, 0, sizeof(a->sad)); a->a.type = type; a->a.smack_audit_data = &a->sad; a->a.smack_audit_data->function = func; } static inline void smk_ad_init_net(struct smk_audit_info *a, const char *func, char type, struct lsm_network_audit *net) { smk_ad_init(a, func, type); memset(net, 0, sizeof(*net)); a->a.u.net = net; } static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a, struct task_struct *t) { a->a.u.tsk = t; } static inline void smk_ad_setfield_u_fs_path_dentry(struct smk_audit_info *a, struct dentry *d) { a->a.u.dentry = d; } static inline void smk_ad_setfield_u_fs_inode(struct smk_audit_info *a, struct inode *i) { a->a.u.inode = i; } static inline void smk_ad_setfield_u_fs_path(struct smk_audit_info *a, struct path p) { a->a.u.path = p; } static inline void smk_ad_setfield_u_net_sk(struct smk_audit_info *a, struct sock *sk) { a->a.u.net->sk = sk; } #else /* no AUDIT */ static inline void smk_ad_init(struct smk_audit_info *a, const char *func, char type) { } static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a, struct task_struct *t) { } static inline void smk_ad_setfield_u_fs_path_dentry(struct smk_audit_info *a, struct dentry *d) { } static inline void smk_ad_setfield_u_fs_inode(struct smk_audit_info *a, struct inode *i) { } static inline void smk_ad_setfield_u_fs_path(struct smk_audit_info *a, struct path p) { } static inline void smk_ad_setfield_u_net_sk(struct smk_audit_info *a, struct sock *sk) { } #endif #endif /* _SECURITY_SMACK_H */
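A hypothetical illustration, not part of Smack itself, of how the declarations above combine for one subject/object access decision. example_check() is an invented name; the sketch assumes smk_import_entry() reports failure via ERR_PTR() (as in current kernels) and uses LSM_AUDIT_DATA_NONE only because no richer audit object is available here.

/* Sketch only: assumes this sits in a file that includes "smack.h". */
#include <linux/err.h>
#include "smack.h"

static int example_check(const char *subject, const char *object)
{
	struct smack_known *skp, *okp;
	struct smk_audit_info ad;

	/* Intern both labels; repeated imports of the same string
	 * return the same interned entry, which is never freed.
	 */
	skp = smk_import_entry(subject, 0);
	if (IS_ERR(skp))
		return PTR_ERR(skp);
	okp = smk_import_entry(object, 0);
	if (IS_ERR(okp))
		return PTR_ERR(okp);

	/* Audit setup; this collapses to a no-op without CONFIG_AUDIT. */
	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NONE);

	/* May "subject" read and write objects labeled "object"? */
	return smk_access(skp, okp, MAY_READWRITE, &ad);
}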
// SPDX-License-Identifier: GPL-2.0 /* * Dynamic byte queue limits. See include/linux/dynamic_queue_limits.h * * Copyright (c) 2011, Tom Herbert <therbert@google.com> */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/dynamic_queue_limits.h> #include <linux/compiler.h> #include <linux/export.h> #include <trace/events/napi.h> #define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0) #define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0) static void dql_check_stall(struct dql *dql, unsigned short stall_thrs) { unsigned long now; if (!stall_thrs) return; now = jiffies; /* Check for a potential stall */ if (time_after_eq(now, dql->last_reap + stall_thrs)) { unsigned long hist_head, t, start, end; /* We are trying to detect a period of at least @stall_thrs * jiffies without any Tx completions, but during first half * of which some Tx was posted. */ dqs_again: hist_head = READ_ONCE(dql->history_head); /* pairs with smp_wmb() in dql_queued() */ smp_rmb(); /* Get the previous entry in the ring buffer, which is the * oldest sample. */ start = (hist_head - DQL_HIST_LEN + 1) * BITS_PER_LONG; /* Advance start to continue from the last reap time */ if (time_before(start, dql->last_reap + 1)) start = dql->last_reap + 1; /* Newest sample we should have already seen a completion for */ end = hist_head * BITS_PER_LONG + (BITS_PER_LONG - 1); /* Shrink the search space to [start, (now - stall_thrs/2)] if * `end` is beyond the stall zone */ if (time_before(now, end + stall_thrs / 2)) end = now - stall_thrs / 2; /* Search for the queued time in [t, end] */ for (t = start; time_before_eq(t, end); t++) if (test_bit(t % (DQL_HIST_LEN * BITS_PER_LONG), dql->history)) break; /* Variable t contains the time of the queue */ if (!time_before_eq(t, end)) goto no_stall; /* The ring buffer was modified in the meantime, retry */ if (hist_head != READ_ONCE(dql->history_head)) goto dqs_again; dql->stall_cnt++; dql->stall_max = max_t(unsigned short, dql->stall_max, now - t); trace_dql_stall_detected(dql->stall_thrs, now - t, dql->last_reap, dql->history_head, now, dql->history); } no_stall: dql->last_reap = now; } /* Records completed count and recalculates the queue limit */ void dql_completed(struct dql *dql, unsigned int count) { unsigned int inprogress, prev_inprogress, limit; unsigned int ovlimit, completed, num_queued; unsigned short stall_thrs; bool all_prev_completed; num_queued = READ_ONCE(dql->num_queued); /* Read stall_thrs in advance since it belongs to the same (first) * cache line as ->num_queued. This way, dql_check_stall() does not * need to touch the first cache line again later, reducing the window * of possible false sharing.
*/ stall_thrs = READ_ONCE(dql->stall_thrs); /* Can't complete more than what's in queue */ BUG_ON(count > num_queued - dql->num_completed); completed = dql->num_completed + count; limit = dql->limit; ovlimit = POSDIFF(num_queued - dql->num_completed, limit); inprogress = num_queued - completed; prev_inprogress = dql->prev_num_queued - dql->num_completed; all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued); if ((ovlimit && !inprogress) || (dql->prev_ovlimit && all_prev_completed)) { /* * Queue considered starved if: * - The queue was over-limit in the last interval, * and there is no more data in the queue. * OR * - The queue was over-limit in the previous interval and * when enqueuing it was possible that all queued data * had been consumed. This covers the case when queue * may have becomes starved between completion processing * running and next time enqueue was scheduled. * * When queue is starved increase the limit by the amount * of bytes both sent and completed in the last interval, * plus any previous over-limit. */ limit += POSDIFF(completed, dql->prev_num_queued) + dql->prev_ovlimit; dql->slack_start_time = jiffies; dql->lowest_slack = UINT_MAX; } else if (inprogress && prev_inprogress && !all_prev_completed) { /* * Queue was not starved, check if the limit can be decreased. * A decrease is only considered if the queue has been busy in * the whole interval (the check above). * * If there is slack, the amount of excess data queued above * the amount needed to prevent starvation, the queue limit * can be decreased. To avoid hysteresis we consider the * minimum amount of slack found over several iterations of the * completion routine. */ unsigned int slack, slack_last_objs; /* * Slack is the maximum of * - The queue limit plus previous over-limit minus twice * the number of objects completed. Note that two times * number of completed bytes is a basis for an upper bound * of the limit. * - Portion of objects in the last queuing operation that * was not part of non-zero previous over-limit. That is * "round down" by non-overlimit portion of the last * queueing operation. */ slack = POSDIFF(limit + dql->prev_ovlimit, 2 * (completed - dql->num_completed)); slack_last_objs = dql->prev_ovlimit ? 
POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0; slack = max(slack, slack_last_objs); if (slack < dql->lowest_slack) dql->lowest_slack = slack; if (time_after(jiffies, dql->slack_start_time + dql->slack_hold_time)) { limit = POSDIFF(limit, dql->lowest_slack); dql->slack_start_time = jiffies; dql->lowest_slack = UINT_MAX; } } /* Enforce bounds on limit */ limit = clamp(limit, dql->min_limit, dql->max_limit); if (limit != dql->limit) { dql->limit = limit; ovlimit = 0; } dql->adj_limit = limit + completed; dql->prev_ovlimit = ovlimit; dql->prev_last_obj_cnt = dql->last_obj_cnt; dql->num_completed = completed; dql->prev_num_queued = num_queued; dql_check_stall(dql, stall_thrs); } EXPORT_SYMBOL(dql_completed); void dql_reset(struct dql *dql) { /* Reset all dynamic values */ dql->limit = 0; dql->num_queued = 0; dql->num_completed = 0; dql->last_obj_cnt = 0; dql->prev_num_queued = 0; dql->prev_last_obj_cnt = 0; dql->prev_ovlimit = 0; dql->lowest_slack = UINT_MAX; dql->slack_start_time = jiffies; dql->last_reap = jiffies; dql->history_head = jiffies / BITS_PER_LONG; memset(dql->history, 0, sizeof(dql->history)); } EXPORT_SYMBOL(dql_reset); void dql_init(struct dql *dql, unsigned int hold_time) { dql->max_limit = DQL_MAX_LIMIT; dql->min_limit = 0; dql->slack_hold_time = hold_time; dql->stall_thrs = 0; dql_reset(dql); } EXPORT_SYMBOL(dql_init);
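A hypothetical driver-side sketch of how the functions above are normally paired; real network drivers typically reach them through the netdev BQL wrappers, which perform the same operations on an embedded struct dql. The my_tx_ring structure and my_*() functions are illustrative, and the HZ slack hold time mirrors the value commonly used for transmit queues.

/* Sketch only. */
#include <linux/dynamic_queue_limits.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct my_tx_ring {
	struct dql dql;
	/* ... hardware descriptor ring ... */
};

static void my_ring_init(struct my_tx_ring *ring)
{
	dql_init(&ring->dql, HZ);	/* slack hold time of one second */
}

/* Transmit path: account the bytes handed to hardware.  Returns false
 * when the dynamic limit is exceeded and the queue should be stopped.
 */
static bool my_xmit(struct my_tx_ring *ring, unsigned int bytes)
{
	dql_queued(&ring->dql, bytes);
	return dql_avail(&ring->dql) >= 0;
}

/* Completion path: report what the hardware finished so that
 * dql_completed() above can grow or shrink the limit.
 */
static void my_tx_clean(struct my_tx_ring *ring, unsigned int bytes)
{
	if (bytes)
		dql_completed(&ring->dql, bytes);
}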
// SPDX-License-Identifier: GPL-2.0+ /* * HID driver for UC-Logic devices not fully compliant with HID standard * - original and fixed report descriptors * * Copyright (c) 2010-2017 Nikolai Kondrashov * Copyright (c) 2013 Martin Rusko */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version.
*/ #include "hid-uclogic-rdesc.h" #include <linux/slab.h> #include <asm/unaligned.h> #include <kunit/visibility.h> /* Fixed WP4030U report descriptor */ __u8 uclogic_rdesc_wp4030u_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x14, /* Logical Minimum (0), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0xA0, 0x0F, /* Physical Maximum (4000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0xB8, 0x0B, /* Physical Maximum (3000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_wp4030u_fixed_size = sizeof(uclogic_rdesc_wp4030u_fixed_arr); /* Fixed WP5540U report descriptor */ __u8 uclogic_rdesc_wp5540u_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x14, /* Logical Minimum (0), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x7C, 0x15, /* Physical Maximum (5500), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0xA0, 0x0F, /* Physical Maximum (4000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0, /* End Collection, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x02, /* Usage (Mouse), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x08, /* Report ID (8), */ 0x09, 0x01, /* Usage (Pointer), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x03, /* Usage Maximum (03h), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* 
Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x15, 0x81, /* Logical Minimum (-127), */ 0x25, 0x7F, /* Logical Maximum (127), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x09, 0x38, /* Usage (Wheel), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_wp5540u_fixed_size = sizeof(uclogic_rdesc_wp5540u_fixed_arr); /* Fixed WP8060U report descriptor */ __u8 uclogic_rdesc_wp8060u_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x14, /* Logical Minimum (0), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x40, 0x1F, /* Physical Maximum (8000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0x70, 0x17, /* Physical Maximum (6000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0, /* End Collection, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x02, /* Usage (Mouse), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x08, /* Report ID (8), */ 0x09, 0x01, /* Usage (Pointer), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x03, /* Usage Maximum (03h), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x15, 0x81, /* Logical Minimum (-127), */ 0x25, 0x7F, /* Logical Maximum (127), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x09, 0x38, /* Usage (Wheel), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_wp8060u_fixed_size = 
sizeof(uclogic_rdesc_wp8060u_fixed_arr); /* Fixed WP1062 report descriptor */ __u8 uclogic_rdesc_wp1062_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x04, /* Report Count (4), */ 0x81, 0x01, /* Input (Constant), */ 0x09, 0x32, /* Usage (In Range), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x14, /* Logical Minimum (0), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x10, 0x27, /* Physical Maximum (10000), */ 0x26, 0x20, 0x4E, /* Logical Maximum (20000), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0xB7, 0x19, /* Physical Maximum (6583), */ 0x26, 0x6E, 0x33, /* Logical Maximum (13166), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_wp1062_fixed_size = sizeof(uclogic_rdesc_wp1062_fixed_arr); /* Fixed PF1209 report descriptor */ __u8 uclogic_rdesc_pf1209_fixed_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x14, /* Logical Minimum (0), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0xE0, 0x2E, /* Physical Maximum (12000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0x28, 0x23, /* Physical Maximum (9000), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0, /* End Collection, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x02, /* Usage (Mouse), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x08, /* Report ID (8), */ 0x09, 0x01, /* Usage (Pointer), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x03, /* Usage Maximum (03h), */ 0x14, /* Logical 
Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x15, 0x81, /* Logical Minimum (-127), */ 0x25, 0x7F, /* Logical Maximum (127), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x09, 0x38, /* Usage (Wheel), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_pf1209_fixed_size = sizeof(uclogic_rdesc_pf1209_fixed_arr); /* Fixed PID 0522 tablet report descriptor, interface 0 (stylus) */ __u8 uclogic_rdesc_twhl850_fixed0_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x03, /* Report Count (3), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x81, 0x02, /* Input (Variable), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x95, 0x01, /* Report Count (1), */ 0x09, 0x32, /* Usage (In Range), */ 0x81, 0x02, /* Input (Variable), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x75, 0x10, /* Report Size (16), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x65, 0x13, /* Unit (Inch), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x40, 0x1F, /* Physical Maximum (8000), */ 0x26, 0x00, 0x7D, /* Logical Maximum (32000), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0x88, 0x13, /* Physical Maximum (5000), */ 0x26, 0x20, 0x4E, /* Logical Maximum (20000), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_twhl850_fixed0_size = sizeof(uclogic_rdesc_twhl850_fixed0_arr); /* Fixed PID 0522 tablet report descriptor, interface 1 (mouse) */ __u8 uclogic_rdesc_twhl850_fixed1_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x02, /* Usage (Mouse), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x01, /* Report ID (1), */ 0x09, 0x01, /* Usage (Pointer), */ 0xA0, /* Collection (Physical), */ 0x05, 0x09, /* Usage Page (Button), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x03, /* Report Count (3), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x03, /* Usage Maximum (03h), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x09, 0x38, 
/* Usage (Wheel), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_twhl850_fixed1_size = sizeof(uclogic_rdesc_twhl850_fixed1_arr); /* Fixed PID 0522 tablet report descriptor, interface 2 (frame buttons) */ __u8 uclogic_rdesc_twhl850_fixed2_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x06, /* Usage (Keyboard), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x03, /* Report ID (3), */ 0x05, 0x07, /* Usage Page (Keyboard), */ 0x14, /* Logical Minimum (0), */ 0x19, 0xE0, /* Usage Minimum (KB Leftcontrol), */ 0x29, 0xE7, /* Usage Maximum (KB Right GUI), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x08, /* Report Count (8), */ 0x81, 0x02, /* Input (Variable), */ 0x18, /* Usage Minimum (None), */ 0x29, 0xFF, /* Usage Maximum (FFh), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x06, /* Report Count (6), */ 0x80, /* Input, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_twhl850_fixed2_size = sizeof(uclogic_rdesc_twhl850_fixed2_arr); /* Fixed TWHA60 report descriptor, interface 0 (stylus) */ __u8 uclogic_rdesc_twha60_fixed0_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x09, /* Report ID (9), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x04, /* Report Count (4), */ 0x81, 0x01, /* Input (Constant), */ 0x09, 0x32, /* Usage (In Range), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x14, /* Logical Minimum (0), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x65, 0x13, /* Unit (Inch), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x46, 0x10, 0x27, /* Physical Maximum (10000), */ 0x27, 0x3F, 0x9C, 0x00, 0x00, /* Logical Maximum (39999), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x46, 0x6A, 0x18, /* Physical Maximum (6250), */ 0x26, 0xA7, 0x61, /* Logical Maximum (24999), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_twha60_fixed0_size = sizeof(uclogic_rdesc_twha60_fixed0_arr); /* Fixed TWHA60 report descriptor, interface 1 (frame buttons) */ __u8 uclogic_rdesc_twha60_fixed1_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x06, /* Usage (Keyboard), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x05, /* Report ID (5), */ 0x05, 0x07, /* Usage Page (Keyboard), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x08, /* Report Count (8), */ 0x81, 0x01, /* Input (Constant), */ 0x95, 0x0C, /* Report Count (12), */ 0x19, 
0x3A, /* Usage Minimum (KB F1), */ 0x29, 0x45, /* Usage Maximum (KB F12), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0C, /* Report Count (12), */ 0x19, 0x68, /* Usage Minimum (KB F13), */ 0x29, 0x73, /* Usage Maximum (KB F24), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x08, /* Report Count (8), */ 0x81, 0x01, /* Input (Constant), */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_twha60_fixed1_size = sizeof(uclogic_rdesc_twha60_fixed1_arr); /* Fixed report descriptor template for (tweaked) v1 pen reports */ const __u8 uclogic_rdesc_v1_pen_template_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x07, /* Report ID (7), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x09, 0x32, /* Usage (In Range), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x65, 0x13, /* Unit (Inch), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x27, UCLOGIC_RDESC_PEN_PH(X_LM), /* Logical Maximum (PLACEHOLDER), */ 0x47, UCLOGIC_RDESC_PEN_PH(X_PM), /* Physical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x27, UCLOGIC_RDESC_PEN_PH(Y_LM), /* Logical Maximum (PLACEHOLDER), */ 0x47, UCLOGIC_RDESC_PEN_PH(Y_PM), /* Physical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x27, UCLOGIC_RDESC_PEN_PH(PRESSURE_LM), /* Logical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_v1_pen_template_size = sizeof(uclogic_rdesc_v1_pen_template_arr); /* Fixed report descriptor template for (tweaked) v2 pen reports */ const __u8 uclogic_rdesc_v2_pen_template_arr[] = { 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x08, /* Report ID (8), */ 0x09, 0x20, /* Usage (Stylus), */ 0xA0, /* Collection (Physical), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x09, 0x32, /* Usage (In Range), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x95, 0x01, /* Report Count (1), */ 0xA4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x65, 0x13, /* Unit (Inch), */ 0x55, 0xFD, /* Unit Exponent (-3), */ 0x75, 0x18, /* Report Size (24), */ 0x34, /* Physical Minimum (0), */ 0x09, 0x30, /* Usage (X), */ 0x27, UCLOGIC_RDESC_PEN_PH(X_LM), /* 
Logical Maximum (PLACEHOLDER), */ 0x47, UCLOGIC_RDESC_PEN_PH(X_PM), /* Physical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x27, UCLOGIC_RDESC_PEN_PH(Y_LM), /* Logical Maximum (PLACEHOLDER), */ 0x47, UCLOGIC_RDESC_PEN_PH(Y_PM), /* Physical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0xB4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x75, 0x10, /* Report Size (16), */ 0x27, UCLOGIC_RDESC_PEN_PH(PRESSURE_LM), /* Logical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0x54, /* Unit Exponent (0), */ 0x65, 0x14, /* Unit (Degrees), */ 0x35, 0xC4, /* Physical Minimum (-60), */ 0x45, 0x3C, /* Physical Maximum (60), */ 0x15, 0xC4, /* Logical Minimum (-60), */ 0x25, 0x3C, /* Logical Maximum (60), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x02, /* Report Count (2), */ 0x09, 0x3D, /* Usage (X Tilt), */ 0x09, 0x3E, /* Usage (Y Tilt), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_v2_pen_template_size = sizeof(uclogic_rdesc_v2_pen_template_arr); /* * Expand to the contents of a generic frame buttons report descriptor. * * @_id: The report ID to use. * @_size: Size of the report to pad to, including report ID, bytes. */ #define UCLOGIC_RDESC_FRAME_BUTTONS_BYTES(_id, _size) \ 0x05, 0x01, /* Usage Page (Desktop), */ \ 0x09, 0x07, /* Usage (Keypad), */ \ 0xA1, 0x01, /* Collection (Application), */ \ 0x85, (_id), /* Report ID (_id), */ \ 0x14, /* Logical Minimum (0), */ \ 0x25, 0x01, /* Logical Maximum (1), */ \ 0x75, 0x01, /* Report Size (1), */ \ 0x05, 0x0D, /* Usage Page (Digitizer), */ \ 0x09, 0x39, /* Usage (Tablet Function Keys), */ \ 0xA0, /* Collection (Physical), */ \ 0x09, 0x44, /* Usage (Barrel Switch), */ \ 0x95, 0x01, /* Report Count (1), */ \ 0x81, 0x02, /* Input (Variable), */ \ 0x05, 0x01, /* Usage Page (Desktop), */ \ 0x09, 0x30, /* Usage (X), */ \ 0x09, 0x31, /* Usage (Y), */ \ 0x95, 0x02, /* Report Count (2), */ \ 0x81, 0x02, /* Input (Variable), */ \ 0x95, 0x15, /* Report Count (21), */ \ 0x81, 0x01, /* Input (Constant), */ \ 0x05, 0x09, /* Usage Page (Button), */ \ 0x19, 0x01, /* Usage Minimum (01h), */ \ 0x29, 0x0A, /* Usage Maximum (0Ah), */ \ 0x95, 0x0A, /* Report Count (10), */ \ 0x81, 0x02, /* Input (Variable), */ \ 0xC0, /* End Collection, */ \ 0x05, 0x01, /* Usage Page (Desktop), */ \ 0x09, 0x05, /* Usage (Gamepad), */ \ 0xA0, /* Collection (Physical), */ \ 0x05, 0x09, /* Usage Page (Button), */ \ 0x19, 0x01, /* Usage Minimum (01h), */ \ 0x29, 0x0A, /* Usage Maximum (0Ah), */ \ 0x95, 0x0A, /* Report Count (10), */ \ 0x81, 0x02, /* Input (Variable), */ \ 0x95, ((_size) * 8 - 52), \ /* Report Count (padding), */ \ 0x81, 0x01, /* Input (Constant), */ \ 0xC0, /* End Collection, */ \ 0xC0 /* End Collection */ /* Fixed report descriptor for (tweaked) v1 frame reports */ const __u8 uclogic_rdesc_v1_frame_arr[] = { UCLOGIC_RDESC_FRAME_BUTTONS_BYTES(UCLOGIC_RDESC_V1_FRAME_ID, 8) }; const size_t uclogic_rdesc_v1_frame_size = sizeof(uclogic_rdesc_v1_frame_arr); /* Fixed report descriptor for (tweaked) v2 frame button reports */ const __u8 uclogic_rdesc_v2_frame_buttons_arr[] = { UCLOGIC_RDESC_FRAME_BUTTONS_BYTES(UCLOGIC_RDESC_V2_FRAME_BUTTONS_ID, 12) }; const size_t uclogic_rdesc_v2_frame_buttons_size = sizeof(uclogic_rdesc_v2_frame_buttons_arr); /* Fixed report descriptor for (tweaked) v2 frame touch ring reports */ const __u8 uclogic_rdesc_v2_frame_touch_ring_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* 
Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, UCLOGIC_RDESC_V2_FRAME_TOUCH_ID, /* Report ID (TOUCH_ID), */ 0x14, /* Logical Minimum (0), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x09, 0x01, /* Usage (01h), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x07, /* Report Count (7), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x0A, 0xFF, 0xFF, /* Usage (FFFFh), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x38, /* Usage (Wheel), */ 0x95, 0x01, /* Report Count (1), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x25, 0x0B, /* Logical Maximum (11), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x2E, /* Report Count (46), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_v2_frame_touch_ring_size = sizeof(uclogic_rdesc_v2_frame_touch_ring_arr); /* Fixed report descriptor for (tweaked) v2 frame touch strip reports */ const __u8 uclogic_rdesc_v2_frame_touch_strip_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, UCLOGIC_RDESC_V2_FRAME_TOUCH_ID, /* Report ID (TOUCH_ID), */ 0x14, /* Logical Minimum (0), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x09, 0x01, /* Usage (01h), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x07, /* Report Count (7), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x0A, 0xFF, 0xFF, /* Usage (FFFFh), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x33, /* Usage (Rx), */ 0x09, 0x34, /* Usage (Ry), */ 0x95, 0x01, /* Report Count (1), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x25, 0x07, /* Logical Maximum (7), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x2E, /* Report Count (46), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_v2_frame_touch_strip_size = sizeof(uclogic_rdesc_v2_frame_touch_strip_arr); /* Fixed report descriptor for (tweaked) v2 frame dial reports */ const __u8 uclogic_rdesc_v2_frame_dial_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 
0xA1, 0x01, /* Collection (Application), */ 0x85, UCLOGIC_RDESC_V2_FRAME_DIAL_ID, /* Report ID (DIAL_ID), */ 0x14, /* Logical Minimum (0), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x09, /* Usage Page (Button), */ 0x09, 0x01, /* Usage (01h), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x06, /* Report Count (6), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x0A, 0xFF, 0xFF, /* Usage (FFFFh), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x38, /* Usage (Wheel), */ 0x95, 0x01, /* Report Count (1), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x2E, /* Report Count (46), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_v2_frame_dial_size = sizeof(uclogic_rdesc_v2_frame_dial_arr); const __u8 uclogic_ugee_v2_probe_arr[] = { 0x02, 0xb0, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; const size_t uclogic_ugee_v2_probe_size = sizeof(uclogic_ugee_v2_probe_arr); const int uclogic_ugee_v2_probe_endpoint = 0x03; /* Fixed report descriptor template for UGEE v2 pen reports */ const __u8 uclogic_rdesc_ugee_v2_pen_template_arr[] = { 0x05, 0x0d, /* Usage Page (Digitizers), */ 0x09, 0x01, /* Usage (Digitizer), */ 0xa1, 0x01, /* Collection (Application), */ 0x85, 0x02, /* Report ID (2), */ 0x09, 0x20, /* Usage (Stylus), */ 0xa1, 0x00, /* Collection (Physical), */ 0x09, 0x42, /* Usage (Tip Switch), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x09, 0x46, /* Usage (Tablet Pick), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x03, /* Report Count (3), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x09, 0x32, /* Usage (In Range), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x01, /* Report Count (1), */ 0x35, 0x00, /* Physical Minimum (0), */ 0xa4, /* Push, */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x30, /* Usage (X), */ 0x65, 0x13, /* Unit (Inch), */ 0x55, 0x0d, /* Unit Exponent (-3), */ 0x27, UCLOGIC_RDESC_PEN_PH(X_LM), /* Logical Maximum (PLACEHOLDER), */ 0x47, UCLOGIC_RDESC_PEN_PH(X_PM), /* Physical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x31, /* Usage (Y), */ 0x27, UCLOGIC_RDESC_PEN_PH(Y_LM), /* Logical Maximum (PLACEHOLDER), */ 0x47, UCLOGIC_RDESC_PEN_PH(Y_PM), /* Physical Maximum (PLACEHOLDER), */ 0x81, 0x02, /* Input (Variable), */ 0xb4, /* Pop, */ 0x09, 0x30, /* Usage (Tip Pressure), */ 0x45, 0x00, /* Physical Maximum (0), */ 0x27, 
UCLOGIC_RDESC_PEN_PH(PRESSURE_LM), /* Logical Maximum (PLACEHOLDER), */ 0x75, 0x0D, /* Report Size (13), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x01, /* Input (Constant), */ 0x09, 0x3d, /* Usage (X Tilt), */ 0x35, 0xC3, /* Physical Minimum (-61), */ 0x45, 0x3C, /* Physical Maximum (60), */ 0x15, 0xC3, /* Logical Minimum (-61), */ 0x25, 0x3C, /* Logical Maximum (60), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x3e, /* Usage (Y Tilt), */ 0x35, 0xC3, /* Physical Minimum (-61), */ 0x45, 0x3C, /* Physical Maximum (60), */ 0x15, 0xC3, /* Logical Minimum (-61), */ 0x25, 0x3C, /* Logical Maximum (60), */ 0x81, 0x02, /* Input (Variable), */ 0xc0, /* End Collection, */ 0xc0, /* End Collection */ }; const size_t uclogic_rdesc_ugee_v2_pen_template_size = sizeof(uclogic_rdesc_ugee_v2_pen_template_arr); /* Fixed report descriptor template for UGEE v2 frame reports (buttons only) */ const __u8 uclogic_rdesc_ugee_v2_frame_btn_template_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, UCLOGIC_RDESC_V1_FRAME_ID, /* Report ID, */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x08, /* Report Count (8), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ UCLOGIC_RDESC_FRAME_PH_BTN, /* Usage Maximum (PLACEHOLDER), */ 0x95, 0x0A, /* Report Count (10), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x46, /* Report Count (70), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_ugee_v2_frame_btn_template_size = sizeof(uclogic_rdesc_ugee_v2_frame_btn_template_arr); /* Fixed report descriptor template for UGEE v2 frame reports (dial) */ const __u8 uclogic_rdesc_ugee_v2_frame_dial_template_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, UCLOGIC_RDESC_V1_FRAME_ID, /* Report ID, */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x08, /* Report Count (8), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ UCLOGIC_RDESC_FRAME_PH_BTN, /* Usage Maximum (PLACEHOLDER), */ 0x95, 0x0A, /* Report Count (10), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x06, /* Report Count (6), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x38, /* Usage (Wheel), */ 0x95, 0x01, /* Report Count (1), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_ugee_v2_frame_dial_template_size = sizeof(uclogic_rdesc_ugee_v2_frame_dial_template_arr); /* Fixed 
report descriptor template for UGEE v2 frame reports (mouse) */ const __u8 uclogic_rdesc_ugee_v2_frame_mouse_template_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x02, /* Usage (Mouse), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x01, /* Report ID (1), */ 0x05, 0x01, /* Usage Page (Pointer), */ 0xA0, /* Collection (Physical), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x02, /* Report Count (2), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x02, /* Usage Maximum (02h), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x06, /* Report Count (6), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x01, /* Usage Page (Generic Desktop), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x75, 0x10, /* Report Size (16), */ 0x95, 0x02, /* Report Count (2), */ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_ugee_v2_frame_mouse_template_size = sizeof(uclogic_rdesc_ugee_v2_frame_mouse_template_arr); /* Fixed report descriptor template for UGEE v2 battery reports */ const __u8 uclogic_rdesc_ugee_v2_battery_template_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, UCLOGIC_RDESC_UGEE_V2_BATTERY_ID, /* Report ID, */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x84, /* Usage Page (Power Device), */ 0x05, 0x85, /* Usage Page (Battery System), */ 0x09, 0x65, /* Usage Page (AbsoluteStateOfCharge), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x01, /* Report Count (1), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xff, 0x00, /* Logical Maximum (255), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x01, /* Report Count (1), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x09, 0x44, /* Usage Page (Charging), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x07, /* Report Count (7), */ 0x81, 0x01, /* Input (Constant), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x07, /* Report Count (7), */ 0x81, 0x01, /* Input (Constant), */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_ugee_v2_battery_template_size = sizeof(uclogic_rdesc_ugee_v2_battery_template_arr); /* Fixed report descriptor for Ugee EX07 frame */ const __u8 uclogic_rdesc_ugee_ex07_frame_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x06, /* Report ID (6), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x05, 0x09, /* Usage Page (Button), */ 0x75, 0x01, /* Report Size (1), */ 0x19, 0x03, /* Usage Minimum (03h), */ 0x29, 0x06, /* Usage Maximum (06h), */ 0x95, 0x04, /* Report Count (4), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x1A, /* Report Count (26), */ 0x81, 0x03, /* Input (Constant, Variable), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x02, /* Usage Maximum (02h), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_ugee_ex07_frame_size = 
sizeof(uclogic_rdesc_ugee_ex07_frame_arr); /* Fixed report descriptor for Ugee G5 frame controls */ const __u8 uclogic_rdesc_ugee_g5_frame_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x06, /* Report ID (6), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x05, /* Usage Maximum (05h), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x05, /* Report Count (5), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x03, /* Report Count (3), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x0A, 0xFF, 0xFF, /* Usage (FFFFh), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x0B, /* Report Count (11), */ 0x81, 0x01, /* Input (Constant), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x38, /* Usage (Wheel), */ 0x15, 0xFF, /* Logical Minimum (-1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x02, /* Report Size (2), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x06, /* Input (Variable, Relative), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_ugee_g5_frame_size = sizeof(uclogic_rdesc_ugee_g5_frame_arr); /* Fixed report descriptor for XP-Pen Deco 01 frame controls */ const __u8 uclogic_rdesc_xppen_deco01_frame_arr[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x07, /* Usage (Keypad), */ 0xA1, 0x01, /* Collection (Application), */ 0x85, 0x06, /* Report ID (6), */ 0x14, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x39, /* Usage (Tablet Function Keys), */ 0xA0, /* Collection (Physical), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x08, /* Usage Maximum (08h), */ 0x95, 0x08, /* Report Count (8), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x0D, /* Usage Page (Digitizer), */ 0x09, 0x44, /* Usage (Barrel Switch), */ 0x95, 0x01, /* Report Count (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x30, /* Usage (X), */ 0x09, 0x31, /* Usage (Y), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x15, /* Report Count (21), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; const size_t uclogic_rdesc_xppen_deco01_frame_size = sizeof(uclogic_rdesc_xppen_deco01_frame_arr); /** * uclogic_rdesc_template_apply() - apply report descriptor parameters to a * report descriptor template, creating a report descriptor. 
Copies the * template over to the new report descriptor and replaces every occurrence of * the template placeholders, followed by an index byte, with the value from the * parameter list at that index. * * @template_ptr: Pointer to the template buffer. * @template_size: Size of the template buffer. * @param_list: List of template parameters. * @param_num: Number of parameters in the list. * * Returns: * Kmalloc-allocated pointer to the created report descriptor, * or NULL if allocation failed. */ __u8 *uclogic_rdesc_template_apply(const __u8 *template_ptr, size_t template_size, const s32 *param_list, size_t param_num) { static const __u8 btn_head[] = {UCLOGIC_RDESC_FRAME_PH_BTN_HEAD}; static const __u8 pen_head[] = {UCLOGIC_RDESC_PEN_PH_HEAD}; __u8 *rdesc_ptr; __u8 *p; s32 v; rdesc_ptr = kmemdup(template_ptr, template_size, GFP_KERNEL); if (rdesc_ptr == NULL) return NULL; for (p = rdesc_ptr; p + sizeof(btn_head) < rdesc_ptr + template_size;) { if (p + sizeof(pen_head) < rdesc_ptr + template_size && memcmp(p, pen_head, sizeof(pen_head)) == 0 && p[sizeof(pen_head)] < param_num) { v = param_list[p[sizeof(pen_head)]]; put_unaligned((__force u32)cpu_to_le32(v), (s32 *)p); p += sizeof(pen_head) + 1; } else if (memcmp(p, btn_head, sizeof(btn_head)) == 0 && p[sizeof(btn_head)] < param_num) { v = param_list[p[sizeof(btn_head)]]; put_unaligned((__u8)0x2A, p); /* Usage Maximum */ put_unaligned((__force u16)cpu_to_le16(v), (s16 *)(p + 1)); p += sizeof(btn_head) + 1; } else { p++; } } return rdesc_ptr; } EXPORT_SYMBOL_IF_KUNIT(uclogic_rdesc_template_apply);
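/*
 * To make the placeholder scheme above concrete, the following user-space
 * sketch mimics what uclogic_rdesc_template_apply() does: a short placeholder
 * head followed by an index byte marks where a 32-bit parameter belongs, and
 * the whole head-plus-index span is overwritten with the little-endian
 * parameter value.  The head bytes, helper names and example descriptor below
 * are invented for illustration; only the substitution logic follows the
 * kernel function above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Hypothetical 3-byte placeholder head (the kernel defines its own
 * UCLOGIC_RDESC_PEN_PH_HEAD byte sequence).  Head plus index byte spans
 * 4 bytes, matching the 4-byte operand of the 0x27/0x47 items above.
 */
static const uint8_t ph_head[] = { 0xaa, 0xbb, 0xcc };

/*
 * Replace every "head + index" placeholder with the little-endian 32-bit
 * parameter at that index; the 4-byte value overwrites both the head and
 * the index byte, as the kernel helper's put_unaligned() does.
 */
static void apply_params(uint8_t *desc, size_t size,
			 const int32_t *params, size_t nparams)
{
	size_t i = 0;

	while (i + sizeof(ph_head) < size) {
		uint8_t idx = desc[i + sizeof(ph_head)];

		if (memcmp(desc + i, ph_head, sizeof(ph_head)) == 0 &&
		    idx < nparams) {
			uint32_t v = (uint32_t)params[idx];

			desc[i + 0] = v & 0xff;
			desc[i + 1] = (v >> 8) & 0xff;
			desc[i + 2] = (v >> 16) & 0xff;
			desc[i + 3] = (v >> 24) & 0xff;
			i += sizeof(ph_head) + 1;
		} else {
			i++;
		}
	}
}

int main(void)
{
	/* "0x27 <head> <index 0>" stands in for a Logical Maximum item. */
	uint8_t desc[] = { 0x27, 0xaa, 0xbb, 0xcc, 0x00 };
	int32_t params[] = { 32767 };	/* e.g. the X logical maximum */
	size_t i;

	apply_params(desc, sizeof(desc), params, 1);
	for (i = 0; i < sizeof(desc); i++)
		printf("%02x ", desc[i]);
	printf("\n");	/* prints: 27 ff 7f 00 00 */
	return 0;
}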
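/*
 * The UCLOGIC_RDESC_FRAME_BUTTONS_BYTES() macro further up pads each frame
 * report out to @_size bytes; its "(_size) * 8 - 52" report count works
 * because the fixed fields take 52 bits: 8 (report ID) + 1 (barrel switch)
 * + 2 (X/Y) + 21 (constant padding) + 10 + 10 (two button groups).
 * A tiny stand-alone check of that arithmetic, purely illustrative:
 */
#include <stdio.h>

/* Bits consumed by the fixed items of the frame buttons report. */
#define FRAME_FIXED_BITS 52

static unsigned int frame_padding_bits(unsigned int size_bytes)
{
	return size_bytes * 8 - FRAME_FIXED_BITS;
}

int main(void)
{
	/* v1 frame: 8-byte report -> 12 bits of padding */
	printf("v1 (8 bytes): %u padding bits\n", frame_padding_bits(8));
	/* v2 frame buttons: 12-byte report -> 44 bits of padding */
	printf("v2 (12 bytes): %u padding bits\n", frame_padding_bits(12));
	return 0;
}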
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net> * Copyright (c) 2014 Intel Corporation * Author: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com> * * Development of this code funded by Astaro AG (http://www.astaro.com/) */ #include <linux/kernel.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/random.h> #include <linux/smp.h> #include <linux/static_key.h> #include <net/dst.h> #include <net/ip.h> #include <net/sock.h> #include <net/tcp_states.h> /* for TCP_TIME_WAIT */ #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> #include <net/netfilter/nft_meta.h> #include <net/netfilter/nf_tables_offload.h> #include <uapi/linux/netfilter_bridge.h> /* NF_BR_PRE_ROUTING */ #define NFT_META_SECS_PER_MINUTE 60 #define NFT_META_SECS_PER_HOUR 3600 #define NFT_META_SECS_PER_DAY 86400 #define NFT_META_DAYS_PER_WEEK 7 static u8 nft_meta_weekday(void) { time64_t secs = ktime_get_real_seconds(); unsigned int dse; u8 wday; secs -= NFT_META_SECS_PER_MINUTE * sys_tz.tz_minuteswest; dse = div_u64(secs, NFT_META_SECS_PER_DAY); wday = (4 + dse) % NFT_META_DAYS_PER_WEEK; return wday; } static u32 nft_meta_hour(time64_t secs) { struct tm tm; time64_to_tm(secs, 0, &tm); return tm.tm_hour * NFT_META_SECS_PER_HOUR + tm.tm_min * NFT_META_SECS_PER_MINUTE + tm.tm_sec; } static noinline_for_stack void nft_meta_get_eval_time(enum nft_meta_keys key, u32 *dest) { switch (key) { case NFT_META_TIME_NS: nft_reg_store64((u64 *)dest, ktime_get_real_ns()); break; case NFT_META_TIME_DAY: nft_reg_store8(dest, nft_meta_weekday()); break; case NFT_META_TIME_HOUR: *dest = nft_meta_hour(ktime_get_real_seconds()); break; default: break; } } static noinline bool nft_meta_get_eval_pkttype_lo(const struct nft_pktinfo *pkt, u32 *dest) { const struct sk_buff *skb = pkt->skb; switch (nft_pf(pkt)) { case NFPROTO_IPV4: if (ipv4_is_multicast(ip_hdr(skb)->daddr)) nft_reg_store8(dest, PACKET_MULTICAST); else nft_reg_store8(dest, PACKET_BROADCAST); break; case NFPROTO_IPV6: nft_reg_store8(dest, PACKET_MULTICAST); break; case NFPROTO_NETDEV: switch (skb->protocol) { case htons(ETH_P_IP): { int noff = skb_network_offset(skb); struct iphdr *iph, _iph; iph = skb_header_pointer(skb, noff, sizeof(_iph), &_iph); if (!iph) return false; if (ipv4_is_multicast(iph->daddr)) nft_reg_store8(dest, PACKET_MULTICAST); else nft_reg_store8(dest, PACKET_BROADCAST); break; } case htons(ETH_P_IPV6): nft_reg_store8(dest, PACKET_MULTICAST); break; default: WARN_ON_ONCE(1); return false; } break; default: WARN_ON_ONCE(1); return false; } return true; } static noinline bool nft_meta_get_eval_skugid(enum nft_meta_keys key, u32 *dest, const struct nft_pktinfo *pkt) { struct sock *sk = skb_to_full_sk(pkt->skb); struct socket *sock; if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk))) return false; read_lock_bh(&sk->sk_callback_lock); sock = sk->sk_socket; if (!sock || !sock->file) {
read_unlock_bh(&sk->sk_callback_lock); return false; } switch (key) { case NFT_META_SKUID: *dest = from_kuid_munged(sock_net(sk)->user_ns, sock->file->f_cred->fsuid); break; case NFT_META_SKGID: *dest = from_kgid_munged(sock_net(sk)->user_ns, sock->file->f_cred->fsgid); break; default: break; } read_unlock_bh(&sk->sk_callback_lock); return true; } #ifdef CONFIG_CGROUP_NET_CLASSID static noinline bool nft_meta_get_eval_cgroup(u32 *dest, const struct nft_pktinfo *pkt) { struct sock *sk = skb_to_full_sk(pkt->skb); if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk))) return false; *dest = sock_cgroup_classid(&sk->sk_cgrp_data); return true; } #endif static noinline bool nft_meta_get_eval_kind(enum nft_meta_keys key, u32 *dest, const struct nft_pktinfo *pkt) { const struct net_device *in = nft_in(pkt), *out = nft_out(pkt); switch (key) { case NFT_META_IIFKIND: if (!in || !in->rtnl_link_ops) return false; strscpy_pad((char *)dest, in->rtnl_link_ops->kind, IFNAMSIZ); break; case NFT_META_OIFKIND: if (!out || !out->rtnl_link_ops) return false; strscpy_pad((char *)dest, out->rtnl_link_ops->kind, IFNAMSIZ); break; default: return false; } return true; } static void nft_meta_store_ifindex(u32 *dest, const struct net_device *dev) { *dest = dev ? dev->ifindex : 0; } static void nft_meta_store_ifname(u32 *dest, const struct net_device *dev) { strscpy_pad((char *)dest, dev ? dev->name : "", IFNAMSIZ); } static bool nft_meta_store_iftype(u32 *dest, const struct net_device *dev) { if (!dev) return false; nft_reg_store16(dest, dev->type); return true; } static bool nft_meta_store_ifgroup(u32 *dest, const struct net_device *dev) { if (!dev) return false; *dest = dev->group; return true; } static bool nft_meta_get_eval_ifname(enum nft_meta_keys key, u32 *dest, const struct nft_pktinfo *pkt) { switch (key) { case NFT_META_IIFNAME: nft_meta_store_ifname(dest, nft_in(pkt)); break; case NFT_META_OIFNAME: nft_meta_store_ifname(dest, nft_out(pkt)); break; case NFT_META_IIF: nft_meta_store_ifindex(dest, nft_in(pkt)); break; case NFT_META_OIF: nft_meta_store_ifindex(dest, nft_out(pkt)); break; case NFT_META_IFTYPE: if (!nft_meta_store_iftype(dest, pkt->skb->dev)) return false; break; case __NFT_META_IIFTYPE: if (!nft_meta_store_iftype(dest, nft_in(pkt))) return false; break; case NFT_META_OIFTYPE: if (!nft_meta_store_iftype(dest, nft_out(pkt))) return false; break; case NFT_META_IIFGROUP: if (!nft_meta_store_ifgroup(dest, nft_in(pkt))) return false; break; case NFT_META_OIFGROUP: if (!nft_meta_store_ifgroup(dest, nft_out(pkt))) return false; break; default: return false; } return true; } #ifdef CONFIG_IP_ROUTE_CLASSID static noinline bool nft_meta_get_eval_rtclassid(const struct sk_buff *skb, u32 *dest) { const struct dst_entry *dst = skb_dst(skb); if (!dst) return false; *dest = dst->tclassid; return true; } #endif static noinline u32 nft_meta_get_eval_sdif(const struct nft_pktinfo *pkt) { switch (nft_pf(pkt)) { case NFPROTO_IPV4: return inet_sdif(pkt->skb); case NFPROTO_IPV6: return inet6_sdif(pkt->skb); } return 0; } static noinline void nft_meta_get_eval_sdifname(u32 *dest, const struct nft_pktinfo *pkt) { u32 sdif = nft_meta_get_eval_sdif(pkt); const struct net_device *dev; dev = sdif ? 
dev_get_by_index_rcu(nft_net(pkt), sdif) : NULL; nft_meta_store_ifname(dest, dev); } void nft_meta_get_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_meta *priv = nft_expr_priv(expr); const struct sk_buff *skb = pkt->skb; u32 *dest = &regs->data[priv->dreg]; switch (priv->key) { case NFT_META_LEN: *dest = skb->len; break; case NFT_META_PROTOCOL: nft_reg_store16(dest, (__force u16)skb->protocol); break; case NFT_META_NFPROTO: nft_reg_store8(dest, nft_pf(pkt)); break; case NFT_META_L4PROTO: if (!(pkt->flags & NFT_PKTINFO_L4PROTO)) goto err; nft_reg_store8(dest, pkt->tprot); break; case NFT_META_PRIORITY: *dest = skb->priority; break; case NFT_META_MARK: *dest = skb->mark; break; case NFT_META_IIF: case NFT_META_OIF: case NFT_META_IIFNAME: case NFT_META_OIFNAME: case NFT_META_IIFTYPE: case NFT_META_OIFTYPE: case NFT_META_IIFGROUP: case NFT_META_OIFGROUP: if (!nft_meta_get_eval_ifname(priv->key, dest, pkt)) goto err; break; case NFT_META_SKUID: case NFT_META_SKGID: if (!nft_meta_get_eval_skugid(priv->key, dest, pkt)) goto err; break; #ifdef CONFIG_IP_ROUTE_CLASSID case NFT_META_RTCLASSID: if (!nft_meta_get_eval_rtclassid(skb, dest)) goto err; break; #endif #ifdef CONFIG_NETWORK_SECMARK case NFT_META_SECMARK: *dest = skb->secmark; break; #endif case NFT_META_PKTTYPE: if (skb->pkt_type != PACKET_LOOPBACK) { nft_reg_store8(dest, skb->pkt_type); break; } if (!nft_meta_get_eval_pkttype_lo(pkt, dest)) goto err; break; case NFT_META_CPU: *dest = raw_smp_processor_id(); break; #ifdef CONFIG_CGROUP_NET_CLASSID case NFT_META_CGROUP: if (!nft_meta_get_eval_cgroup(dest, pkt)) goto err; break; #endif case NFT_META_PRANDOM: *dest = get_random_u32(); break; #ifdef CONFIG_XFRM case NFT_META_SECPATH: nft_reg_store8(dest, secpath_exists(skb)); break; #endif case NFT_META_IIFKIND: case NFT_META_OIFKIND: if (!nft_meta_get_eval_kind(priv->key, dest, pkt)) goto err; break; case NFT_META_TIME_NS: case NFT_META_TIME_DAY: case NFT_META_TIME_HOUR: nft_meta_get_eval_time(priv->key, dest); break; case NFT_META_SDIF: *dest = nft_meta_get_eval_sdif(pkt); break; case NFT_META_SDIFNAME: nft_meta_get_eval_sdifname(dest, pkt); break; default: WARN_ON(1); goto err; } return; err: regs->verdict.code = NFT_BREAK; } EXPORT_SYMBOL_GPL(nft_meta_get_eval); void nft_meta_set_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_meta *meta = nft_expr_priv(expr); struct sk_buff *skb = pkt->skb; u32 *sreg = &regs->data[meta->sreg]; u32 value = *sreg; u8 value8; switch (meta->key) { case NFT_META_MARK: skb->mark = value; break; case NFT_META_PRIORITY: skb->priority = value; break; case NFT_META_PKTTYPE: value8 = nft_reg_load8(sreg); if (skb->pkt_type != value8 && skb_pkt_type_ok(value8) && skb_pkt_type_ok(skb->pkt_type)) skb->pkt_type = value8; break; case NFT_META_NFTRACE: value8 = nft_reg_load8(sreg); skb->nf_trace = !!value8; break; #ifdef CONFIG_NETWORK_SECMARK case NFT_META_SECMARK: skb->secmark = value; break; #endif default: WARN_ON(1); } } EXPORT_SYMBOL_GPL(nft_meta_set_eval); const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = { [NFTA_META_DREG] = { .type = NLA_U32 }, [NFTA_META_KEY] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_META_SREG] = { .type = NLA_U32 }, }; EXPORT_SYMBOL_GPL(nft_meta_policy); int nft_meta_get_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_meta *priv = nft_expr_priv(expr); unsigned int len; priv->key = 
ntohl(nla_get_be32(tb[NFTA_META_KEY])); switch (priv->key) { case NFT_META_PROTOCOL: case NFT_META_IIFTYPE: case NFT_META_OIFTYPE: len = sizeof(u16); break; case NFT_META_NFPROTO: case NFT_META_L4PROTO: case NFT_META_LEN: case NFT_META_PRIORITY: case NFT_META_MARK: case NFT_META_IIF: case NFT_META_OIF: case NFT_META_SDIF: case NFT_META_SKUID: case NFT_META_SKGID: #ifdef CONFIG_IP_ROUTE_CLASSID case NFT_META_RTCLASSID: #endif #ifdef CONFIG_NETWORK_SECMARK case NFT_META_SECMARK: #endif case NFT_META_PKTTYPE: case NFT_META_CPU: case NFT_META_IIFGROUP: case NFT_META_OIFGROUP: #ifdef CONFIG_CGROUP_NET_CLASSID case NFT_META_CGROUP: #endif len = sizeof(u32); break; case NFT_META_IIFNAME: case NFT_META_OIFNAME: case NFT_META_IIFKIND: case NFT_META_OIFKIND: case NFT_META_SDIFNAME: len = IFNAMSIZ; break; case NFT_META_PRANDOM: len = sizeof(u32); break; #ifdef CONFIG_XFRM case NFT_META_SECPATH: len = sizeof(u8); break; #endif case NFT_META_TIME_NS: len = sizeof(u64); break; case NFT_META_TIME_DAY: len = sizeof(u8); break; case NFT_META_TIME_HOUR: len = sizeof(u32); break; default: return -EOPNOTSUPP; } priv->len = len; return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg, NULL, NFT_DATA_VALUE, len); } EXPORT_SYMBOL_GPL(nft_meta_get_init); static int nft_meta_get_validate_sdif(const struct nft_ctx *ctx) { unsigned int hooks; switch (ctx->family) { case NFPROTO_IPV4: case NFPROTO_IPV6: case NFPROTO_INET: hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD); break; default: return -EOPNOTSUPP; } return nft_chain_validate_hooks(ctx->chain, hooks); } static int nft_meta_get_validate_xfrm(const struct nft_ctx *ctx) { #ifdef CONFIG_XFRM unsigned int hooks; switch (ctx->family) { case NFPROTO_NETDEV: hooks = 1 << NF_NETDEV_INGRESS; break; case NFPROTO_IPV4: case NFPROTO_IPV6: case NFPROTO_INET: hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD); break; default: return -EOPNOTSUPP; } return nft_chain_validate_hooks(ctx->chain, hooks); #else return 0; #endif } static int nft_meta_get_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nft_data **data) { const struct nft_meta *priv = nft_expr_priv(expr); switch (priv->key) { case NFT_META_SECPATH: return nft_meta_get_validate_xfrm(ctx); case NFT_META_SDIF: case NFT_META_SDIFNAME: return nft_meta_get_validate_sdif(ctx); default: break; } return 0; } int nft_meta_set_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nft_data **data) { struct nft_meta *priv = nft_expr_priv(expr); unsigned int hooks; if (priv->key != NFT_META_PKTTYPE) return 0; switch (ctx->family) { case NFPROTO_BRIDGE: hooks = 1 << NF_BR_PRE_ROUTING; break; case NFPROTO_NETDEV: hooks = 1 << NF_NETDEV_INGRESS; break; case NFPROTO_IPV4: case NFPROTO_IPV6: case NFPROTO_INET: hooks = 1 << NF_INET_PRE_ROUTING; break; default: return -EOPNOTSUPP; } return nft_chain_validate_hooks(ctx->chain, hooks); } EXPORT_SYMBOL_GPL(nft_meta_set_validate); int nft_meta_set_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_meta *priv = nft_expr_priv(expr); unsigned int len; int err; priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY])); switch (priv->key) { case NFT_META_MARK: case NFT_META_PRIORITY: #ifdef CONFIG_NETWORK_SECMARK case NFT_META_SECMARK: #endif len = sizeof(u32); break; case NFT_META_NFTRACE: len = sizeof(u8); break; case NFT_META_PKTTYPE: len = sizeof(u8); break; default: return -EOPNOTSUPP; } priv->len = len; err = 
nft_parse_register_load(tb[NFTA_META_SREG], &priv->sreg, len); if (err < 0) return err; if (priv->key == NFT_META_NFTRACE) static_branch_inc(&nft_trace_enabled); return 0; } EXPORT_SYMBOL_GPL(nft_meta_set_init); int nft_meta_get_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_meta *priv = nft_expr_priv(expr); if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key))) goto nla_put_failure; if (nft_dump_register(skb, NFTA_META_DREG, priv->dreg)) goto nla_put_failure; return 0; nla_put_failure: return -1; } EXPORT_SYMBOL_GPL(nft_meta_get_dump); int nft_meta_set_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_meta *priv = nft_expr_priv(expr); if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key))) goto nla_put_failure; if (nft_dump_register(skb, NFTA_META_SREG, priv->sreg)) goto nla_put_failure; return 0; nla_put_failure: return -1; } EXPORT_SYMBOL_GPL(nft_meta_set_dump); void nft_meta_set_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { const struct nft_meta *priv = nft_expr_priv(expr); if (priv->key == NFT_META_NFTRACE) static_branch_dec(&nft_trace_enabled); } EXPORT_SYMBOL_GPL(nft_meta_set_destroy); static int nft_meta_get_offload(struct nft_offload_ctx *ctx, struct nft_flow_rule *flow, const struct nft_expr *expr) { const struct nft_meta *priv = nft_expr_priv(expr); struct nft_offload_reg *reg = &ctx->regs[priv->dreg]; switch (priv->key) { case NFT_META_PROTOCOL: NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_BASIC, basic, n_proto, sizeof(__u16), reg); nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK); break; case NFT_META_L4PROTO: NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto, sizeof(__u8), reg); nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT); break; case NFT_META_IIF: NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_META, meta, ingress_ifindex, sizeof(__u32), reg); break; case NFT_META_IIFTYPE: NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_META, meta, ingress_iftype, sizeof(__u16), reg); break; default: return -EOPNOTSUPP; } return 0; } bool nft_meta_get_reduce(struct nft_regs_track *track, const struct nft_expr *expr) { const struct nft_meta *priv = nft_expr_priv(expr); const struct nft_meta *meta; if (!nft_reg_track_cmp(track, expr, priv->dreg)) { nft_reg_track_update(track, expr, priv->dreg, priv->len); return false; } meta = nft_expr_priv(track->regs[priv->dreg].selector); if (priv->key != meta->key || priv->dreg != meta->dreg) { nft_reg_track_update(track, expr, priv->dreg, priv->len); return false; } if (!track->regs[priv->dreg].bitwise) return true; return nft_expr_reduce_bitwise(track, expr); } EXPORT_SYMBOL_GPL(nft_meta_get_reduce); static const struct nft_expr_ops nft_meta_get_ops = { .type = &nft_meta_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)), .eval = nft_meta_get_eval, .init = nft_meta_get_init, .dump = nft_meta_get_dump, .reduce = nft_meta_get_reduce, .validate = nft_meta_get_validate, .offload = nft_meta_get_offload, }; static bool nft_meta_set_reduce(struct nft_regs_track *track, const struct nft_expr *expr) { int i; for (i = 0; i < NFT_REG32_NUM; i++) { if (!track->regs[i].selector) continue; if (track->regs[i].selector->ops != &nft_meta_get_ops) continue; __nft_reg_track_cancel(track, i); } return false; } static const struct nft_expr_ops nft_meta_set_ops = { .type = &nft_meta_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)), .eval = nft_meta_set_eval, .init = nft_meta_set_init, .destroy = nft_meta_set_destroy, .dump = nft_meta_set_dump, 
.reduce = nft_meta_set_reduce, .validate = nft_meta_set_validate, }; static const struct nft_expr_ops * nft_meta_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { if (tb[NFTA_META_KEY] == NULL) return ERR_PTR(-EINVAL); if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG]) return ERR_PTR(-EINVAL); #if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE) && IS_MODULE(CONFIG_NFT_BRIDGE_META) if (ctx->family == NFPROTO_BRIDGE) return ERR_PTR(-EAGAIN); #endif if (tb[NFTA_META_DREG]) return &nft_meta_get_ops; if (tb[NFTA_META_SREG]) return &nft_meta_set_ops; return ERR_PTR(-EINVAL); } static int nft_meta_inner_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_meta *priv = nft_expr_priv(expr); unsigned int len; if (!tb[NFTA_META_KEY] || !tb[NFTA_META_DREG]) return -EINVAL; priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY])); switch (priv->key) { case NFT_META_PROTOCOL: len = sizeof(u16); break; case NFT_META_L4PROTO: len = sizeof(u32); break; default: return -EOPNOTSUPP; } priv->len = len; return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg, NULL, NFT_DATA_VALUE, len); } void nft_meta_inner_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt, struct nft_inner_tun_ctx *tun_ctx) { const struct nft_meta *priv = nft_expr_priv(expr); u32 *dest = &regs->data[priv->dreg]; switch (priv->key) { case NFT_META_PROTOCOL: nft_reg_store16(dest, (__force u16)tun_ctx->llproto); break; case NFT_META_L4PROTO: if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TH)) goto err; nft_reg_store8(dest, tun_ctx->l4proto); break; default: WARN_ON_ONCE(1); goto err; } return; err: regs->verdict.code = NFT_BREAK; } EXPORT_SYMBOL_GPL(nft_meta_inner_eval); static const struct nft_expr_ops nft_meta_inner_ops = { .type = &nft_meta_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)), .init = nft_meta_inner_init, .dump = nft_meta_get_dump, /* direct call to nft_meta_inner_eval(). 
*/ }; struct nft_expr_type nft_meta_type __read_mostly = { .name = "meta", .select_ops = nft_meta_select_ops, .inner_ops = &nft_meta_inner_ops, .policy = nft_meta_policy, .maxattr = NFTA_META_MAX, .owner = THIS_MODULE, }; #ifdef CONFIG_NETWORK_SECMARK struct nft_secmark { u32 secid; char *ctx; }; static const struct nla_policy nft_secmark_policy[NFTA_SECMARK_MAX + 1] = { [NFTA_SECMARK_CTX] = { .type = NLA_STRING, .len = NFT_SECMARK_CTX_MAXLEN }, }; static int nft_secmark_compute_secid(struct nft_secmark *priv) { u32 tmp_secid = 0; int err; err = security_secctx_to_secid(priv->ctx, strlen(priv->ctx), &tmp_secid); if (err) return err; if (!tmp_secid) return -ENOENT; err = security_secmark_relabel_packet(tmp_secid); if (err) return err; priv->secid = tmp_secid; return 0; } static void nft_secmark_obj_eval(struct nft_object *obj, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_secmark *priv = nft_obj_data(obj); struct sk_buff *skb = pkt->skb; skb->secmark = priv->secid; } static int nft_secmark_obj_init(const struct nft_ctx *ctx, const struct nlattr * const tb[], struct nft_object *obj) { struct nft_secmark *priv = nft_obj_data(obj); int err; if (tb[NFTA_SECMARK_CTX] == NULL) return -EINVAL; priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL); if (!priv->ctx) return -ENOMEM; err = nft_secmark_compute_secid(priv); if (err) { kfree(priv->ctx); return err; } security_secmark_refcount_inc(); return 0; } static int nft_secmark_obj_dump(struct sk_buff *skb, struct nft_object *obj, bool reset) { struct nft_secmark *priv = nft_obj_data(obj); int err; if (nla_put_string(skb, NFTA_SECMARK_CTX, priv->ctx)) return -1; if (reset) { err = nft_secmark_compute_secid(priv); if (err) return err; } return 0; } static void nft_secmark_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj) { struct nft_secmark *priv = nft_obj_data(obj); security_secmark_refcount_dec(); kfree(priv->ctx); } static const struct nft_object_ops nft_secmark_obj_ops = { .type = &nft_secmark_obj_type, .size = sizeof(struct nft_secmark), .init = nft_secmark_obj_init, .eval = nft_secmark_obj_eval, .dump = nft_secmark_obj_dump, .destroy = nft_secmark_obj_destroy, }; struct nft_object_type nft_secmark_obj_type __read_mostly = { .type = NFT_OBJECT_SECMARK, .ops = &nft_secmark_obj_ops, .maxattr = NFTA_SECMARK_MAX, .policy = nft_secmark_policy, .owner = THIS_MODULE, }; #endif /* CONFIG_NETWORK_SECMARK */
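/*
 * nft_meta_weekday() above turns epoch time into a day of week by dividing
 * by 86400 and adding 4, because 1970-01-01 was a Thursday (day 4 when
 * Sunday is 0).  A self-contained user-space sketch of the same arithmetic
 * (the kernel's sys_tz timezone adjustment is omitted here), cross-checked
 * against libc:
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define SECS_PER_DAY	86400
#define DAYS_PER_WEEK	7

/* 0 = Sunday ... 6 = Saturday; "+ 4" accounts for the epoch being a Thursday. */
static unsigned int weekday_from_epoch(int64_t secs)
{
	uint64_t days_since_epoch = (uint64_t)secs / SECS_PER_DAY;

	return (4 + days_since_epoch) % DAYS_PER_WEEK;
}

int main(void)
{
	time_t now = time(NULL);
	struct tm *tm_utc = gmtime(&now);

	/* 1970-01-01 00:00:00 UTC -> 4 (Thursday) */
	printf("epoch: %u\n", weekday_from_epoch(0));
	/* Both values below should agree for the current UTC date. */
	printf("sketch=%u libc=%d\n", weekday_from_epoch((int64_t)now),
	       tm_utc->tm_wday);
	return 0;
}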
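/*
 * nftables registers are 32-bit slots; keys narrower than 32 bits are written
 * through nft_reg_store8()/nft_reg_store16() in nft_meta_get_eval(), and the
 * chosen width is what nft_meta_get_init() records in priv->len.  The helpers
 * below are illustrative stand-ins that assume the general pattern of those
 * stores (zero the slot, then write the narrow value at its start); they are
 * not the kernel helpers.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void reg_store8(uint32_t *reg, uint8_t val)
{
	*reg = 0;
	memcpy(reg, &val, sizeof(val));	/* unused bytes stay zero */
}

static void reg_store16(uint32_t *reg, uint16_t val)
{
	*reg = 0;
	memcpy(reg, &val, sizeof(val));
}

int main(void)
{
	uint32_t regs[3] = { 0 };
	const uint8_t *raw = (const uint8_t *)regs;
	size_t i;

	reg_store8(&regs[0], 17);	/* one-byte key, e.g. a protocol number */
	reg_store16(&regs[1], 0x0800);	/* 16-bit key, e.g. an ethertype */
	regs[2] = 0xdeadbeef;		/* 32-bit keys are plain stores */

	for (i = 0; i < sizeof(regs); i++)
		printf("%02x%s", raw[i], (i % 4 == 3) ? "\n" : " ");
	return 0;
}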
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. * * SipHash: a fast short-input PRF * https://131002.net/siphash/ * * This implementation is specifically for SipHash2-4 for a secure PRF * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for * hashtables.
*/ #include <linux/siphash.h> #include <asm/unaligned.h> #if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 #include <linux/dcache.h> #include <asm/word-at-a-time.h> #endif #define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3) #define PREAMBLE(len) \ u64 v0 = SIPHASH_CONST_0; \ u64 v1 = SIPHASH_CONST_1; \ u64 v2 = SIPHASH_CONST_2; \ u64 v3 = SIPHASH_CONST_3; \ u64 b = ((u64)(len)) << 56; \ v3 ^= key->key[1]; \ v2 ^= key->key[0]; \ v1 ^= key->key[1]; \ v0 ^= key->key[0]; #define POSTAMBLE \ v3 ^= b; \ SIPROUND; \ SIPROUND; \ v0 ^= b; \ v2 ^= 0xff; \ SIPROUND; \ SIPROUND; \ SIPROUND; \ SIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); const u8 left = len & (sizeof(u64) - 1); u64 m; PREAMBLE(len) for (; data != end; data += sizeof(u64)) { m = le64_to_cpup(data); v3 ^= m; SIPROUND; SIPROUND; v0 ^= m; } #if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 if (left) b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & bytemask_from_count(left))); #else switch (left) { case 7: b |= ((u64)end[6]) << 48; fallthrough; case 6: b |= ((u64)end[5]) << 40; fallthrough; case 5: b |= ((u64)end[4]) << 32; fallthrough; case 4: b |= le32_to_cpup(data); break; case 3: b |= ((u64)end[2]) << 16; fallthrough; case 2: b |= le16_to_cpup(data); break; case 1: b |= end[0]; } #endif POSTAMBLE } EXPORT_SYMBOL(__siphash_aligned); #endif u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); const u8 left = len & (sizeof(u64) - 1); u64 m; PREAMBLE(len) for (; data != end; data += sizeof(u64)) { m = get_unaligned_le64(data); v3 ^= m; SIPROUND; SIPROUND; v0 ^= m; } #if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 if (left) b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & bytemask_from_count(left))); #else switch (left) { case 7: b |= ((u64)end[6]) << 48; fallthrough; case 6: b |= ((u64)end[5]) << 40; fallthrough; case 5: b |= ((u64)end[4]) << 32; fallthrough; case 4: b |= get_unaligned_le32(end); break; case 3: b |= ((u64)end[2]) << 16; fallthrough; case 2: b |= get_unaligned_le16(end); break; case 1: b |= end[0]; } #endif POSTAMBLE } EXPORT_SYMBOL(__siphash_unaligned); /** * siphash_1u64 - compute 64-bit siphash PRF value of a u64 * @first: first u64 * @key: the siphash key */ u64 siphash_1u64(const u64 first, const siphash_key_t *key) { PREAMBLE(8) v3 ^= first; SIPROUND; SIPROUND; v0 ^= first; POSTAMBLE } EXPORT_SYMBOL(siphash_1u64); /** * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64 * @first: first u64 * @second: second u64 * @key: the siphash key */ u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key) { PREAMBLE(16) v3 ^= first; SIPROUND; SIPROUND; v0 ^= first; v3 ^= second; SIPROUND; SIPROUND; v0 ^= second; POSTAMBLE } EXPORT_SYMBOL(siphash_2u64); /** * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64 * @first: first u64 * @second: second u64 * @third: third u64 * @key: the siphash key */ u64 siphash_3u64(const u64 first, const u64 second, const u64 third, const siphash_key_t *key) { PREAMBLE(24) v3 ^= first; SIPROUND; SIPROUND; v0 ^= first; v3 ^= second; SIPROUND; SIPROUND; v0 ^= second; v3 ^= third; SIPROUND; SIPROUND; v0 ^= third; POSTAMBLE } EXPORT_SYMBOL(siphash_3u64); /** * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64 * @first: first u64 * @second: second 
u64 * @third: third u64 * @forth: forth u64 * @key: the siphash key */ u64 siphash_4u64(const u64 first, const u64 second, const u64 third, const u64 forth, const siphash_key_t *key) { PREAMBLE(32) v3 ^= first; SIPROUND; SIPROUND; v0 ^= first; v3 ^= second; SIPROUND; SIPROUND; v0 ^= second; v3 ^= third; SIPROUND; SIPROUND; v0 ^= third; v3 ^= forth; SIPROUND; SIPROUND; v0 ^= forth; POSTAMBLE } EXPORT_SYMBOL(siphash_4u64); u64 siphash_1u32(const u32 first, const siphash_key_t *key) { PREAMBLE(4) b |= first; POSTAMBLE } EXPORT_SYMBOL(siphash_1u32); u64 siphash_3u32(const u32 first, const u32 second, const u32 third, const siphash_key_t *key) { u64 combined = (u64)second << 32 | first; PREAMBLE(12) v3 ^= combined; SIPROUND; SIPROUND; v0 ^= combined; b |= third; POSTAMBLE } EXPORT_SYMBOL(siphash_3u32); #if BITS_PER_LONG == 64 /* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3. */ #define HSIPROUND SIPROUND #define HPREAMBLE(len) PREAMBLE(len) #define HPOSTAMBLE \ v3 ^= b; \ HSIPROUND; \ v0 ^= b; \ v2 ^= 0xff; \ HSIPROUND; \ HSIPROUND; \ HSIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); const u8 left = len & (sizeof(u64) - 1); u64 m; HPREAMBLE(len) for (; data != end; data += sizeof(u64)) { m = le64_to_cpup(data); v3 ^= m; HSIPROUND; v0 ^= m; } #if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 if (left) b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & bytemask_from_count(left))); #else switch (left) { case 7: b |= ((u64)end[6]) << 48; fallthrough; case 6: b |= ((u64)end[5]) << 40; fallthrough; case 5: b |= ((u64)end[4]) << 32; fallthrough; case 4: b |= le32_to_cpup(data); break; case 3: b |= ((u64)end[2]) << 16; fallthrough; case 2: b |= le16_to_cpup(data); break; case 1: b |= end[0]; } #endif HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_aligned); #endif u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); const u8 left = len & (sizeof(u64) - 1); u64 m; HPREAMBLE(len) for (; data != end; data += sizeof(u64)) { m = get_unaligned_le64(data); v3 ^= m; HSIPROUND; v0 ^= m; } #if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 if (left) b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & bytemask_from_count(left))); #else switch (left) { case 7: b |= ((u64)end[6]) << 48; fallthrough; case 6: b |= ((u64)end[5]) << 40; fallthrough; case 5: b |= ((u64)end[4]) << 32; fallthrough; case 4: b |= get_unaligned_le32(end); break; case 3: b |= ((u64)end[2]) << 16; fallthrough; case 2: b |= get_unaligned_le16(end); break; case 1: b |= end[0]; } #endif HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_unaligned); /** * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32 * @first: first u32 * @key: the hsiphash key */ u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) { HPREAMBLE(4) b |= first; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_1u32); /** * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32 * @first: first u32 * @second: second u32 * @key: the hsiphash key */ u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) { u64 combined = (u64)second << 32 | first; HPREAMBLE(8) v3 ^= combined; HSIPROUND; v0 ^= combined; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_2u32); /** * hsiphash_3u32 - compute 
32-bit hsiphash PRF value of 3 u32 * @first: first u32 * @second: second u32 * @third: third u32 * @key: the hsiphash key */ u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third, const hsiphash_key_t *key) { u64 combined = (u64)second << 32 | first; HPREAMBLE(12) v3 ^= combined; HSIPROUND; v0 ^= combined; b |= third; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_3u32); /** * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 * @first: first u32 * @second: second u32 * @third: third u32 * @forth: forth u32 * @key: the hsiphash key */ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, const u32 forth, const hsiphash_key_t *key) { u64 combined = (u64)second << 32 | first; HPREAMBLE(16) v3 ^= combined; HSIPROUND; v0 ^= combined; combined = (u64)forth << 32 | third; v3 ^= combined; HSIPROUND; v0 ^= combined; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_4u32); #else #define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3) #define HPREAMBLE(len) \ u32 v0 = HSIPHASH_CONST_0; \ u32 v1 = HSIPHASH_CONST_1; \ u32 v2 = HSIPHASH_CONST_2; \ u32 v3 = HSIPHASH_CONST_3; \ u32 b = ((u32)(len)) << 24; \ v3 ^= key->key[1]; \ v2 ^= key->key[0]; \ v1 ^= key->key[1]; \ v0 ^= key->key[0]; #define HPOSTAMBLE \ v3 ^= b; \ HSIPROUND; \ v0 ^= b; \ v2 ^= 0xff; \ HSIPROUND; \ HSIPROUND; \ HSIPROUND; \ return v1 ^ v3; #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u32)); const u8 left = len & (sizeof(u32) - 1); u32 m; HPREAMBLE(len) for (; data != end; data += sizeof(u32)) { m = le32_to_cpup(data); v3 ^= m; HSIPROUND; v0 ^= m; } switch (left) { case 3: b |= ((u32)end[2]) << 16; fallthrough; case 2: b |= le16_to_cpup(data); break; case 1: b |= end[0]; } HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_aligned); #endif u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u32)); const u8 left = len & (sizeof(u32) - 1); u32 m; HPREAMBLE(len) for (; data != end; data += sizeof(u32)) { m = get_unaligned_le32(data); v3 ^= m; HSIPROUND; v0 ^= m; } switch (left) { case 3: b |= ((u32)end[2]) << 16; fallthrough; case 2: b |= get_unaligned_le16(end); break; case 1: b |= end[0]; } HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_unaligned); /** * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32 * @first: first u32 * @key: the hsiphash key */ u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) { HPREAMBLE(4) v3 ^= first; HSIPROUND; v0 ^= first; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_1u32); /** * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32 * @first: first u32 * @second: second u32 * @key: the hsiphash key */ u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) { HPREAMBLE(8) v3 ^= first; HSIPROUND; v0 ^= first; v3 ^= second; HSIPROUND; v0 ^= second; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_2u32); /** * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32 * @first: first u32 * @second: second u32 * @third: third u32 * @key: the hsiphash key */ u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third, const hsiphash_key_t *key) { HPREAMBLE(12) v3 ^= first; HSIPROUND; v0 ^= first; v3 ^= second; HSIPROUND; v0 ^= second; v3 ^= third; HSIPROUND; v0 ^= third; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_3u32); /** * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 * @first: first u32 * @second: second u32 * @third: third u32 * @forth: forth u32 * @key: the hsiphash key */ u32 
hsiphash_4u32(const u32 first, const u32 second, const u32 third, const u32 forth, const hsiphash_key_t *key) { HPREAMBLE(16) v3 ^= first; HSIPROUND; v0 ^= first; v3 ^= second; HSIPROUND; v0 ^= second; v3 ^= third; HSIPROUND; v0 ^= third; v3 ^= forth; HSIPROUND; v0 ^= forth; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_4u32); #endif
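/*
 * SIPROUND above expands to SIPHASH_PERMUTATION(v0, v1, v2, v3) from
 * <linux/siphash.h>.  For reference, a user-space version of the standard
 * SipHash round (the add-rotate-xor permutation from the SipHash paper);
 * it is assumed to match, not copied from, the kernel header.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t rol64(uint64_t v, unsigned int s)
{
	return (v << s) | (v >> (64 - s));
}

/* One SipHash round over the 256-bit state (v0, v1, v2, v3). */
static void sipround(uint64_t *v0, uint64_t *v1, uint64_t *v2, uint64_t *v3)
{
	*v0 += *v1; *v1 = rol64(*v1, 13); *v1 ^= *v0; *v0 = rol64(*v0, 32);
	*v2 += *v3; *v3 = rol64(*v3, 16); *v3 ^= *v2;
	*v0 += *v3; *v3 = rol64(*v3, 21); *v3 ^= *v0;
	*v2 += *v1; *v1 = rol64(*v1, 17); *v1 ^= *v2; *v2 = rol64(*v2, 32);
}

int main(void)
{
	uint64_t v0 = 0, v1 = 1, v2 = 2, v3 = 3;

	sipround(&v0, &v1, &v2, &v3);	/* show one round of mixing */
	printf("%016llx %016llx %016llx %016llx\n",
	       (unsigned long long)v0, (unsigned long long)v1,
	       (unsigned long long)v2, (unsigned long long)v3);
	return 0;
}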
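/*
 * In every variant above, the final message word b carries the input length
 * in its top byte (the "len << 56" in the preambles) with the leftover tail
 * bytes packed little-endian underneath, which is what the switch (left)
 * ladders implement.  A minimal user-space illustration of that packing
 * (not kernel code):
 */
#include <stdint.h>
#include <stdio.h>

/*
 * Build the final SipHash message word: leftover tail bytes packed
 * little-endian into the low bytes, total length (mod 256) in byte 7.
 */
static uint64_t pack_tail(const uint8_t *tail, size_t left, size_t total_len)
{
	uint64_t b = (uint64_t)(total_len & 0xff) << 56;
	size_t i;

	for (i = 0; i < left; i++)
		b |= (uint64_t)tail[i] << (8 * i);
	return b;
}

int main(void)
{
	/* 11-byte message: one full 8-byte block plus 3 leftover bytes. */
	const uint8_t msg[11] = "hello world";	/* the trailing NUL is not stored */
	uint64_t b = pack_tail(msg + 8, 11 % 8, 11);

	/* Expect 0x0b00000000646c72: len=11 in the top byte,
	 * 'r' 'l' 'd' (0x72 0x6c 0x64) in the low bytes. */
	printf("%016llx\n", (unsigned long long)b);
	return 0;
}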
30 13 3 13 46 13 13 13 13 13 3 3 13 13 13 13 3 3 30 28 30 25 25 25 11 6 8 5 14 14 14 5 5 6 336 331 5 3 27 3 2 24 3 2 2 33 33 11 11 8 3 3 3 4 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Page Attribute Table (PAT) support: handle memory caching attributes in page tables.
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 *
 * Basic principles:
 *
 * PAT is a CPU feature supported by all modern x86 CPUs, to allow the firmware and
 * the kernel to set one of a handful of 'caching type' attributes for physical
 * memory ranges: uncached, write-combining, write-through, write-protected,
 * and the most commonly used and default attribute: write-back caching.
 *
 * PAT support supersedes and augments MTRR support in a compatible fashion: MTRR is
 * a hardware interface to enumerate a limited number of physical memory ranges
 * and set their caching attributes explicitly, programmed into the CPU via MSRs.
 * Even modern CPUs have MTRRs enabled - but these are typically not touched
 * by the kernel or by user-space (such as the X server), we rely on PAT for any
 * additional cache attribute logic.
 *
 * PAT doesn't work via explicit memory ranges, but uses page table entries to add
 * cache attribute information to the mapped memory range: there's 3 bits used,
 * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT), with the 8 possible values mapped by the
 * CPU to actual cache attributes via an MSR loaded into the CPU (MSR_IA32_CR_PAT).
 *
 * ( There's a metric ton of finer details, such as compatibility with CPU quirks
 *   that only support 4 types of PAT entries, and interaction with MTRRs, see
 *   below for details.
) */ #include <linux/seq_file.h> #include <linux/memblock.h> #include <linux/debugfs.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/pfn_t.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/fs.h> #include <linux/rbtree.h> #include <asm/cacheflush.h> #include <asm/cacheinfo.h> #include <asm/processor.h> #include <asm/tlbflush.h> #include <asm/x86_init.h> #include <asm/fcntl.h> #include <asm/e820/api.h> #include <asm/mtrr.h> #include <asm/page.h> #include <asm/msr.h> #include <asm/memtype.h> #include <asm/io.h> #include "memtype.h" #include "../mm_internal.h" #undef pr_fmt #define pr_fmt(fmt) "" fmt static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT); static u64 __ro_after_init pat_msr_val; /* * PAT support is enabled by default, but can be disabled for * various user-requested or hardware-forced reasons: */ static void __init pat_disable(const char *msg_reason) { if (pat_disabled) return; pat_disabled = true; pr_info("x86/PAT: %s\n", msg_reason); memory_caching_control &= ~CACHE_PAT; } static int __init nopat(char *str) { pat_disable("PAT support disabled via boot option."); return 0; } early_param("nopat", nopat); bool pat_enabled(void) { return !pat_disabled; } EXPORT_SYMBOL_GPL(pat_enabled); int pat_debug_enable; static int __init pat_debug_setup(char *str) { pat_debug_enable = 1; return 1; } __setup("debugpat", pat_debug_setup); #ifdef CONFIG_X86_PAT /* * X86 PAT uses page flags arch_1 and uncached together to keep track of * memory type of pages that have backing page struct. * * X86 PAT supports 4 different memory types: * - _PAGE_CACHE_MODE_WB * - _PAGE_CACHE_MODE_WC * - _PAGE_CACHE_MODE_UC_MINUS * - _PAGE_CACHE_MODE_WT * * _PAGE_CACHE_MODE_WB is the default type. 
*/ #define _PGMT_WB 0 #define _PGMT_WC (1UL << PG_arch_1) #define _PGMT_UC_MINUS (1UL << PG_uncached) #define _PGMT_WT (1UL << PG_uncached | 1UL << PG_arch_1) #define _PGMT_MASK (1UL << PG_uncached | 1UL << PG_arch_1) #define _PGMT_CLEAR_MASK (~_PGMT_MASK) static inline enum page_cache_mode get_page_memtype(struct page *pg) { unsigned long pg_flags = pg->flags & _PGMT_MASK; if (pg_flags == _PGMT_WB) return _PAGE_CACHE_MODE_WB; else if (pg_flags == _PGMT_WC) return _PAGE_CACHE_MODE_WC; else if (pg_flags == _PGMT_UC_MINUS) return _PAGE_CACHE_MODE_UC_MINUS; else return _PAGE_CACHE_MODE_WT; } static inline void set_page_memtype(struct page *pg, enum page_cache_mode memtype) { unsigned long memtype_flags; unsigned long old_flags; unsigned long new_flags; switch (memtype) { case _PAGE_CACHE_MODE_WC: memtype_flags = _PGMT_WC; break; case _PAGE_CACHE_MODE_UC_MINUS: memtype_flags = _PGMT_UC_MINUS; break; case _PAGE_CACHE_MODE_WT: memtype_flags = _PGMT_WT; break; case _PAGE_CACHE_MODE_WB: default: memtype_flags = _PGMT_WB; break; } old_flags = READ_ONCE(pg->flags); do { new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags; } while (!try_cmpxchg(&pg->flags, &old_flags, new_flags)); } #else static inline enum page_cache_mode get_page_memtype(struct page *pg) { return -1; } static inline void set_page_memtype(struct page *pg, enum page_cache_mode memtype) { } #endif enum { PAT_UC = 0, /* uncached */ PAT_WC = 1, /* Write combining */ PAT_WT = 4, /* Write Through */ PAT_WP = 5, /* Write Protected */ PAT_WB = 6, /* Write Back (default) */ PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */ }; #define CM(c) (_PAGE_CACHE_MODE_ ## c) static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val, char *msg) { enum page_cache_mode cache; char *cache_mode; switch (pat_val) { case PAT_UC: cache = CM(UC); cache_mode = "UC "; break; case PAT_WC: cache = CM(WC); cache_mode = "WC "; break; case PAT_WT: cache = CM(WT); cache_mode = "WT "; break; case PAT_WP: cache = CM(WP); cache_mode = "WP "; break; case PAT_WB: cache = CM(WB); cache_mode = "WB "; break; case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break; default: cache = CM(WB); cache_mode = "WB "; break; } memcpy(msg, cache_mode, 4); return cache; } #undef CM /* * Update the cache mode to pgprot translation tables according to PAT * configuration. * Using lower indices is preferred, so we start with highest index. */ static void __init init_cache_modes(u64 pat) { enum page_cache_mode cache; char pat_msg[33]; int i; pat_msg[32] = 0; for (i = 7; i >= 0; i--) { cache = pat_get_cache_mode((pat >> (i * 8)) & 7, pat_msg + 4 * i); update_cache_mode_entry(i, cache); } pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg); } void pat_cpu_init(void) { if (!boot_cpu_has(X86_FEATURE_PAT)) { /* * If this happens we are on a secondary CPU, but switched to * PAT on the boot CPU. We have no way to undo PAT. */ panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n"); } wrmsrl(MSR_IA32_CR_PAT, pat_msr_val); __flush_tlb_all(); } /** * pat_bp_init - Initialize the PAT MSR value and PAT table * * This function initializes PAT MSR value and PAT table with an OS-defined * value to enable additional cache attributes, WC, WT and WP. * * This function prepares the calls of pat_cpu_init() via cache_cpu_init() * on all CPUs. 
*/ void __init pat_bp_init(void) { struct cpuinfo_x86 *c = &boot_cpu_data; #define PAT(p0, p1, p2, p3, p4, p5, p6, p7) \ (((u64)PAT_ ## p0) | ((u64)PAT_ ## p1 << 8) | \ ((u64)PAT_ ## p2 << 16) | ((u64)PAT_ ## p3 << 24) | \ ((u64)PAT_ ## p4 << 32) | ((u64)PAT_ ## p5 << 40) | \ ((u64)PAT_ ## p6 << 48) | ((u64)PAT_ ## p7 << 56)) if (!IS_ENABLED(CONFIG_X86_PAT)) pr_info_once("x86/PAT: PAT support disabled because CONFIG_X86_PAT is disabled in the kernel.\n"); if (!cpu_feature_enabled(X86_FEATURE_PAT)) pat_disable("PAT not supported by the CPU."); else rdmsrl(MSR_IA32_CR_PAT, pat_msr_val); if (!pat_msr_val) { pat_disable("PAT support disabled by the firmware."); /* * No PAT. Emulate the PAT table that corresponds to the two * cache bits, PWT (Write Through) and PCD (Cache Disable). * This setup is also the same as the BIOS default setup. * * PTE encoding: * * PCD * |PWT PAT * || slot * 00 0 WB : _PAGE_CACHE_MODE_WB * 01 1 WT : _PAGE_CACHE_MODE_WT * 10 2 UC-: _PAGE_CACHE_MODE_UC_MINUS * 11 3 UC : _PAGE_CACHE_MODE_UC * * NOTE: When WC or WP is used, it is redirected to UC- per * the default setup in __cachemode2pte_tbl[]. */ pat_msr_val = PAT(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC); } /* * Xen PV doesn't allow to set PAT MSR, but all cache modes are * supported. */ if (pat_disabled || cpu_feature_enabled(X86_FEATURE_XENPV)) { init_cache_modes(pat_msr_val); return; } if ((c->x86_vendor == X86_VENDOR_INTEL) && (((c->x86 == 0x6) && (c->x86_model <= 0xd)) || ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) { /* * PAT support with the lower four entries. Intel Pentium 2, * 3, M, and 4 are affected by PAT errata, which makes the * upper four entries unusable. To be on the safe side, we don't * use those. * * PTE encoding: * PAT * |PCD * ||PWT PAT * ||| slot * 000 0 WB : _PAGE_CACHE_MODE_WB * 001 1 WC : _PAGE_CACHE_MODE_WC * 010 2 UC-: _PAGE_CACHE_MODE_UC_MINUS * 011 3 UC : _PAGE_CACHE_MODE_UC * PAT bit unused * * NOTE: When WT or WP is used, it is redirected to UC- per * the default setup in __cachemode2pte_tbl[]. */ pat_msr_val = PAT(WB, WC, UC_MINUS, UC, WB, WC, UC_MINUS, UC); } else { /* * Full PAT support. We put WT in slot 7 to improve * robustness in the presence of errata that might cause * the high PAT bit to be ignored. This way, a buggy slot 7 * access will hit slot 3, and slot 3 is UC, so at worst * we lose performance without causing a correctness issue. * Pentium 4 erratum N46 is an example for such an erratum, * although we try not to use PAT at all on affected CPUs. * * PTE encoding: * PAT * |PCD * ||PWT PAT * ||| slot * 000 0 WB : _PAGE_CACHE_MODE_WB * 001 1 WC : _PAGE_CACHE_MODE_WC * 010 2 UC-: _PAGE_CACHE_MODE_UC_MINUS * 011 3 UC : _PAGE_CACHE_MODE_UC * 100 4 WB : Reserved * 101 5 WP : _PAGE_CACHE_MODE_WP * 110 6 UC-: Reserved * 111 7 WT : _PAGE_CACHE_MODE_WT * * The reserved slots are unused, but mapped to their * corresponding types in the presence of PAT errata. */ pat_msr_val = PAT(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT); } memory_caching_control |= CACHE_PAT; init_cache_modes(pat_msr_val); #undef PAT } static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */ /* * Does intersection of PAT memory type and MTRR memory type and returns * the resulting memory type as PAT understands it. 
* (Type in pat and mtrr will not have same value) * The intersection is based on "Effective Memory Type" tables in IA-32 * SDM vol 3a */ static unsigned long pat_x_mtrr_type(u64 start, u64 end, enum page_cache_mode req_type) { /* * Look for MTRR hint to get the effective type in case where PAT * request is for WB. */ if (req_type == _PAGE_CACHE_MODE_WB) { u8 mtrr_type, uniform; mtrr_type = mtrr_type_lookup(start, end, &uniform); if (mtrr_type != MTRR_TYPE_WRBACK) return _PAGE_CACHE_MODE_UC_MINUS; return _PAGE_CACHE_MODE_WB; } return req_type; } struct pagerange_state { unsigned long cur_pfn; int ram; int not_ram; }; static int pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg) { struct pagerange_state *state = arg; state->not_ram |= initial_pfn > state->cur_pfn; state->ram |= total_nr_pages > 0; state->cur_pfn = initial_pfn + total_nr_pages; return state->ram && state->not_ram; } static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end) { int ret = 0; unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; struct pagerange_state state = {start_pfn, 0, 0}; /* * For legacy reasons, physical address range in the legacy ISA * region is tracked as non-RAM. This will allow users of * /dev/mem to map portions of legacy ISA region, even when * some of those portions are listed(or not even listed) with * different e820 types(RAM/reserved/..) */ if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT) start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT; if (start_pfn < end_pfn) { ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &state, pagerange_is_ram_callback); } return (ret > 0) ? -1 : (state.ram ? 1 : 0); } /* * For RAM pages, we use page flags to mark the pages with appropriate type. * The page flags are limited to four types, WB (default), WC, WT and UC-. * WP request fails with -EINVAL, and UC gets redirected to UC-. Setting * a new memory type is only allowed for a page mapped with the default WB * type. * * Here we do two passes: * - Find the memtype of all the pages in the range, look for any conflicts. * - In case of no conflicts, set the new memtype for pages in the range. 
*/ static int reserve_ram_pages_type(u64 start, u64 end, enum page_cache_mode req_type, enum page_cache_mode *new_type) { struct page *page; u64 pfn; if (req_type == _PAGE_CACHE_MODE_WP) { if (new_type) *new_type = _PAGE_CACHE_MODE_UC_MINUS; return -EINVAL; } if (req_type == _PAGE_CACHE_MODE_UC) { /* We do not support strong UC */ WARN_ON_ONCE(1); req_type = _PAGE_CACHE_MODE_UC_MINUS; } for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { enum page_cache_mode type; page = pfn_to_page(pfn); type = get_page_memtype(page); if (type != _PAGE_CACHE_MODE_WB) { pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n", start, end - 1, type, req_type); if (new_type) *new_type = type; return -EBUSY; } } if (new_type) *new_type = req_type; for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { page = pfn_to_page(pfn); set_page_memtype(page, req_type); } return 0; } static int free_ram_pages_type(u64 start, u64 end) { struct page *page; u64 pfn; for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { page = pfn_to_page(pfn); set_page_memtype(page, _PAGE_CACHE_MODE_WB); } return 0; } static u64 sanitize_phys(u64 address) { /* * When changing the memtype for pages containing poison allow * for a "decoy" virtual address (bit 63 clear) passed to * set_memory_X(). __pa() on a "decoy" address results in a * physical address with bit 63 set. * * Decoy addresses are not present for 32-bit builds, see * set_mce_nospec(). */ if (IS_ENABLED(CONFIG_X86_64)) return address & __PHYSICAL_MASK; return address; } /* * req_type typically has one of the: * - _PAGE_CACHE_MODE_WB * - _PAGE_CACHE_MODE_WC * - _PAGE_CACHE_MODE_UC_MINUS * - _PAGE_CACHE_MODE_UC * - _PAGE_CACHE_MODE_WT * * If new_type is NULL, function will return an error if it cannot reserve the * region with req_type. If new_type is non-NULL, function will return * available type in new_type in case of no error. In case of any error * it will return a negative return value. */ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type, enum page_cache_mode *new_type) { struct memtype *entry_new; enum page_cache_mode actual_type; int is_range_ram; int err = 0; start = sanitize_phys(start); /* * The end address passed into this function is exclusive, but * sanitize_phys() expects an inclusive address. */ end = sanitize_phys(end - 1) + 1; if (start >= end) { WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__, start, end - 1, cattr_name(req_type)); return -EINVAL; } if (!pat_enabled()) { /* This is identical to page table setting without PAT */ if (new_type) *new_type = req_type; return 0; } /* Low ISA region is always mapped WB in page table. No need to track */ if (x86_platform.is_untracked_pat_range(start, end)) { if (new_type) *new_type = _PAGE_CACHE_MODE_WB; return 0; } /* * Call mtrr_lookup to get the type hint. This is an * optimization for /dev/mem mmap'ers into WB memory (BIOS * tools and ACPI tools). Use WB request for WB memory and use * UC_MINUS otherwise. 
*/ actual_type = pat_x_mtrr_type(start, end, req_type); if (new_type) *new_type = actual_type; is_range_ram = pat_pagerange_is_ram(start, end); if (is_range_ram == 1) { err = reserve_ram_pages_type(start, end, req_type, new_type); return err; } else if (is_range_ram < 0) { return -EINVAL; } entry_new = kzalloc(sizeof(struct memtype), GFP_KERNEL); if (!entry_new) return -ENOMEM; entry_new->start = start; entry_new->end = end; entry_new->type = actual_type; spin_lock(&memtype_lock); err = memtype_check_insert(entry_new, new_type); if (err) { pr_info("x86/PAT: memtype_reserve failed [mem %#010Lx-%#010Lx], track %s, req %s\n", start, end - 1, cattr_name(entry_new->type), cattr_name(req_type)); kfree(entry_new); spin_unlock(&memtype_lock); return err; } spin_unlock(&memtype_lock); dprintk("memtype_reserve added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n", start, end - 1, cattr_name(entry_new->type), cattr_name(req_type), new_type ? cattr_name(*new_type) : "-"); return err; } int memtype_free(u64 start, u64 end) { int is_range_ram; struct memtype *entry_old; if (!pat_enabled()) return 0; start = sanitize_phys(start); end = sanitize_phys(end); /* Low ISA region is always mapped WB. No need to track */ if (x86_platform.is_untracked_pat_range(start, end)) return 0; is_range_ram = pat_pagerange_is_ram(start, end); if (is_range_ram == 1) return free_ram_pages_type(start, end); if (is_range_ram < 0) return -EINVAL; spin_lock(&memtype_lock); entry_old = memtype_erase(start, end); spin_unlock(&memtype_lock); if (IS_ERR(entry_old)) { pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n", current->comm, current->pid, start, end - 1); return -EINVAL; } kfree(entry_old); dprintk("memtype_free request [mem %#010Lx-%#010Lx]\n", start, end - 1); return 0; } /** * lookup_memtype - Looks up the memory type for a physical address * @paddr: physical address of which memory type needs to be looked up * * Only to be called when PAT is enabled * * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS * or _PAGE_CACHE_MODE_WT. */ static enum page_cache_mode lookup_memtype(u64 paddr) { enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB; struct memtype *entry; if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE)) return rettype; if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) { struct page *page; page = pfn_to_page(paddr >> PAGE_SHIFT); return get_page_memtype(page); } spin_lock(&memtype_lock); entry = memtype_lookup(paddr); if (entry != NULL) rettype = entry->type; else rettype = _PAGE_CACHE_MODE_UC_MINUS; spin_unlock(&memtype_lock); return rettype; } /** * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type * of @pfn cannot be overridden by UC MTRR memory type. * * Only to be called when PAT is enabled. * * Returns true, if the PAT memory type of @pfn is UC, UC-, or WC. * Returns false in other cases. */ bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn) { enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn)); return cm == _PAGE_CACHE_MODE_UC || cm == _PAGE_CACHE_MODE_UC_MINUS || cm == _PAGE_CACHE_MODE_WC; } EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr); /** * memtype_reserve_io - Request a memory type mapping for a region of memory * @start: start (physical address) of the region * @end: end (physical address) of the region * @type: A pointer to memtype, with requested type. 
On success, requested * or any other compatible type that was available for the region is returned * * On success, returns 0 * On failure, returns non-zero */ int memtype_reserve_io(resource_size_t start, resource_size_t end, enum page_cache_mode *type) { resource_size_t size = end - start; enum page_cache_mode req_type = *type; enum page_cache_mode new_type; int ret; WARN_ON_ONCE(iomem_map_sanity_check(start, size)); ret = memtype_reserve(start, end, req_type, &new_type); if (ret) goto out_err; if (!is_new_memtype_allowed(start, size, req_type, new_type)) goto out_free; if (memtype_kernel_map_sync(start, size, new_type) < 0) goto out_free; *type = new_type; return 0; out_free: memtype_free(start, end); ret = -EBUSY; out_err: return ret; } /** * memtype_free_io - Release a memory type mapping for a region of memory * @start: start (physical address) of the region * @end: end (physical address) of the region */ void memtype_free_io(resource_size_t start, resource_size_t end) { memtype_free(start, end); } #ifdef CONFIG_X86_PAT int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size) { enum page_cache_mode type = _PAGE_CACHE_MODE_WC; return memtype_reserve_io(start, start + size, &type); } EXPORT_SYMBOL(arch_io_reserve_memtype_wc); void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size) { memtype_free_io(start, start + size); } EXPORT_SYMBOL(arch_io_free_memtype_wc); #endif pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size)) vma_prot = pgprot_decrypted(vma_prot); return vma_prot; } #ifdef CONFIG_STRICT_DEVMEM /* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */ static inline int range_is_allowed(unsigned long pfn, unsigned long size) { return 1; } #else /* This check is needed to avoid cache aliasing when PAT is enabled */ static inline int range_is_allowed(unsigned long pfn, unsigned long size) { u64 from = ((u64)pfn) << PAGE_SHIFT; u64 to = from + size; u64 cursor = from; if (!pat_enabled()) return 1; while (cursor < to) { if (!devmem_is_allowed(pfn)) return 0; cursor += PAGE_SIZE; pfn++; } return 1; } #endif /* CONFIG_STRICT_DEVMEM */ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB; if (!range_is_allowed(pfn, size)) return 0; if (file->f_flags & O_DSYNC) pcm = _PAGE_CACHE_MODE_UC_MINUS; *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | cachemode2protval(pcm)); return 1; } /* * Change the memory type for the physical address range in kernel identity * mapping space if that range is a part of identity map. */ int memtype_kernel_map_sync(u64 base, unsigned long size, enum page_cache_mode pcm) { unsigned long id_sz; if (base > __pa(high_memory-1)) return 0; /* * Some areas in the middle of the kernel identity range * are not mapped, for example the PCI space. */ if (!page_is_ram(base >> PAGE_SHIFT)) return 0; id_sz = (__pa(high_memory-1) <= base + size) ? __pa(high_memory) - base : size; if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) { pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n", current->comm, current->pid, cattr_name(pcm), base, (unsigned long long)(base + size-1)); return -EINVAL; } return 0; } /* * Internal interface to reserve a range of physical memory with prot. 
* Reserved non RAM regions only and after successful memtype_reserve, * this func also keeps identity mapping (if any) in sync with this new prot. */ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, int strict_prot) { int is_ram = 0; int ret; enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot); enum page_cache_mode pcm = want_pcm; is_ram = pat_pagerange_is_ram(paddr, paddr + size); /* * reserve_pfn_range() for RAM pages. We do not refcount to keep * track of number of mappings of RAM pages. We can assert that * the type requested matches the type of first page in the range. */ if (is_ram) { if (!pat_enabled()) return 0; pcm = lookup_memtype(paddr); if (want_pcm != pcm) { pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n", current->comm, current->pid, cattr_name(want_pcm), (unsigned long long)paddr, (unsigned long long)(paddr + size - 1), cattr_name(pcm)); *vma_prot = __pgprot((pgprot_val(*vma_prot) & (~_PAGE_CACHE_MASK)) | cachemode2protval(pcm)); } return 0; } ret = memtype_reserve(paddr, paddr + size, want_pcm, &pcm); if (ret) return ret; if (pcm != want_pcm) { if (strict_prot || !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) { memtype_free(paddr, paddr + size); pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n", current->comm, current->pid, cattr_name(want_pcm), (unsigned long long)paddr, (unsigned long long)(paddr + size - 1), cattr_name(pcm)); return -EINVAL; } /* * We allow returning different type than the one requested in * non strict case. */ *vma_prot = __pgprot((pgprot_val(*vma_prot) & (~_PAGE_CACHE_MASK)) | cachemode2protval(pcm)); } if (memtype_kernel_map_sync(paddr, size, pcm) < 0) { memtype_free(paddr, paddr + size); return -EINVAL; } return 0; } /* * Internal interface to free a range of physical memory. * Frees non RAM regions only. */ static void free_pfn_range(u64 paddr, unsigned long size) { int is_ram; is_ram = pat_pagerange_is_ram(paddr, paddr + size); if (is_ram == 0) memtype_free(paddr, paddr + size); } static int follow_phys(struct vm_area_struct *vma, unsigned long *prot, resource_size_t *phys) { pte_t *ptep, pte; spinlock_t *ptl; if (follow_pte(vma, vma->vm_start, &ptep, &ptl)) return -EINVAL; pte = ptep_get(ptep); /* Never return PFNs of anon folios in COW mappings. */ if (vm_normal_folio(vma, vma->vm_start, pte)) { pte_unmap_unlock(ptep, ptl); return -EINVAL; } *prot = pgprot_val(pte_pgprot(pte)); *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; pte_unmap_unlock(ptep, ptl); return 0; } static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, pgprot_t *pgprot) { unsigned long prot; VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT)); /* * We need the starting PFN and cachemode used for track_pfn_remap() * that covered the whole VMA. For most mappings, we can obtain that * information from the page tables. For COW mappings, we might now * suddenly have anon folios mapped and follow_phys() will fail. * * Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to * detect the PFN. If we need the cachemode as well, we're out of luck * for now and have to fail fork(). 
*/ if (!follow_phys(vma, &prot, paddr)) { if (pgprot) *pgprot = __pgprot(prot); return 0; } if (is_cow_mapping(vma->vm_flags)) { if (pgprot) return -EINVAL; *paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; return 0; } WARN_ON_ONCE(1); return -EINVAL; } /* * track_pfn_copy is called when vma that is covering the pfnmap gets * copied through copy_page_range(). * * If the vma has a linear pfn mapping for the entire range, we get the prot * from pte and reserve the entire vma range with single reserve_pfn_range call. */ int track_pfn_copy(struct vm_area_struct *vma) { resource_size_t paddr; unsigned long vma_size = vma->vm_end - vma->vm_start; pgprot_t pgprot; if (vma->vm_flags & VM_PAT) { if (get_pat_info(vma, &paddr, &pgprot)) return -EINVAL; /* reserve the whole chunk covered by vma. */ return reserve_pfn_range(paddr, vma_size, &pgprot, 1); } return 0; } /* * prot is passed in as a parameter for the new mapping. If the vma has * a linear pfn mapping for the entire range, or no vma is provided, * reserve the entire pfn + size range with single reserve_pfn_range * call. */ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long addr, unsigned long size) { resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; enum page_cache_mode pcm; /* reserve the whole chunk starting from paddr */ if (!vma || (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start))) { int ret; ret = reserve_pfn_range(paddr, size, prot, 0); if (ret == 0 && vma) vm_flags_set(vma, VM_PAT); return ret; } if (!pat_enabled()) return 0; /* * For anything smaller than the vma size we set prot based on the * lookup. */ pcm = lookup_memtype(paddr); /* Check memtype for the remaining pages */ while (size > PAGE_SIZE) { size -= PAGE_SIZE; paddr += PAGE_SIZE; if (pcm != lookup_memtype(paddr)) return -EINVAL; } *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) | cachemode2protval(pcm)); return 0; } void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn) { enum page_cache_mode pcm; if (!pat_enabled()) return; /* Set prot based on lookup */ pcm = lookup_memtype(pfn_t_to_phys(pfn)); *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) | cachemode2protval(pcm)); } /* * untrack_pfn is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or * can be for the entire vma (in which case pfn, size are zero). */ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, unsigned long size, bool mm_wr_locked) { resource_size_t paddr; if (vma && !(vma->vm_flags & VM_PAT)) return; /* free the chunk starting from pfn or the whole chunk */ paddr = (resource_size_t)pfn << PAGE_SHIFT; if (!paddr && !size) { if (get_pat_info(vma, &paddr, NULL)) return; size = vma->vm_end - vma->vm_start; } free_pfn_range(paddr, size); if (vma) { if (mm_wr_locked) vm_flags_clear(vma, VM_PAT); else __vm_flags_mod(vma, 0, VM_PAT); } } /* * untrack_pfn_clear is called if the following situation fits: * * 1) while mremapping a pfnmap for a new region, with the old vma after * its pfnmap page table has been removed. The new vma has a new pfnmap * to the same pfn & cache type with VM_PAT set. * 2) while duplicating vm area, the new vma fails to copy the pgtable from * old vma. 
*/ void untrack_pfn_clear(struct vm_area_struct *vma) { vm_flags_clear(vma, VM_PAT); } pgprot_t pgprot_writecombine(pgprot_t prot) { return __pgprot(pgprot_val(prot) | cachemode2protval(_PAGE_CACHE_MODE_WC)); } EXPORT_SYMBOL_GPL(pgprot_writecombine); pgprot_t pgprot_writethrough(pgprot_t prot) { return __pgprot(pgprot_val(prot) | cachemode2protval(_PAGE_CACHE_MODE_WT)); } EXPORT_SYMBOL_GPL(pgprot_writethrough); #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) /* * We are allocating a temporary printout-entry to be passed * between seq_start()/next() and seq_show(): */ static struct memtype *memtype_get_idx(loff_t pos) { struct memtype *entry_print; int ret; entry_print = kzalloc(sizeof(struct memtype), GFP_KERNEL); if (!entry_print) return NULL; spin_lock(&memtype_lock); ret = memtype_copy_nth_element(entry_print, pos); spin_unlock(&memtype_lock); /* Free it on error: */ if (ret) { kfree(entry_print); return NULL; } return entry_print; } static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) { if (*pos == 0) { ++*pos; seq_puts(seq, "PAT memtype list:\n"); } return memtype_get_idx(*pos); } static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { kfree(v); ++*pos; return memtype_get_idx(*pos); } static void memtype_seq_stop(struct seq_file *seq, void *v) { kfree(v); } static int memtype_seq_show(struct seq_file *seq, void *v) { struct memtype *entry_print = (struct memtype *)v; seq_printf(seq, "PAT: [mem 0x%016Lx-0x%016Lx] %s\n", entry_print->start, entry_print->end, cattr_name(entry_print->type)); return 0; } static const struct seq_operations memtype_seq_ops = { .start = memtype_seq_start, .next = memtype_seq_next, .stop = memtype_seq_stop, .show = memtype_seq_show, }; static int memtype_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &memtype_seq_ops); } static const struct file_operations memtype_fops = { .open = memtype_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init pat_memtype_list_init(void) { if (pat_enabled()) { debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir, NULL, &memtype_fops); } return 0; } late_initcall(pat_memtype_list_init); #endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */
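As a quick cross-check of the slot layout above: the PAT() macro in pat_bp_init() packs one encoding byte per slot into the 64-bit MSR image, and init_cache_modes() unpacks it the same way. Below is a standalone sketch (plain userspace C, not kernel code) that decodes the full-PAT configuration; the hexadecimal constant is what PAT(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT) should expand to, using the PAT_* encodings from the enum earlier in the file.

/*
 * Standalone illustration (not kernel code): decode a PAT MSR image into
 * its eight one-byte slots, using the encoding values from the enum above.
 */
#include <stdio.h>
#include <stdint.h>

static const char *pat_name(uint8_t v)
{
        switch (v) {
        case 0: return "UC";
        case 1: return "WC";
        case 4: return "WT";
        case 5: return "WP";
        case 6: return "WB";
        case 7: return "UC-";
        default: return "??";
        }
}

int main(void)
{
        /* PAT(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT) from pat_bp_init() */
        uint64_t pat = 0x0407050600070106ULL;
        int i;

        for (i = 0; i < 8; i++)
                printf("slot %d: %s\n", i, pat_name((pat >> (i * 8)) & 7));
        return 0;
}

Running it prints WB, WC, UC-, UC for slots 0-3 and WB, WP, UC-, WT for slots 4-7, matching the PTE-encoding table in the pat_bp_init() comment.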
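memtype_reserve_io() and the arch_io_reserve_memtype_wc()/arch_io_free_memtype_wc() wrappers exported above are what drivers use to claim a write-combining attribute for an MMIO range before mapping it. The sketch below shows roughly how such a caller might look; the function name, the BAR handling and the trimmed error paths are invented for illustration, while the reservation helpers, ioremap_wc() and the PCI resource accessors are real interfaces.

/*
 * Illustrative sketch: reserve a WC memtype for a prefetchable PCI BAR and
 * map it write-combining. Not taken from any real driver.
 */
#include <linux/io.h>
#include <linux/pci.h>

static void __iomem *example_map_bar_wc(struct pci_dev *pdev, int bar)
{
        resource_size_t start = pci_resource_start(pdev, bar);
        resource_size_t size = pci_resource_len(pdev, bar);
        void __iomem *regs;

        /* Fails if the range is already tracked with a conflicting type. */
        if (arch_io_reserve_memtype_wc(start, size))
                return NULL;

        regs = ioremap_wc(start, size);
        if (!regs)
                arch_io_free_memtype_wc(start, size);

        return regs;
}

The matching teardown runs in reverse order: iounmap() on the mapping, then arch_io_free_memtype_wc() for the same start and size.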
// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Realtek RTS51xx USB card reader
 *
 * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
 *
 * Author:
 *   wwang (wei_wang@realsil.com.cn)
 *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
 */

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/kernel.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/cdrom.h>

#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/usb_usual.h>

#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"

#define DRV_NAME "ums-realtek"

MODULE_DESCRIPTION("Driver for Realtek USB Card Reader");
MODULE_AUTHOR("wwang <wei_wang@realsil.com.cn>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);

static int auto_delink_en = 1;
module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");

#ifdef CONFIG_REALTEK_AUTOPM
static int ss_en = 1;
module_param(ss_en, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ss_en, "enable selective suspend");

static int ss_delay = 50;
module_param(ss_delay, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ss_delay, "seconds to delay before entering selective suspend");

enum RTS51X_STAT {
        RTS51X_STAT_INIT,
        RTS51X_STAT_IDLE,
        RTS51X_STAT_RUN,
        RTS51X_STAT_SS
};

#define POLLING_INTERVAL 50

#define rts51x_set_stat(chip, stat)     \
        ((chip)->state = (enum RTS51X_STAT)(stat))
#define rts51x_get_stat(chip)           ((chip)->state)

#define SET_LUN_READY(chip, lun)        ((chip)->lun_ready |= ((u8)1 << (lun)))
#define CLR_LUN_READY(chip, lun)        ((chip)->lun_ready &= ~((u8)1 << (lun)))
#define TST_LUN_READY(chip, lun)        ((chip)->lun_ready & ((u8)1 << (lun)))

#endif

struct rts51x_status {
        u16 vid;
        u16 pid;
        u8 cur_lun;
        u8 card_type;
        u8 total_lun;
        u16 fw_ver;
        u8 phy_exist;
        u8 multi_flag;
        u8 multi_card;
        u8 log_exist;
        union {
                u8 detailed_type1;
                u8 detailed_type2;
        } detailed_type;
        u8 function[2];
};

struct rts51x_chip {
        u16 vendor_id;
        u16 product_id;
        char max_lun;

        struct rts51x_status *status;
        int status_len;

        u32 flag;
        struct us_data *us;

#ifdef CONFIG_REALTEK_AUTOPM
        struct timer_list rts51x_suspend_timer;
        unsigned long timer_expires;
        int pwr_state;
        u8 lun_ready;
        enum RTS51X_STAT state;
        int support_auto_delink;
#endif
        /* used to back up the protocol chosen in probe1 phase */
        proto_cmnd proto_handler_backup;
};

/* flag definition */
#define FLIDX_AUTO_DELINK               0x01

#define SCSI_LUN(srb)                   ((srb)->device->lun)

/* Bit Operation */
#define SET_BIT(data, idx)              ((data) |= 1 << (idx))
#define CLR_BIT(data, idx)              ((data) &= ~(1 << (idx)))
#define CHK_BIT(data, idx)              ((data) & (1 << (idx)))

#define SET_AUTO_DELINK(chip)           ((chip)->flag |= FLIDX_AUTO_DELINK)
#define CLR_AUTO_DELINK(chip)           ((chip)->flag
&= ~FLIDX_AUTO_DELINK) #define CHK_AUTO_DELINK(chip) ((chip)->flag & FLIDX_AUTO_DELINK) #define RTS51X_GET_VID(chip) ((chip)->vendor_id) #define RTS51X_GET_PID(chip) ((chip)->product_id) #define VENDOR_ID(chip) ((chip)->status[0].vid) #define PRODUCT_ID(chip) ((chip)->status[0].pid) #define FW_VERSION(chip) ((chip)->status[0].fw_ver) #define STATUS_LEN(chip) ((chip)->status_len) #define STATUS_SUCCESS 0 #define STATUS_FAIL 1 /* Check card reader function */ #define SUPPORT_DETAILED_TYPE1(chip) \ CHK_BIT((chip)->status[0].function[0], 1) #define SUPPORT_OT(chip) \ CHK_BIT((chip)->status[0].function[0], 2) #define SUPPORT_OC(chip) \ CHK_BIT((chip)->status[0].function[0], 3) #define SUPPORT_AUTO_DELINK(chip) \ CHK_BIT((chip)->status[0].function[0], 4) #define SUPPORT_SDIO(chip) \ CHK_BIT((chip)->status[0].function[1], 0) #define SUPPORT_DETAILED_TYPE2(chip) \ CHK_BIT((chip)->status[0].function[1], 1) #define CHECK_PID(chip, pid) (RTS51X_GET_PID(chip) == (pid)) #define CHECK_FW_VER(chip, fw_ver) (FW_VERSION(chip) == (fw_ver)) #define CHECK_ID(chip, pid, fw_ver) \ (CHECK_PID((chip), (pid)) && CHECK_FW_VER((chip), (fw_ver))) static int init_realtek_cr(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ {\ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) \ } static const struct usb_device_id realtek_cr_ids[] = { # include "unusual_realtek.h" {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, realtek_cr_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static struct us_unusual_dev realtek_cr_unusual_dev_list[] = { # include "unusual_realtek.h" {} /* Terminating entry */ }; #undef UNUSUAL_DEV static int rts51x_bulk_transport(struct us_data *us, u8 lun, u8 *cmd, int cmd_len, u8 *buf, int buf_len, enum dma_data_direction dir, int *act_len) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *)us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *)us->iobuf; int result; unsigned int residue; unsigned int cswlen; unsigned int cbwlen = US_BULK_CB_WRAP_LEN; /* set up the command wrapper */ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = cpu_to_le32(buf_len); bcb->Flags = (dir == DMA_FROM_DEVICE) ? US_BULK_FLAG_IN : 0; bcb->Tag = ++us->tag; bcb->Lun = lun; bcb->Length = cmd_len; /* copy the command payload */ memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, cmd, bcb->Length); /* send it to out endpoint */ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, cbwlen, NULL); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* DATA STAGE */ /* send/receive data payload, if there is any */ if (buf && buf_len) { unsigned int pipe = (dir == DMA_FROM_DEVICE) ? 
us->recv_bulk_pipe : us->send_bulk_pipe; result = usb_stor_bulk_transfer_buf(us, pipe, buf, buf_len, NULL); if (result == USB_STOR_XFER_ERROR) return USB_STOR_TRANSPORT_ERROR; } /* get CSW for device status */ result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* check bulk status */ if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN)) { usb_stor_dbg(us, "Signature mismatch: got %08X, expecting %08X\n", le32_to_cpu(bcs->Signature), US_BULK_CS_SIGN); return USB_STOR_TRANSPORT_ERROR; } residue = bcs->Residue; if (bcs->Tag != us->tag) return USB_STOR_TRANSPORT_ERROR; /* * try to compute the actual residue, based on how much data * was really transferred and what the device tells us */ if (residue) residue = residue < buf_len ? residue : buf_len; if (act_len) *act_len = buf_len - residue; /* based on the status code, we report good or bad */ switch (bcs->Status) { case US_BULK_STAT_OK: /* command good -- note that data could be short */ return USB_STOR_TRANSPORT_GOOD; case US_BULK_STAT_FAIL: /* command failed */ return USB_STOR_TRANSPORT_FAILED; case US_BULK_STAT_PHASE: /* * phase error -- note that a transport reset will be * invoked by the invoke_transport() function */ return USB_STOR_TRANSPORT_ERROR; } /* we should never get here, but if we do, we're in trouble */ return USB_STOR_TRANSPORT_ERROR; } static int rts51x_bulk_transport_special(struct us_data *us, u8 lun, u8 *cmd, int cmd_len, u8 *buf, int buf_len, enum dma_data_direction dir, int *act_len) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf; int result; unsigned int cswlen; unsigned int cbwlen = US_BULK_CB_WRAP_LEN; /* set up the command wrapper */ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = cpu_to_le32(buf_len); bcb->Flags = (dir == DMA_FROM_DEVICE) ? US_BULK_FLAG_IN : 0; bcb->Tag = ++us->tag; bcb->Lun = lun; bcb->Length = cmd_len; /* copy the command payload */ memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, cmd, bcb->Length); /* send it to out endpoint */ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, cbwlen, NULL); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* DATA STAGE */ /* send/receive data payload, if there is any */ if (buf && buf_len) { unsigned int pipe = (dir == DMA_FROM_DEVICE) ? 
us->recv_bulk_pipe : us->send_bulk_pipe; result = usb_stor_bulk_transfer_buf(us, pipe, buf, buf_len, NULL); if (result == USB_STOR_XFER_ERROR) return USB_STOR_TRANSPORT_ERROR; } /* get CSW for device status */ result = usb_bulk_msg(us->pusb_dev, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen, 250); return result; } /* Determine what the maximum LUN supported is */ static int rts51x_get_max_lun(struct us_data *us) { int result; /* issue the command */ us->iobuf[0] = 0; result = usb_stor_control_msg(us, us->recv_ctrl_pipe, US_BULK_GET_MAX_LUN, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, us->ifnum, us->iobuf, 1, 10 * HZ); usb_stor_dbg(us, "GetMaxLUN command result is %d, data is %d\n", result, us->iobuf[0]); /* if we have a successful request, return the result */ if (result > 0) return us->iobuf[0]; return 0; } static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmalloc(len, GFP_NOIO); if (buf == NULL) return -ENOMEM; usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len); cmnd[0] = 0xF0; cmnd[1] = 0x0D; cmnd[2] = (u8) (addr >> 8); cmnd[3] = (u8) addr; cmnd[4] = (u8) (len >> 8); cmnd[5] = (u8) len; retval = rts51x_bulk_transport(us, 0, cmnd, 12, buf, len, DMA_FROM_DEVICE, NULL); if (retval != USB_STOR_TRANSPORT_GOOD) { kfree(buf); return -EIO; } memcpy(data, buf, len); kfree(buf); return 0; } static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmemdup(data, len, GFP_NOIO); if (buf == NULL) return USB_STOR_TRANSPORT_ERROR; usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len); cmnd[0] = 0xF0; cmnd[1] = 0x0E; cmnd[2] = (u8) (addr >> 8); cmnd[3] = (u8) addr; cmnd[4] = (u8) (len >> 8); cmnd[5] = (u8) len; retval = rts51x_bulk_transport(us, 0, cmnd, 12, buf, len, DMA_TO_DEVICE, NULL); kfree(buf); if (retval != USB_STOR_TRANSPORT_GOOD) return -EIO; return 0; } static int rts51x_read_status(struct us_data *us, u8 lun, u8 *status, int len, int *actlen) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmalloc(len, GFP_NOIO); if (buf == NULL) return USB_STOR_TRANSPORT_ERROR; usb_stor_dbg(us, "lun = %d\n", lun); cmnd[0] = 0xF0; cmnd[1] = 0x09; retval = rts51x_bulk_transport(us, lun, cmnd, 12, buf, len, DMA_FROM_DEVICE, actlen); if (retval != USB_STOR_TRANSPORT_GOOD) { kfree(buf); return -EIO; } memcpy(status, buf, len); kfree(buf); return 0; } static int rts51x_check_status(struct us_data *us, u8 lun) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 buf[16]; retval = rts51x_read_status(us, lun, buf, 16, &(chip->status_len)); if (retval != STATUS_SUCCESS) return -EIO; usb_stor_dbg(us, "chip->status_len = %d\n", chip->status_len); chip->status[lun].vid = ((u16) buf[0] << 8) | buf[1]; chip->status[lun].pid = ((u16) buf[2] << 8) | buf[3]; chip->status[lun].cur_lun = buf[4]; chip->status[lun].card_type = buf[5]; chip->status[lun].total_lun = buf[6]; chip->status[lun].fw_ver = ((u16) buf[7] << 8) | buf[8]; chip->status[lun].phy_exist = buf[9]; chip->status[lun].multi_flag = buf[10]; chip->status[lun].multi_card = buf[11]; chip->status[lun].log_exist = buf[12]; if (chip->status_len == 16) { chip->status[lun].detailed_type.detailed_type1 = buf[13]; chip->status[lun].function[0] = buf[14]; chip->status[lun].function[1] = buf[15]; } return 0; } static int enable_oscillator(struct us_data *us) { int retval; u8 value; retval = rts51x_read_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; value |= 0x04; retval = 
rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; retval = rts51x_read_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; if (!(value & 0x04)) return -EIO; return 0; } static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len) { int retval; u8 cmnd[12] = {0}; u8 *buf; usb_stor_dbg(us, "addr = 0xfe47, len = %d\n", len); buf = kmemdup(data, len, GFP_NOIO); if (!buf) return USB_STOR_TRANSPORT_ERROR; cmnd[0] = 0xF0; cmnd[1] = 0x0E; cmnd[2] = 0xfe; cmnd[3] = 0x47; cmnd[4] = (u8)(len >> 8); cmnd[5] = (u8)len; retval = rts51x_bulk_transport_special(us, 0, cmnd, 12, buf, len, DMA_TO_DEVICE, NULL); kfree(buf); if (retval != USB_STOR_TRANSPORT_GOOD) { return -EIO; } return 0; } static int do_config_autodelink(struct us_data *us, int enable, int force) { int retval; u8 value; retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (enable) { if (force) value |= 0x03; else value |= 0x01; } else { value &= ~0x03; } usb_stor_dbg(us, "set 0xfe47 to 0x%x\n", value); /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; return 0; } static int config_autodelink_after_power_on(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 value; if (!CHK_AUTO_DELINK(chip)) return 0; retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (auto_delink_en) { CLR_BIT(value, 0); CLR_BIT(value, 1); SET_BIT(value, 2); if (CHECK_ID(chip, 0x0138, 0x3882)) CLR_BIT(value, 2); SET_BIT(value, 7); /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; retval = enable_oscillator(us); if (retval == 0) (void)do_config_autodelink(us, 1, 0); } else { /* Autodelink controlled by firmware */ SET_BIT(value, 2); if (CHECK_ID(chip, 0x0138, 0x3882)) CLR_BIT(value, 2); if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880)) { CLR_BIT(value, 0); CLR_BIT(value, 7); } /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0xFF; retval = rts51x_write_mem(us, 0xFE79, &value, 1); if (retval < 0) return -EIO; value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } } return 0; } #ifdef CONFIG_PM static int config_autodelink_before_power_down(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 value; if (!CHK_AUTO_DELINK(chip)) return 0; if (auto_delink_en) { retval = rts51x_read_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; SET_BIT(value, 2); retval = rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; SET_BIT(value, 0); if (CHECK_ID(chip, 0x0138, 0x3882)) SET_BIT(value, 2); retval = rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; } else { if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880) || CHECK_ID(chip, 0x0138, 0x3882)) { retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880)) { SET_BIT(value, 0); SET_BIT(value, 7); } if (CHECK_ID(chip, 0x0138, 0x3882)) SET_BIT(value, 2); /* 
retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; } if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } } return 0; } static void fw5895_init(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 val; if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) { usb_stor_dbg(us, "Not the specified device, return immediately!\n"); } else { retval = rts51x_read_mem(us, 0xFD6F, &val, 1); if (retval == STATUS_SUCCESS && (val & 0x1F) == 0) { val = 0x1F; retval = rts51x_write_mem(us, 0xFD70, &val, 1); if (retval != STATUS_SUCCESS) usb_stor_dbg(us, "Write memory fail\n"); } else { usb_stor_dbg(us, "Read memory fail, OR (val & 0x1F) != 0\n"); } } } #endif #ifdef CONFIG_REALTEK_AUTOPM static void fw5895_set_mmc_wp(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 buf[13]; if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) { usb_stor_dbg(us, "Not the specified device, return immediately!\n"); } else { retval = rts51x_read_mem(us, 0xFD6F, buf, 1); if (retval == STATUS_SUCCESS && (buf[0] & 0x24) == 0x24) { /* SD Exist and SD WP */ retval = rts51x_read_mem(us, 0xD04E, buf, 1); if (retval == STATUS_SUCCESS) { buf[0] |= 0x04; retval = rts51x_write_mem(us, 0xFD70, buf, 1); if (retval != STATUS_SUCCESS) usb_stor_dbg(us, "Write memory fail\n"); } else { usb_stor_dbg(us, "Read memory fail\n"); } } else { usb_stor_dbg(us, "Read memory fail, OR (buf[0]&0x24)!=0x24\n"); } } } static void rts51x_modi_suspend_timer(struct rts51x_chip *chip) { struct us_data *us = chip->us; usb_stor_dbg(us, "state:%d\n", rts51x_get_stat(chip)); chip->timer_expires = jiffies + msecs_to_jiffies(1000*ss_delay); mod_timer(&chip->rts51x_suspend_timer, chip->timer_expires); } static void rts51x_suspend_timer_fn(struct timer_list *t) { struct rts51x_chip *chip = from_timer(chip, t, rts51x_suspend_timer); struct us_data *us = chip->us; switch (rts51x_get_stat(chip)) { case RTS51X_STAT_INIT: case RTS51X_STAT_RUN: rts51x_modi_suspend_timer(chip); break; case RTS51X_STAT_IDLE: case RTS51X_STAT_SS: usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) { usb_stor_dbg(us, "Ready to enter SS state\n"); rts51x_set_stat(chip, RTS51X_STAT_SS); /* ignore mass storage interface's children */ pm_suspend_ignore_children(&us->pusb_intf->dev, true); usb_autopm_put_interface_async(us->pusb_intf); usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); } break; default: usb_stor_dbg(us, "Unknown state !!!\n"); break; } } static inline int working_scsi(struct scsi_cmnd *srb) { if ((srb->cmnd[0] == TEST_UNIT_READY) || (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL)) { return 0; } return 1; } static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); static int card_first_show = 1; static u8 media_not_present[] = { 0x70, 0, 0x02, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0 }; static u8 invalid_cmd_field[] = { 0x70, 0, 0x05, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0 }; int ret; if (working_scsi(srb)) { usb_stor_dbg(us, "working scsi, power.usage:%d\n", atomic_read(&us->pusb_intf->dev.power.usage_count)); if 
(atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) { ret = usb_autopm_get_interface(us->pusb_intf); usb_stor_dbg(us, "working scsi, ret=%d\n", ret); } if (rts51x_get_stat(chip) != RTS51X_STAT_RUN) rts51x_set_stat(chip, RTS51X_STAT_RUN); chip->proto_handler_backup(srb, us); } else { if (rts51x_get_stat(chip) == RTS51X_STAT_SS) { usb_stor_dbg(us, "NOT working scsi\n"); if ((srb->cmnd[0] == TEST_UNIT_READY) && (chip->pwr_state == US_SUSPEND)) { if (TST_LUN_READY(chip, srb->device->lun)) { srb->result = SAM_STAT_GOOD; } else { srb->result = SAM_STAT_CHECK_CONDITION; memcpy(srb->sense_buffer, media_not_present, US_SENSE_SIZE); } usb_stor_dbg(us, "TEST_UNIT_READY\n"); goto out; } if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { int prevent = srb->cmnd[4] & 0x1; if (prevent) { srb->result = SAM_STAT_CHECK_CONDITION; memcpy(srb->sense_buffer, invalid_cmd_field, US_SENSE_SIZE); } else { srb->result = SAM_STAT_GOOD; } usb_stor_dbg(us, "ALLOW_MEDIUM_REMOVAL\n"); goto out; } } else { usb_stor_dbg(us, "NOT working scsi, not SS\n"); chip->proto_handler_backup(srb, us); /* Check whether card is plugged in */ if (srb->cmnd[0] == TEST_UNIT_READY) { if (srb->result == SAM_STAT_GOOD) { SET_LUN_READY(chip, srb->device->lun); if (card_first_show) { card_first_show = 0; fw5895_set_mmc_wp(us); } } else { CLR_LUN_READY(chip, srb->device->lun); card_first_show = 1; } } if (rts51x_get_stat(chip) != RTS51X_STAT_IDLE) rts51x_set_stat(chip, RTS51X_STAT_IDLE); } } out: usb_stor_dbg(us, "state:%d\n", rts51x_get_stat(chip)); if (rts51x_get_stat(chip) == RTS51X_STAT_RUN) rts51x_modi_suspend_timer(chip); } static int realtek_cr_autosuspend_setup(struct us_data *us) { struct rts51x_chip *chip; struct rts51x_status *status = NULL; u8 buf[16]; int retval; chip = (struct rts51x_chip *)us->extra; chip->support_auto_delink = 0; chip->pwr_state = US_RESUME; chip->lun_ready = 0; rts51x_set_stat(chip, RTS51X_STAT_INIT); retval = rts51x_read_status(us, 0, buf, 16, &(chip->status_len)); if (retval != STATUS_SUCCESS) { usb_stor_dbg(us, "Read status fail\n"); return -EIO; } status = chip->status; status->vid = ((u16) buf[0] << 8) | buf[1]; status->pid = ((u16) buf[2] << 8) | buf[3]; status->cur_lun = buf[4]; status->card_type = buf[5]; status->total_lun = buf[6]; status->fw_ver = ((u16) buf[7] << 8) | buf[8]; status->phy_exist = buf[9]; status->multi_flag = buf[10]; status->multi_card = buf[11]; status->log_exist = buf[12]; if (chip->status_len == 16) { status->detailed_type.detailed_type1 = buf[13]; status->function[0] = buf[14]; status->function[1] = buf[15]; } /* back up the proto_handler in us->extra */ chip = (struct rts51x_chip *)(us->extra); chip->proto_handler_backup = us->proto_handler; /* Set the autosuspend_delay to 0 */ pm_runtime_set_autosuspend_delay(&us->pusb_dev->dev, 0); /* override us->proto_handler setted in get_protocol() */ us->proto_handler = rts51x_invoke_transport; chip->timer_expires = 0; timer_setup(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn, 0); fw5895_init(us); /* enable autosuspend function of the usb device */ usb_enable_autosuspend(us->pusb_dev); return 0; } #endif static void realtek_cr_destructor(void *extra) { struct rts51x_chip *chip = extra; if (!chip) return; #ifdef CONFIG_REALTEK_AUTOPM if (ss_en) { del_timer(&chip->rts51x_suspend_timer); chip->timer_expires = 0; } #endif kfree(chip->status); } #ifdef CONFIG_PM static int realtek_cr_suspend(struct usb_interface *iface, pm_message_t message) { struct us_data *us = usb_get_intfdata(iface); /* wait until no command is running */ 
mutex_lock(&us->dev_mutex); config_autodelink_before_power_down(us); mutex_unlock(&us->dev_mutex); return 0; } static int realtek_cr_resume(struct usb_interface *iface) { struct us_data *us = usb_get_intfdata(iface); fw5895_init(us); config_autodelink_after_power_on(us); return 0; } #else #define realtek_cr_suspend NULL #define realtek_cr_resume NULL #endif static int init_realtek_cr(struct us_data *us) { struct rts51x_chip *chip; int size, i, retval; chip = kzalloc(sizeof(struct rts51x_chip), GFP_KERNEL); if (!chip) return -ENOMEM; us->extra = chip; us->extra_destructor = realtek_cr_destructor; us->max_lun = chip->max_lun = rts51x_get_max_lun(us); chip->us = us; usb_stor_dbg(us, "chip->max_lun = %d\n", chip->max_lun); size = (chip->max_lun + 1) * sizeof(struct rts51x_status); chip->status = kzalloc(size, GFP_KERNEL); if (!chip->status) goto INIT_FAIL; for (i = 0; i <= (int)(chip->max_lun); i++) { retval = rts51x_check_status(us, (u8) i); if (retval < 0) goto INIT_FAIL; } if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) || CHECK_PID(chip, 0x0159)) { if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) || CHECK_FW_VER(chip, 0x5901)) SET_AUTO_DELINK(chip); if (STATUS_LEN(chip) == 16) { if (SUPPORT_AUTO_DELINK(chip)) SET_AUTO_DELINK(chip); } } #ifdef CONFIG_REALTEK_AUTOPM if (ss_en) realtek_cr_autosuspend_setup(us); #endif usb_stor_dbg(us, "chip->flag = 0x%x\n", chip->flag); (void)config_autodelink_after_power_on(us); return 0; INIT_FAIL: if (us->extra) { kfree(chip->status); kfree(us->extra); us->extra = NULL; } return -EIO; } static struct scsi_host_template realtek_cr_host_template; static int realtek_cr_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; dev_dbg(&intf->dev, "Probe Realtek Card Reader!\n"); result = usb_stor_probe1(&us, intf, id, (id - realtek_cr_ids) + realtek_cr_unusual_dev_list, &realtek_cr_host_template); if (result) return result; result = usb_stor_probe2(us); return result; } static struct usb_driver realtek_cr_driver = { .name = DRV_NAME, .probe = realtek_cr_probe, .disconnect = usb_stor_disconnect, /* .suspend = usb_stor_suspend, */ /* .resume = usb_stor_resume, */ .reset_resume = usb_stor_reset_resume, .suspend = realtek_cr_suspend, .resume = realtek_cr_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = realtek_cr_ids, .soft_unbind = 1, .supports_autosuspend = 1, .no_dynamic_id = 1, }; module_usb_stor_driver(realtek_cr_driver, realtek_cr_host_template, DRV_NAME);
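The autosuspend path above keeps itself alive by re-arming a timer from its own callback: rts51x_suspend_timer_fn() recovers the chip with from_timer() and, while the device is in the INIT/RUN states, calls rts51x_modi_suspend_timer() to push the expiry out by ss_delay seconds. Below is a minimal, hypothetical kernel-module sketch of that self-rearming timer pattern; it is not part of the driver, and the names demo_dev, demo_delay and demo_timer_fn are illustrative only.

/*
 * Minimal sketch of a self-rearming timer, assuming a made-up "demo"
 * device. timer_setup() binds the callback, from_timer() recovers the
 * containing object inside the callback, and mod_timer() re-arms it.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static unsigned int demo_delay = 3;	/* seconds, in the spirit of ss_delay */

struct demo_dev {
	struct timer_list suspend_timer;
	unsigned long timer_expires;
};

static struct demo_dev demo;

static void demo_timer_fn(struct timer_list *t)
{
	struct demo_dev *dev = from_timer(dev, t, suspend_timer);

	/* re-arm, mirroring rts51x_modi_suspend_timer() */
	dev->timer_expires = jiffies + msecs_to_jiffies(1000 * demo_delay);
	mod_timer(&dev->suspend_timer, dev->timer_expires);
}

static int __init demo_init(void)
{
	timer_setup(&demo.suspend_timer, demo_timer_fn, 0);
	mod_timer(&demo.suspend_timer,
		  jiffies + msecs_to_jiffies(1000 * demo_delay));
	return 0;
}

static void __exit demo_exit(void)
{
	/* make sure the callback is not still running before unload */
	del_timer_sync(&demo.suspend_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The synchronous del_timer_sync() in the exit path guarantees the callback has finished at unload time; the driver above uses plain del_timer() in realtek_cr_destructor().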
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008, 2009 open80211s Ltd. * Copyright (C) 2023 Intel Corporation * Author: Luis Carlos Cobo <luisca@cozybit.com> */ #include <linux/etherdevice.h> #include <linux/list.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <net/mac80211.h> #include "wme.h" #include "ieee80211_i.h" #include "mesh.h" #include <linux/rhashtable.h> static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath); static u32 mesh_table_hash(const void *addr, u32 len, u32 seed) { /* Use last four bytes of hw addr as hash index */ return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed); } static const struct rhashtable_params mesh_rht_params = { .nelem_hint = 2, .automatic_shrinking = true, .key_len = ETH_ALEN, .key_offset = offsetof(struct mesh_path, dst), .head_offset = offsetof(struct mesh_path, rhash), .hashfn = mesh_table_hash, }; static const struct rhashtable_params fast_tx_rht_params = { .nelem_hint = 10, .automatic_shrinking = true, .key_len = sizeof_field(struct ieee80211_mesh_fast_tx, key), .key_offset = offsetof(struct ieee80211_mesh_fast_tx, key), .head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash), .hashfn = mesh_table_hash, }; static void __mesh_fast_tx_entry_free(void *ptr, void *tblptr) { struct ieee80211_mesh_fast_tx *entry = ptr; kfree_rcu(entry, fast_tx.rcu_head); } static void mesh_fast_tx_deinit(struct ieee80211_sub_if_data *sdata) { struct mesh_tx_cache *cache; cache = &sdata->u.mesh.tx_cache; rhashtable_free_and_destroy(&cache->rht, __mesh_fast_tx_entry_free, NULL); } static void mesh_fast_tx_init(struct ieee80211_sub_if_data *sdata) { struct mesh_tx_cache *cache; cache = &sdata->u.mesh.tx_cache; rhashtable_init(&cache->rht, &fast_tx_rht_params); INIT_HLIST_HEAD(&cache->walk_head); spin_lock_init(&cache->walk_lock); } static inline bool mpath_expired(struct mesh_path *mpath) { return (mpath->flags & MESH_PATH_ACTIVE) && time_after(jiffies, mpath->exp_time) && !(mpath->flags & MESH_PATH_FIXED); } static void mesh_path_rht_free(void *ptr, void *tblptr) { struct mesh_path *mpath = ptr; struct mesh_table *tbl = tblptr; mesh_path_free_rcu(tbl, mpath); } static void mesh_table_init(struct mesh_table *tbl) { INIT_HLIST_HEAD(&tbl->known_gates); INIT_HLIST_HEAD(&tbl->walk_head); atomic_set(&tbl->entries, 0); spin_lock_init(&tbl->gates_lock); spin_lock_init(&tbl->walk_lock); /* rhashtable_init() may fail only in case of wrong * mesh_rht_params */ WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params)); } static void mesh_table_free(struct mesh_table *tbl) { rhashtable_free_and_destroy(&tbl->rhead,
mesh_path_rht_free, tbl); } /** * mesh_path_assign_nexthop - update mesh path next hop * * @mpath: mesh path to update * @sta: next hop to assign * * Locking: mpath->state_lock must be held when calling this function */ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) { struct sk_buff *skb; struct ieee80211_hdr *hdr; unsigned long flags; rcu_assign_pointer(mpath->next_hop, sta); spin_lock_irqsave(&mpath->frame_queue.lock, flags); skb_queue_walk(&mpath->frame_queue, skb) { hdr = (struct ieee80211_hdr *) skb->data; memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN); ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr); } spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); } static void prepare_for_gate(struct sk_buff *skb, char *dst_addr, struct mesh_path *gate_mpath) { struct ieee80211_hdr *hdr; struct ieee80211s_hdr *mshdr; int mesh_hdrlen, hdrlen; char *next_hop; hdr = (struct ieee80211_hdr *) skb->data; hdrlen = ieee80211_hdrlen(hdr->frame_control); mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); if (!(mshdr->flags & MESH_FLAGS_AE)) { /* size of the fixed part of the mesh header */ mesh_hdrlen = 6; /* make room for the two extended addresses */ skb_push(skb, 2 * ETH_ALEN); memmove(skb->data, hdr, hdrlen + mesh_hdrlen); hdr = (struct ieee80211_hdr *) skb->data; /* we preserve the previous mesh header and only add * the new addresses */ mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); mshdr->flags = MESH_FLAGS_AE_A5_A6; memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN); memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN); } /* update next hop */ hdr = (struct ieee80211_hdr *) skb->data; rcu_read_lock(); next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr; memcpy(hdr->addr1, next_hop, ETH_ALEN); rcu_read_unlock(); memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN); memcpy(hdr->addr3, dst_addr, ETH_ALEN); } /** * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another * * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate) * @from_mpath: The failed mpath * @copy: When true, copy all the frames to the new mpath queue. When false, * move them. * * This function is used to transfer or copy frames from an unresolved mpath to * a gate mpath. The function also adds the Address Extension field and * updates the next hop. * * If a frame already has an Address Extension field, only the next hop and * destination addresses are updated. * * The gate mpath must be an active mpath with a valid mpath->next_hop. 
*/ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath, struct mesh_path *from_mpath, bool copy) { struct sk_buff *skb, *fskb, *tmp; struct sk_buff_head failq; unsigned long flags; if (WARN_ON(gate_mpath == from_mpath)) return; if (WARN_ON(!gate_mpath->next_hop)) return; __skb_queue_head_init(&failq); spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); skb_queue_splice_init(&from_mpath->frame_queue, &failq); spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); skb_queue_walk_safe(&failq, fskb, tmp) { if (skb_queue_len(&gate_mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN) { mpath_dbg(gate_mpath->sdata, "mpath queue full!\n"); break; } skb = skb_copy(fskb, GFP_ATOMIC); if (WARN_ON(!skb)) break; prepare_for_gate(skb, gate_mpath->dst, gate_mpath); skb_queue_tail(&gate_mpath->frame_queue, skb); if (copy) continue; __skb_unlink(fskb, &failq); kfree_skb(fskb); } mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n", gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue)); if (!copy) return; spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); skb_queue_splice(&failq, &from_mpath->frame_queue); spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); } static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, struct ieee80211_sub_if_data *sdata) { struct mesh_path *mpath; mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params); if (mpath && mpath_expired(mpath)) { spin_lock_bh(&mpath->state_lock); mpath->flags &= ~MESH_PATH_ACTIVE; spin_unlock_bh(&mpath->state_lock); } return mpath; } /** * mesh_path_lookup - look up a path in the mesh path table * @sdata: local subif * @dst: hardware address (ETH_ALEN length) of destination * * Returns: pointer to the mesh path structure, or NULL if not found * * Locking: must be called within a read rcu section. */ struct mesh_path * mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata); } struct mesh_path * mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) { return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata); } static struct mesh_path * __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) { int i = 0; struct mesh_path *mpath; hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { if (i++ == idx) break; } if (!mpath) return NULL; if (mpath_expired(mpath)) { spin_lock_bh(&mpath->state_lock); mpath->flags &= ~MESH_PATH_ACTIVE; spin_unlock_bh(&mpath->state_lock); } return mpath; } /** * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index * @idx: index * @sdata: local subif, or NULL for all entries * * Returns: pointer to the mesh path structure, or NULL if not found. * * Locking: must be called within a read rcu section. */ struct mesh_path * mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx); } /** * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index * @idx: index * @sdata: local subif, or NULL for all entries * * Returns: pointer to the proxy path structure, or NULL if not found. * * Locking: must be called within a read rcu section. 
*/ struct mesh_path * mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx); } /** * mesh_path_add_gate - add the given mpath to a mesh gate to our path table * @mpath: gate path to add to table * * Returns: 0 on success, -EEXIST */ int mesh_path_add_gate(struct mesh_path *mpath) { struct mesh_table *tbl; int err; rcu_read_lock(); tbl = &mpath->sdata->u.mesh.mesh_paths; spin_lock_bh(&mpath->state_lock); if (mpath->is_gate) { err = -EEXIST; spin_unlock_bh(&mpath->state_lock); goto err_rcu; } mpath->is_gate = true; mpath->sdata->u.mesh.num_gates++; spin_lock(&tbl->gates_lock); hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates); spin_unlock(&tbl->gates_lock); spin_unlock_bh(&mpath->state_lock); mpath_dbg(mpath->sdata, "Mesh path: Recorded new gate: %pM. %d known gates\n", mpath->dst, mpath->sdata->u.mesh.num_gates); err = 0; err_rcu: rcu_read_unlock(); return err; } /** * mesh_gate_del - remove a mesh gate from the list of known gates * @tbl: table which holds our list of known gates * @mpath: gate mpath */ static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) { lockdep_assert_held(&mpath->state_lock); if (!mpath->is_gate) return; mpath->is_gate = false; spin_lock_bh(&tbl->gates_lock); hlist_del_rcu(&mpath->gate_list); mpath->sdata->u.mesh.num_gates--; spin_unlock_bh(&tbl->gates_lock); mpath_dbg(mpath->sdata, "Mesh path: Deleted gate: %pM. %d known gates\n", mpath->dst, mpath->sdata->u.mesh.num_gates); } /** * mesh_gate_num - number of gates known to this interface * @sdata: subif data * * Returns: The number of gates */ int mesh_gate_num(struct ieee80211_sub_if_data *sdata) { return sdata->u.mesh.num_gates; } static struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata, const u8 *dst, gfp_t gfp_flags) { struct mesh_path *new_mpath; new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags); if (!new_mpath) return NULL; memcpy(new_mpath->dst, dst, ETH_ALEN); eth_broadcast_addr(new_mpath->rann_snd_addr); new_mpath->is_root = false; new_mpath->sdata = sdata; new_mpath->flags = 0; skb_queue_head_init(&new_mpath->frame_queue); new_mpath->exp_time = jiffies; spin_lock_init(&new_mpath->state_lock); timer_setup(&new_mpath->timer, mesh_path_timer, 0); return new_mpath; } static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache, struct ieee80211_mesh_fast_tx *entry) { hlist_del_rcu(&entry->walk_list); rhashtable_remove_fast(&cache->rht, &entry->rhash, fast_tx_rht_params); kfree_rcu(entry, fast_tx.rcu_head); } struct ieee80211_mesh_fast_tx * mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, struct ieee80211_mesh_fast_tx_key *key) { struct ieee80211_mesh_fast_tx *entry; struct mesh_tx_cache *cache; cache = &sdata->u.mesh.tx_cache; entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params); if (!entry) return NULL; if (!(entry->mpath->flags & MESH_PATH_ACTIVE) || mpath_expired(entry->mpath)) { spin_lock_bh(&cache->walk_lock); entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params); if (entry) mesh_fast_tx_entry_free(cache, entry); spin_unlock_bh(&cache->walk_lock); return NULL; } mesh_path_refresh(sdata, entry->mpath, NULL); if (entry->mppath) entry->mppath->exp_time = jiffies; entry->timestamp = jiffies; return entry; } void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct mesh_path *mpath) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct 
ieee80211_mesh_fast_tx *entry, *prev; struct ieee80211_mesh_fast_tx build = {}; struct ieee80211s_hdr *meshhdr; struct mesh_tx_cache *cache; struct ieee80211_key *key; struct mesh_path *mppath; struct sta_info *sta; u8 *qc; if (sdata->noack_map || !ieee80211_is_data_qos(hdr->frame_control)) return; build.fast_tx.hdr_len = ieee80211_hdrlen(hdr->frame_control); meshhdr = (struct ieee80211s_hdr *)(skb->data + build.fast_tx.hdr_len); build.hdrlen = ieee80211_get_mesh_hdrlen(meshhdr); cache = &sdata->u.mesh.tx_cache; if (atomic_read(&cache->rht.nelems) >= MESH_FAST_TX_CACHE_MAX_SIZE) return; sta = rcu_dereference(mpath->next_hop); if (!sta) return; build.key.type = MESH_FAST_TX_TYPE_LOCAL; if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) { /* This is required to keep the mppath alive */ mppath = mpp_path_lookup(sdata, meshhdr->eaddr1); if (!mppath) return; build.mppath = mppath; if (!ether_addr_equal(meshhdr->eaddr2, sdata->vif.addr)) build.key.type = MESH_FAST_TX_TYPE_PROXIED; } else if (ieee80211_has_a4(hdr->frame_control)) { mppath = mpath; } else { return; } if (!ether_addr_equal(hdr->addr4, sdata->vif.addr)) build.key.type = MESH_FAST_TX_TYPE_FORWARDED; /* rate limit, in case fast xmit can't be enabled */ if (mppath->fast_tx_check == jiffies) return; mppath->fast_tx_check = jiffies; /* * Same use of the sta lock as in ieee80211_check_fast_xmit, in order * to protect against concurrent sta key updates. */ spin_lock_bh(&sta->lock); key = rcu_access_pointer(sta->ptk[sta->ptk_idx]); if (!key) key = rcu_access_pointer(sdata->default_unicast_key); build.fast_tx.key = key; if (key) { bool gen_iv, iv_spc; gen_iv = key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV; iv_spc = key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE; if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) || (key->flags & KEY_FLAG_TAINTED)) goto unlock_sta; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: if (gen_iv) build.fast_tx.pn_offs = build.fast_tx.hdr_len; if (gen_iv || iv_spc) build.fast_tx.hdr_len += IEEE80211_CCMP_HDR_LEN; break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (gen_iv) build.fast_tx.pn_offs = build.fast_tx.hdr_len; if (gen_iv || iv_spc) build.fast_tx.hdr_len += IEEE80211_GCMP_HDR_LEN; break; default: goto unlock_sta; } } memcpy(build.key.addr, mppath->dst, ETH_ALEN); build.timestamp = jiffies; build.fast_tx.band = info->band; build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3); build.fast_tx.sa_offs = offsetof(struct ieee80211_hdr, addr4); build.mpath = mpath; memcpy(build.hdr, meshhdr, build.hdrlen); memcpy(build.hdr + build.hdrlen, rfc1042_header, sizeof(rfc1042_header)); build.hdrlen += sizeof(rfc1042_header); memcpy(build.fast_tx.hdr, hdr, build.fast_tx.hdr_len); hdr = (struct ieee80211_hdr *)build.fast_tx.hdr; if (build.fast_tx.key) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); qc = ieee80211_get_qos_ctl(hdr); qc[1] |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8; entry = kmemdup(&build, sizeof(build), GFP_ATOMIC); if (!entry) goto unlock_sta; spin_lock(&cache->walk_lock); prev = rhashtable_lookup_get_insert_fast(&cache->rht, &entry->rhash, fast_tx_rht_params); if (unlikely(IS_ERR(prev))) { kfree(entry); goto unlock_cache; } /* * replace any previous entry in the hash table, in case we're * replacing it with a different type (e.g. 
mpath -> mpp) */ if (unlikely(prev)) { rhashtable_replace_fast(&cache->rht, &prev->rhash, &entry->rhash, fast_tx_rht_params); hlist_del_rcu(&prev->walk_list); kfree_rcu(prev, fast_tx.rcu_head); } hlist_add_head(&entry->walk_list, &cache->walk_head); unlock_cache: spin_unlock(&cache->walk_lock); unlock_sta: spin_unlock_bh(&sta->lock); } void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata) { unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT); struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache; struct ieee80211_mesh_fast_tx *entry; struct hlist_node *n; if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE) return; spin_lock_bh(&cache->walk_lock); hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list) if (!time_is_after_jiffies(entry->timestamp + timeout)) mesh_fast_tx_entry_free(cache, entry); spin_unlock_bh(&cache->walk_lock); } void mesh_fast_tx_flush_mpath(struct mesh_path *mpath) { struct ieee80211_sub_if_data *sdata = mpath->sdata; struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache; struct ieee80211_mesh_fast_tx *entry; struct hlist_node *n; spin_lock_bh(&cache->walk_lock); hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list) if (entry->mpath == mpath) mesh_fast_tx_entry_free(cache, entry); spin_unlock_bh(&cache->walk_lock); } void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache; struct ieee80211_mesh_fast_tx *entry; struct hlist_node *n; spin_lock_bh(&cache->walk_lock); hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list) if (rcu_access_pointer(entry->mpath->next_hop) == sta) mesh_fast_tx_entry_free(cache, entry); spin_unlock_bh(&cache->walk_lock); } void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr) { struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache; struct ieee80211_mesh_fast_tx_key key = {}; struct ieee80211_mesh_fast_tx *entry; int i; ether_addr_copy(key.addr, addr); spin_lock_bh(&cache->walk_lock); for (i = 0; i < NUM_MESH_FAST_TX_TYPE; i++) { key.type = i; entry = rhashtable_lookup_fast(&cache->rht, &key, fast_tx_rht_params); if (entry) mesh_fast_tx_entry_free(cache, entry); } spin_unlock_bh(&cache->walk_lock); } /** * mesh_path_add - allocate and add a new path to the mesh path table * @dst: destination address of the path (ETH_ALEN length) * @sdata: local subif * * Returns: 0 on success * * State: the initial state of the new path is set to 0 */ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst) { struct mesh_table *tbl; struct mesh_path *mpath, *new_mpath; if (ether_addr_equal(dst, sdata->vif.addr)) /* never add ourselves as neighbours */ return ERR_PTR(-EOPNOTSUPP); if (is_multicast_ether_addr(dst)) return ERR_PTR(-EOPNOTSUPP); if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) return ERR_PTR(-ENOSPC); new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC); if (!new_mpath) return ERR_PTR(-ENOMEM); tbl = &sdata->u.mesh.mesh_paths; spin_lock_bh(&tbl->walk_lock); mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead, &new_mpath->rhash, mesh_rht_params); if (!mpath) hlist_add_head(&new_mpath->walk_list, &tbl->walk_head); spin_unlock_bh(&tbl->walk_lock); if (mpath) { kfree(new_mpath); if (IS_ERR(mpath)) return mpath; new_mpath = mpath; } sdata->u.mesh.mesh_paths_generation++; return new_mpath; } int mpp_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst, const u8 *mpp) { struct mesh_table *tbl; struct mesh_path 
*new_mpath; int ret; if (ether_addr_equal(dst, sdata->vif.addr)) /* never add ourselves as neighbours */ return -EOPNOTSUPP; if (is_multicast_ether_addr(dst)) return -EOPNOTSUPP; new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC); if (!new_mpath) return -ENOMEM; memcpy(new_mpath->mpp, mpp, ETH_ALEN); tbl = &sdata->u.mesh.mpp_paths; spin_lock_bh(&tbl->walk_lock); ret = rhashtable_lookup_insert_fast(&tbl->rhead, &new_mpath->rhash, mesh_rht_params); if (!ret) hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head); spin_unlock_bh(&tbl->walk_lock); if (ret) kfree(new_mpath); else mesh_fast_tx_flush_addr(sdata, dst); sdata->u.mesh.mpp_paths_generation++; return ret; } /** * mesh_plink_broken - deactivates paths and sends perr when a link breaks * * @sta: broken peer link * * This function must be called from the rate control algorithm if enough * delivery errors suggest that a peer link is no longer usable. */ void mesh_plink_broken(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct mesh_table *tbl = &sdata->u.mesh.mesh_paths; static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct mesh_path *mpath; rcu_read_lock(); hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { if (rcu_access_pointer(mpath->next_hop) == sta && mpath->flags & MESH_PATH_ACTIVE && !(mpath->flags & MESH_PATH_FIXED)) { spin_lock_bh(&mpath->state_lock); mpath->flags &= ~MESH_PATH_ACTIVE; ++mpath->sn; spin_unlock_bh(&mpath->state_lock); mesh_path_error_tx(sdata, sdata->u.mesh.mshcfg.element_ttl, mpath->dst, mpath->sn, WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); } } rcu_read_unlock(); } static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath) { struct ieee80211_sub_if_data *sdata = mpath->sdata; spin_lock_bh(&mpath->state_lock); mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED; mesh_gate_del(tbl, mpath); spin_unlock_bh(&mpath->state_lock); timer_shutdown_sync(&mpath->timer); atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&tbl->entries); mesh_path_flush_pending(mpath); kfree_rcu(mpath, rcu); } static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) { hlist_del_rcu(&mpath->walk_list); rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); if (tbl == &mpath->sdata->u.mesh.mpp_paths) mesh_fast_tx_flush_addr(mpath->sdata, mpath->dst); else mesh_fast_tx_flush_mpath(mpath); mesh_path_free_rcu(tbl, mpath); } /** * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches * * @sta: mesh peer to match * * RCU notes: this function is called when a mesh plink transitions from * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that * allows path creation. This will happen before the sta can be freed (because * sta_info_destroy() calls this) so any reader in a rcu read block will be * protected against the plink disappearing. 
*/ void mesh_path_flush_by_nexthop(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct mesh_table *tbl = &sdata->u.mesh.mesh_paths; struct mesh_path *mpath; struct hlist_node *n; spin_lock_bh(&tbl->walk_lock); hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { if (rcu_access_pointer(mpath->next_hop) == sta) __mesh_path_del(tbl, mpath); } spin_unlock_bh(&tbl->walk_lock); } static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, const u8 *proxy) { struct mesh_table *tbl = &sdata->u.mesh.mpp_paths; struct mesh_path *mpath; struct hlist_node *n; spin_lock_bh(&tbl->walk_lock); hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { if (ether_addr_equal(mpath->mpp, proxy)) __mesh_path_del(tbl, mpath); } spin_unlock_bh(&tbl->walk_lock); } static void table_flush_by_iface(struct mesh_table *tbl) { struct mesh_path *mpath; struct hlist_node *n; spin_lock_bh(&tbl->walk_lock); hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { __mesh_path_del(tbl, mpath); } spin_unlock_bh(&tbl->walk_lock); } /** * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface * * @sdata: interface data to match * * This function deletes both mesh paths as well as mesh portal paths. */ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) { table_flush_by_iface(&sdata->u.mesh.mesh_paths); table_flush_by_iface(&sdata->u.mesh.mpp_paths); } /** * table_path_del - delete a path from the mesh or mpp table * * @tbl: mesh or mpp path table * @sdata: local subif * @addr: dst address (ETH_ALEN length) * * Returns: 0 if successful */ static int table_path_del(struct mesh_table *tbl, struct ieee80211_sub_if_data *sdata, const u8 *addr) { struct mesh_path *mpath; spin_lock_bh(&tbl->walk_lock); mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); if (!mpath) { spin_unlock_bh(&tbl->walk_lock); return -ENXIO; } __mesh_path_del(tbl, mpath); spin_unlock_bh(&tbl->walk_lock); return 0; } /** * mesh_path_del - delete a mesh path from the table * * @addr: dst address (ETH_ALEN length) * @sdata: local subif * * Returns: 0 if successful */ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) { int err; /* flush relevant mpp entries first */ mpp_flush_by_proxy(sdata, addr); err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr); sdata->u.mesh.mesh_paths_generation++; return err; } /** * mesh_path_tx_pending - sends pending frames in a mesh path queue * * @mpath: mesh path to activate * * Locking: the state_lock of the mpath structure must NOT be held when calling * this function. */ void mesh_path_tx_pending(struct mesh_path *mpath) { if (mpath->flags & MESH_PATH_ACTIVE) ieee80211_add_pending_skbs(mpath->sdata->local, &mpath->frame_queue); } /** * mesh_path_send_to_gates - sends pending frames to all known mesh gates * * @mpath: mesh path whose queue will be emptied * * If there is only one gate, the frames are transferred from the failed mpath * queue to that gate's queue. If there are more than one gates, the frames * are copied from each gate to the next. After frames are copied, the * mpath queues are emptied onto the transmission queue. 
* * Returns: 0 on success, -EHOSTUNREACH */ int mesh_path_send_to_gates(struct mesh_path *mpath) { struct ieee80211_sub_if_data *sdata = mpath->sdata; struct mesh_table *tbl; struct mesh_path *from_mpath = mpath; struct mesh_path *gate; bool copy = false; tbl = &sdata->u.mesh.mesh_paths; rcu_read_lock(); hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) { if (gate->flags & MESH_PATH_ACTIVE) { mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst); mesh_path_move_to_queue(gate, from_mpath, copy); from_mpath = gate; copy = true; } else { mpath_dbg(sdata, "Not forwarding to %pM (flags %#x)\n", gate->dst, gate->flags); } } hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) { mpath_dbg(sdata, "Sending to %pM\n", gate->dst); mesh_path_tx_pending(gate); } rcu_read_unlock(); return (from_mpath == mpath) ? -EHOSTUNREACH : 0; } /** * mesh_path_discard_frame - discard a frame whose path could not be resolved * * @skb: frame to discard * @sdata: network subif the frame was to be sent through * * Locking: the function must me called within a rcu_read_lock region */ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { ieee80211_free_txskb(&sdata->local->hw, skb); sdata->u.mesh.mshstats.dropped_frames_no_route++; } /** * mesh_path_flush_pending - free the pending queue of a mesh path * * @mpath: mesh path whose queue has to be freed * * Locking: the function must me called within a rcu_read_lock region */ void mesh_path_flush_pending(struct mesh_path *mpath) { struct ieee80211_sub_if_data *sdata = mpath->sdata; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct mesh_preq_queue *preq, *tmp; struct sk_buff *skb; while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL) mesh_path_discard_frame(mpath->sdata, skb); spin_lock_bh(&ifmsh->mesh_preq_queue_lock); list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) { if (ether_addr_equal(mpath->dst, preq->dst)) { list_del(&preq->list); kfree(preq); --ifmsh->preq_queue_len; } } spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); } /** * mesh_path_fix_nexthop - force a specific next hop for a mesh path * * @mpath: the mesh path to modify * @next_hop: the next hop to force * * Locking: this function must be called holding mpath->state_lock */ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) { spin_lock_bh(&mpath->state_lock); mesh_path_assign_nexthop(mpath, next_hop); mpath->sn = 0xffff; mpath->metric = 0; mpath->hop_count = 0; mpath->exp_time = 0; mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID; mesh_path_activate(mpath); mesh_fast_tx_flush_mpath(mpath); spin_unlock_bh(&mpath->state_lock); ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg); /* init it at a low value - 0 start is tricky */ ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1); mesh_path_tx_pending(mpath); } void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) { mesh_table_init(&sdata->u.mesh.mesh_paths); mesh_table_init(&sdata->u.mesh.mpp_paths); mesh_fast_tx_init(sdata); } static void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl) { struct mesh_path *mpath; struct hlist_node *n; spin_lock_bh(&tbl->walk_lock); hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { if ((!(mpath->flags & MESH_PATH_RESOLVING)) && (!(mpath->flags & MESH_PATH_FIXED)) && time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) __mesh_path_del(tbl, mpath); } spin_unlock_bh(&tbl->walk_lock); } void mesh_path_expire(struct ieee80211_sub_if_data *sdata) { 
mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths); mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths); } void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata) { mesh_fast_tx_deinit(sdata); mesh_table_free(&sdata->u.mesh.mesh_paths); mesh_table_free(&sdata->u.mesh.mpp_paths); }
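mesh_path_add() and mpath_lookup() above are built on an rhashtable whose key is the destination MAC address embedded in each mesh_path (the key_len/key_offset/head_offset fields of mesh_rht_params). The sketch below shows the same pattern on a made-up struct; demo_path, demo_rht_params and demo_add_and_find are illustrative names, and the caller is assumed to have initialised the table with rhashtable_init(tbl, &demo_rht_params) beforehand.

/*
 * Minimal sketch, under the assumptions stated above, of keying an
 * rhashtable on an embedded Ethernet address the way the mesh path
 * table does. Entries inserted here would normally be torn down with
 * rhashtable_free_and_destroy(), as in mesh_table_free() above.
 */
#include <linux/rhashtable.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_path {
	u8 dst[ETH_ALEN];		/* hash key, like mesh_path->dst */
	struct rhash_head rhash;	/* linkage owned by the rhashtable core */
};

static const struct rhashtable_params demo_rht_params = {
	.key_len	= ETH_ALEN,
	.key_offset	= offsetof(struct demo_path, dst),
	.head_offset	= offsetof(struct demo_path, rhash),
	.automatic_shrinking = true,
};

static int demo_add_and_find(struct rhashtable *tbl, const u8 *dst)
{
	struct demo_path *p = kzalloc(sizeof(*p), GFP_KERNEL);
	int err;

	if (!p)
		return -ENOMEM;

	ether_addr_copy(p->dst, dst);
	err = rhashtable_insert_fast(tbl, &p->rhash, demo_rht_params);
	if (err) {
		kfree(p);
		return err;
	}

	/* look the entry back up by key, analogous to mpath_lookup() */
	return rhashtable_lookup_fast(tbl, dst, demo_rht_params) ? 0 : -ENOENT;
}

Leaving .hashfn unset lets the rhashtable core fall back to jhash over the whole 6-byte key; mesh_rht_params instead supplies mesh_table_hash(), which hashes only the last four bytes of the address with jhash_1word().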
// SPDX-License-Identifier: GPL-2.0-only #ifndef KVM_X86_MMU_SPTE_H #define KVM_X86_MMU_SPTE_H #include <asm/vmx.h> #include "mmu.h" #include "mmu_internal.h" /* * A MMU present SPTE is backed by actual memory and may or may not be present * in hardware. E.g. MMIO SPTEs are not considered present. Use bit 11, as it * is ignored by all flavors of SPTEs and checking a low bit often generates * better code than for a high bit, e.g. 56+. MMU present checks are pervasive * enough that the improved code generation is noticeable in KVM's footprint. */ #define SPTE_MMU_PRESENT_MASK BIT_ULL(11) /* * TDP SPTES (more specifically, EPT SPTEs) may not have A/D bits, and may also * be restricted to using write-protection (for L2 when CPU dirty logging, i.e. * PML, is enabled). Use bits 52 and 53 to hold the type of A/D tracking that * must be employed for a given TDP SPTE. * * Note, the "enabled" mask must be '0', as bits 62:52 are _reserved_ for PAE * paging, including NPT PAE. This scheme works because legacy shadow paging * is guaranteed to have A/D bits and write-protection is forced only for * TDP with CPU dirty logging (PML). If NPT ever gains PML-like support, it * must be restricted to 64-bit KVM.
*/ #define SPTE_TDP_AD_SHIFT 52 #define SPTE_TDP_AD_MASK (3ULL << SPTE_TDP_AD_SHIFT) #define SPTE_TDP_AD_ENABLED (0ULL << SPTE_TDP_AD_SHIFT) #define SPTE_TDP_AD_DISABLED (1ULL << SPTE_TDP_AD_SHIFT) #define SPTE_TDP_AD_WRPROT_ONLY (2ULL << SPTE_TDP_AD_SHIFT) static_assert(SPTE_TDP_AD_ENABLED == 0); #ifdef CONFIG_DYNAMIC_PHYSICAL_MASK #define SPTE_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1)) #else #define SPTE_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)) #endif #define SPTE_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \ | shadow_x_mask | shadow_nx_mask | shadow_me_mask) #define ACC_EXEC_MASK 1 #define ACC_WRITE_MASK PT_WRITABLE_MASK #define ACC_USER_MASK PT_USER_MASK #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) /* The mask for the R/X bits in EPT PTEs */ #define SPTE_EPT_READABLE_MASK 0x1ull #define SPTE_EPT_EXECUTABLE_MASK 0x4ull #define SPTE_LEVEL_BITS 9 #define SPTE_LEVEL_SHIFT(level) __PT_LEVEL_SHIFT(level, SPTE_LEVEL_BITS) #define SPTE_INDEX(address, level) __PT_INDEX(address, level, SPTE_LEVEL_BITS) #define SPTE_ENT_PER_PAGE __PT_ENT_PER_PAGE(SPTE_LEVEL_BITS) /* * The mask/shift to use for saving the original R/X bits when marking the PTE * as not-present for access tracking purposes. We do not save the W bit as the * PTEs being access tracked also need to be dirty tracked, so the W bit will be * restored only when a write is attempted to the page. This mask obviously * must not overlap the A/D type mask. */ #define SHADOW_ACC_TRACK_SAVED_BITS_MASK (SPTE_EPT_READABLE_MASK | \ SPTE_EPT_EXECUTABLE_MASK) #define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT 54 #define SHADOW_ACC_TRACK_SAVED_MASK (SHADOW_ACC_TRACK_SAVED_BITS_MASK << \ SHADOW_ACC_TRACK_SAVED_BITS_SHIFT) static_assert(!(SPTE_TDP_AD_MASK & SHADOW_ACC_TRACK_SAVED_MASK)); /* * {DEFAULT,EPT}_SPTE_{HOST,MMU}_WRITABLE are used to keep track of why a given * SPTE is write-protected. See is_writable_pte() for details. */ /* Bits 9 and 10 are ignored by all non-EPT PTEs. */ #define DEFAULT_SPTE_HOST_WRITABLE BIT_ULL(9) #define DEFAULT_SPTE_MMU_WRITABLE BIT_ULL(10) /* * Low ignored bits are at a premium for EPT, use high ignored bits, taking care * to not overlap the A/D type mask or the saved access bits of access-tracked * SPTEs when A/D bits are disabled. */ #define EPT_SPTE_HOST_WRITABLE BIT_ULL(57) #define EPT_SPTE_MMU_WRITABLE BIT_ULL(58) static_assert(!(EPT_SPTE_HOST_WRITABLE & SPTE_TDP_AD_MASK)); static_assert(!(EPT_SPTE_MMU_WRITABLE & SPTE_TDP_AD_MASK)); static_assert(!(EPT_SPTE_HOST_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK)); static_assert(!(EPT_SPTE_MMU_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK)); /* Defined only to keep the above static asserts readable. */ #undef SHADOW_ACC_TRACK_SAVED_MASK /* * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of * the memslots generation and is derived as follows: * * Bits 0-7 of the MMIO generation are propagated to spte bits 3-10 * Bits 8-18 of the MMIO generation are propagated to spte bits 52-62 * * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in * the MMIO generation number, as doing so would require stealing a bit from * the "real" generation number and thus effectively halve the maximum number * of MMIO generations that can be handled before encountering a wrap (which * requires a full MMU zap). The flag is instead explicitly queried when * checking for MMIO spte cache hits. 
*/ #define MMIO_SPTE_GEN_LOW_START 3 #define MMIO_SPTE_GEN_LOW_END 10 #define MMIO_SPTE_GEN_HIGH_START 52 #define MMIO_SPTE_GEN_HIGH_END 62 #define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \ MMIO_SPTE_GEN_LOW_START) #define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \ MMIO_SPTE_GEN_HIGH_START) static_assert(!(SPTE_MMU_PRESENT_MASK & (MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK))); /* * The SPTE MMIO mask must NOT overlap the MMIO generation bits or the * MMU-present bit. The generation obviously co-exists with the magic MMIO * mask/value, and MMIO SPTEs are considered !MMU-present. * * The SPTE MMIO mask is allowed to use hardware "present" bits (i.e. all EPT * RWX bits), all physical address bits (legal PA bits are used for "fast" MMIO * and so they're off-limits for generation; additional checks ensure the mask * doesn't overlap legal PA bits), and bit 63 (carved out for future usage). */ #define SPTE_MMIO_ALLOWED_MASK (BIT_ULL(63) | GENMASK_ULL(51, 12) | GENMASK_ULL(2, 0)) static_assert(!(SPTE_MMIO_ALLOWED_MASK & (SPTE_MMU_PRESENT_MASK | MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK))); #define MMIO_SPTE_GEN_LOW_BITS (MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1) #define MMIO_SPTE_GEN_HIGH_BITS (MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1) /* remember to adjust the comment above as well if you change these */ static_assert(MMIO_SPTE_GEN_LOW_BITS == 8 && MMIO_SPTE_GEN_HIGH_BITS == 11); #define MMIO_SPTE_GEN_LOW_SHIFT (MMIO_SPTE_GEN_LOW_START - 0) #define MMIO_SPTE_GEN_HIGH_SHIFT (MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS) #define MMIO_SPTE_GEN_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0) /* * Non-present SPTE value needs to set bit 63 for TDX, in order to suppress * #VE and get EPT violations on non-present PTEs. We can use the * same value also without TDX for both VMX and SVM: * * For SVM NPT, for non-present spte (bit 0 = 0), other bits are ignored. * For VMX EPT, bit 63 is ignored if #VE is disabled. (EPT_VIOLATION_VE=0) * bit 63 is #VE suppress if #VE is enabled. (EPT_VIOLATION_VE=1) */ #ifdef CONFIG_X86_64 #define SHADOW_NONPRESENT_VALUE BIT_ULL(63) static_assert(!(SHADOW_NONPRESENT_VALUE & SPTE_MMU_PRESENT_MASK)); #else #define SHADOW_NONPRESENT_VALUE 0ULL #endif extern u64 __read_mostly shadow_host_writable_mask; extern u64 __read_mostly shadow_mmu_writable_mask; extern u64 __read_mostly shadow_nx_mask; extern u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */ extern u64 __read_mostly shadow_user_mask; extern u64 __read_mostly shadow_accessed_mask; extern u64 __read_mostly shadow_dirty_mask; extern u64 __read_mostly shadow_mmio_value; extern u64 __read_mostly shadow_mmio_mask; extern u64 __read_mostly shadow_mmio_access_mask; extern u64 __read_mostly shadow_present_mask; extern u64 __read_mostly shadow_memtype_mask; extern u64 __read_mostly shadow_me_value; extern u64 __read_mostly shadow_me_mask; /* * SPTEs in MMUs without A/D bits are marked with SPTE_TDP_AD_DISABLED; * shadow_acc_track_mask is the set of bits to be cleared in non-accessed * pages. */ extern u64 __read_mostly shadow_acc_track_mask; /* * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order * to guard against L1TF attacks. */ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask; /* * The number of high-order 1 bits to use in the mask above. 
*/ #define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5 /* * If a thread running without exclusive control of the MMU lock must perform a * multi-part operation on an SPTE, it can set the SPTE to FROZEN_SPTE as a * non-present intermediate value. Other threads which encounter this value * should not modify the SPTE. * * Use a semi-arbitrary value that doesn't set RWX bits, i.e. is not-present on * both AMD and Intel CPUs, and doesn't set PFN bits, i.e. doesn't create a L1TF * vulnerability. * * Only used by the TDP MMU. */ #define FROZEN_SPTE (SHADOW_NONPRESENT_VALUE | 0x5a0ULL) /* Frozen SPTEs must not be misconstrued as shadow present PTEs. */ static_assert(!(FROZEN_SPTE & SPTE_MMU_PRESENT_MASK)); static inline bool is_frozen_spte(u64 spte) { return spte == FROZEN_SPTE; } /* Get an SPTE's index into its parent's page table (and the spt array). */ static inline int spte_index(u64 *sptep) { return ((unsigned long)sptep / sizeof(*sptep)) & (SPTE_ENT_PER_PAGE - 1); } /* * In some cases, we need to preserve the GFN of a non-present or reserved * SPTE when we usurp the upper five bits of the physical address space to * defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask * left into the reserved bits, i.e. the GFN in the SPTE will be split into * high and low parts. This mask covers the lower bits of the GFN. */ extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask; static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page) { struct page *page = pfn_to_page((shadow_page) >> PAGE_SHIFT); return (struct kvm_mmu_page *)page_private(page); } static inline struct kvm_mmu_page *spte_to_child_sp(u64 spte) { return to_shadow_page(spte & SPTE_BASE_ADDR_MASK); } static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep) { return to_shadow_page(__pa(sptep)); } static inline struct kvm_mmu_page *root_to_sp(hpa_t root) { if (kvm_mmu_is_dummy_root(root)) return NULL; /* * The "root" may be a special root, e.g. a PAE entry, treat it as a * SPTE to ensure any non-PA bits are dropped. */ return spte_to_child_sp(root); } static inline bool is_mmio_spte(struct kvm *kvm, u64 spte) { return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value && likely(enable_mmio_caching); } static inline bool is_shadow_present_pte(u64 pte) { return !!(pte & SPTE_MMU_PRESENT_MASK); } static inline bool is_ept_ve_possible(u64 spte) { return (shadow_present_mask & VMX_EPT_SUPPRESS_VE_BIT) && !(spte & VMX_EPT_SUPPRESS_VE_BIT) && (spte & VMX_EPT_RWX_MASK) != VMX_EPT_MISCONFIG_WX_VALUE; } /* * Returns true if A/D bits are supported in hardware and are enabled by KVM. * When enabled, KVM uses A/D bits for all non-nested MMUs. Because L1 can * disable A/D bits in EPTP12, SP and SPTE variants are needed to handle the * scenario where KVM is using A/D bits for L1, but not L2. */ static inline bool kvm_ad_enabled(void) { return !!shadow_accessed_mask; } static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) { return sp->role.ad_disabled; } static inline bool spte_ad_enabled(u64 spte) { KVM_MMU_WARN_ON(!is_shadow_present_pte(spte)); return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_DISABLED; } static inline bool spte_ad_need_write_protect(u64 spte) { KVM_MMU_WARN_ON(!is_shadow_present_pte(spte)); /* * This is benign for non-TDP SPTEs as SPTE_TDP_AD_ENABLED is '0', * and non-TDP SPTEs will never set these bits. Optimize for 64-bit * TDP and do the A/D type check unconditionally. 
*/ return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_ENABLED; } static inline u64 spte_shadow_accessed_mask(u64 spte) { KVM_MMU_WARN_ON(!is_shadow_present_pte(spte)); return spte_ad_enabled(spte) ? shadow_accessed_mask : 0; } static inline u64 spte_shadow_dirty_mask(u64 spte) { KVM_MMU_WARN_ON(!is_shadow_present_pte(spte)); return spte_ad_enabled(spte) ? shadow_dirty_mask : 0; } static inline bool is_access_track_spte(u64 spte) { return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0; } static inline bool is_large_pte(u64 pte) { return pte & PT_PAGE_SIZE_MASK; } static inline bool is_last_spte(u64 pte, int level) { return (level == PG_LEVEL_4K) || is_large_pte(pte); } static inline bool is_executable_pte(u64 spte) { return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask; } static inline kvm_pfn_t spte_to_pfn(u64 pte) { return (pte & SPTE_BASE_ADDR_MASK) >> PAGE_SHIFT; } static inline bool is_accessed_spte(u64 spte) { u64 accessed_mask = spte_shadow_accessed_mask(spte); return accessed_mask ? spte & accessed_mask : !is_access_track_spte(spte); } static inline bool is_dirty_spte(u64 spte) { u64 dirty_mask = spte_shadow_dirty_mask(spte); return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK; } static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte, int level) { int bit7 = (pte >> 7) & 1; return rsvd_check->rsvd_bits_mask[bit7][level-1]; } static inline bool __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level) { return pte & get_rsvd_bits(rsvd_check, pte, level); } static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte) { return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f); } static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check, u64 spte, int level) { return __is_bad_mt_xwr(rsvd_check, spte) || __is_rsvd_bits_set(rsvd_check, spte, level); } /* * A shadow-present leaf SPTE may be non-writable for 4 possible reasons: * * 1. To intercept writes for dirty logging. KVM write-protects huge pages * so that they can be split down into the dirty logging * granularity (4KiB) whenever the guest writes to them. KVM also * write-protects 4KiB pages so that writes can be recorded in the dirty log * (e.g. if not using PML). SPTEs are write-protected for dirty logging * during the VM-iotcls that enable dirty logging. * * 2. To intercept writes to guest page tables that KVM is shadowing. When a * guest writes to its page table the corresponding shadow page table will * be marked "unsync". That way KVM knows which shadow page tables need to * be updated on the next TLB flush, INVLPG, etc. and which do not. * * 3. To prevent guest writes to read-only memory, such as for memory in a * read-only memslot or guest memory backed by a read-only VMA. Writes to * such pages are disallowed entirely. * * 4. To emulate the Accessed bit for SPTEs without A/D bits. Note, in this * case, the SPTE is access-protected, not just write-protected! * * For cases #1 and #4, KVM can safely make such SPTEs writable without taking * mmu_lock as capturing the Accessed/Dirty state doesn't require taking it. * To differentiate #1 and #4 from #2 and #3, KVM uses two software-only bits * in the SPTE: * * shadow_mmu_writable_mask, aka MMU-writable - * Cleared on SPTEs that KVM is currently write-protecting for shadow paging * purposes (case 2 above). 
* * shadow_host_writable_mask, aka Host-writable - * Cleared on SPTEs that are not host-writable (case 3 above) * * Note, not all possible combinations of PT_WRITABLE_MASK, * shadow_mmu_writable_mask, and shadow_host_writable_mask are valid. A given * SPTE can be in only one of the following states, which map to the * aforementioned 3 cases: * * shadow_host_writable_mask | shadow_mmu_writable_mask | PT_WRITABLE_MASK * ------------------------- | ------------------------ | ---------------- * 1 | 1 | 1 (writable) * 1 | 1 | 0 (case 1) * 1 | 0 | 0 (case 2) * 0 | 0 | 0 (case 3) * * The valid combinations of these bits are checked by * check_spte_writable_invariants() whenever an SPTE is modified. * * Clearing the MMU-writable bit is always done under the MMU lock and always * accompanied by a TLB flush before dropping the lock to avoid corrupting the * shadow page tables between vCPUs. Write-protecting an SPTE for dirty logging * (which does not clear the MMU-writable bit), does not flush TLBs before * dropping the lock, as it only needs to synchronize guest writes with the * dirty bitmap. Similarly, making the SPTE inaccessible (and non-writable) for * access-tracking via the clear_young() MMU notifier also does not flush TLBs. * * So, there is the problem: clearing the MMU-writable bit can encounter a * write-protected SPTE while CPUs still have writable mappings for that SPTE * cached in their TLB. To address this, KVM always flushes TLBs when * write-protecting SPTEs if the MMU-writable bit is set on the old SPTE. * * The Host-writable bit is not modified on present SPTEs, it is only set or * cleared when an SPTE is first faulted in from non-present and then remains * immutable. */ static inline bool is_writable_pte(unsigned long pte) { return pte & PT_WRITABLE_MASK; } /* Note: spte must be a shadow-present leaf SPTE. */ static inline void check_spte_writable_invariants(u64 spte) { if (spte & shadow_mmu_writable_mask) WARN_ONCE(!(spte & shadow_host_writable_mask), KBUILD_MODNAME ": MMU-writable SPTE is not Host-writable: %llx", spte); else WARN_ONCE(is_writable_pte(spte), KBUILD_MODNAME ": Writable SPTE is not MMU-writable: %llx", spte); } static inline bool is_mmu_writable_spte(u64 spte) { return spte & shadow_mmu_writable_mask; } static inline u64 get_mmio_spte_generation(u64 spte) { u64 gen; gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT; gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT; return gen; } bool spte_has_volatile_bits(u64 spte); bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch, bool can_unsync, bool host_writable, u64 *new_spte); u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte, union kvm_mmu_page_role role, int index); u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled); u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access); u64 mark_spte_for_access_track(u64 spte); /* Restore an acc-track PTE back to a regular PTE */ static inline u64 restore_acc_track_spte(u64 spte) { u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT) & SHADOW_ACC_TRACK_SAVED_BITS_MASK; spte &= ~shadow_acc_track_mask; spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK << SHADOW_ACC_TRACK_SAVED_BITS_SHIFT); spte |= saved_bits; return spte; } void __init kvm_mmu_spte_module_init(void); void kvm_mmu_reset_all_pte_masks(void); #endif
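The header above splits the 19-bit MMIO generation across SPTE bits 3-10 (low 8 bits) and 52-62 (high 11 bits), and get_mmio_spte_generation() stitches the two halves back together. The standalone program below is a worked round-trip of that encoding with the masks re-defined locally so it compiles outside the kernel; encode_gen() and decode_gen() are illustrative helpers, not KVM functions.

/*
 * Worked example (userspace, self-contained) of the MMIO generation
 * split described above. GENMASK_ULL is re-defined locally; the bit
 * positions match MMIO_SPTE_GEN_{LOW,HIGH}_{START,END} in the header.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)  ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

#define GEN_LOW_START	3
#define GEN_LOW_END	10
#define GEN_HIGH_START	52
#define GEN_HIGH_END	62
#define GEN_LOW_MASK	GENMASK_ULL(GEN_LOW_END, GEN_LOW_START)
#define GEN_HIGH_MASK	GENMASK_ULL(GEN_HIGH_END, GEN_HIGH_START)
#define GEN_LOW_BITS	(GEN_LOW_END - GEN_LOW_START + 1)	/* 8 */

/* place generation bits 0-7 at SPTE bits 3-10, bits 8-18 at 52-62 */
static uint64_t encode_gen(uint64_t gen)
{
	uint64_t spte = 0;

	spte |= (gen & GENMASK_ULL(GEN_LOW_BITS - 1, 0)) << GEN_LOW_START;
	spte |= (gen >> GEN_LOW_BITS) << GEN_HIGH_START;
	return spte & (GEN_LOW_MASK | GEN_HIGH_MASK);
}

/* reassemble the generation, equivalent to get_mmio_spte_generation() */
static uint64_t decode_gen(uint64_t spte)
{
	uint64_t gen;

	gen = (spte & GEN_LOW_MASK) >> GEN_LOW_START;
	gen |= ((spte & GEN_HIGH_MASK) >> GEN_HIGH_START) << GEN_LOW_BITS;
	return gen;
}

int main(void)
{
	uint64_t gen = 0x5a5a5 & GENMASK_ULL(18, 0);	/* any 19-bit value */

	assert(decode_gen(encode_gen(gen)) == gen);
	printf("generation %#llx survives the round trip\n",
	       (unsigned long long)gen);
	return 0;
}

decode_gen() shifts the high half down by GEN_HIGH_START and back up by GEN_LOW_BITS, which is the same net shift as the header's MMIO_SPTE_GEN_HIGH_SHIFT (52 - 8 = 44).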
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Media entity
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <media/media-entity.h>
#include <media/media-device.h>

static inline const char *intf_type(struct media_interface *intf)
{
	switch (intf->type) {
	case MEDIA_INTF_T_DVB_FE:
		return "dvb-frontend";
	case MEDIA_INTF_T_DVB_DEMUX:
		return "dvb-demux";
	case MEDIA_INTF_T_DVB_DVR:
		return "dvb-dvr";
	case MEDIA_INTF_T_DVB_CA:
		return "dvb-ca";
	case MEDIA_INTF_T_DVB_NET:
		return "dvb-net";
	case MEDIA_INTF_T_V4L_VIDEO:
		return "v4l-video";
	case MEDIA_INTF_T_V4L_VBI:
		return "v4l-vbi";
	case MEDIA_INTF_T_V4L_RADIO:
		return "v4l-radio";
	case MEDIA_INTF_T_V4L_SUBDEV:
		return "v4l-subdev";
	case MEDIA_INTF_T_V4L_SWRADIO:
		return "v4l-swradio";
	case MEDIA_INTF_T_V4L_TOUCH:
		return "v4l-touch";
	default:
		return "unknown-intf";
	}
};

static inline const char *link_type_name(struct media_link *link)
{
	switch (link->flags & MEDIA_LNK_FL_LINK_TYPE) {
	case MEDIA_LNK_FL_DATA_LINK:
		return "data";
	case MEDIA_LNK_FL_INTERFACE_LINK:
		return "interface";
	case MEDIA_LNK_FL_ANCILLARY_LINK:
		return "ancillary";
	default:
		return "unknown";
	}
}

__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum,
					struct media_device *mdev)
{
	int idx_max;

	idx_max = ALIGN(mdev->entity_internal_idx_max + 1, BITS_PER_LONG);
	ent_enum->bmap = bitmap_zalloc(idx_max, GFP_KERNEL);
	if (!ent_enum->bmap)
		return -ENOMEM;

	ent_enum->idx_max = idx_max;

	return 0;
}
EXPORT_SYMBOL_GPL(media_entity_enum_init);

void media_entity_enum_cleanup(struct media_entity_enum *ent_enum)
{
	bitmap_free(ent_enum->bmap);
}
EXPORT_SYMBOL_GPL(media_entity_enum_cleanup);

/**
 * dev_dbg_obj - Prints in debug mode a change on some object
 *
 * @event_name:	Name of the event to report. Could be __func__
 * @gobj:	Pointer to the object
 *
 * Enabled only if DEBUG or CONFIG_DYNAMIC_DEBUG. Otherwise, it
 * won't produce any code.
 */
static void dev_dbg_obj(const char *event_name, struct media_gobj *gobj)
{
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
	switch (media_type(gobj)) {
	case MEDIA_GRAPH_ENTITY:
		dev_dbg(gobj->mdev->dev,
			"%s id %u: entity '%s'\n",
			event_name, media_id(gobj),
			gobj_to_entity(gobj)->name);
		break;
	case MEDIA_GRAPH_LINK:
	{
		struct media_link *link = gobj_to_link(gobj);

		dev_dbg(gobj->mdev->dev,
			"%s id %u: %s link id %u ==> id %u\n",
			event_name, media_id(gobj), link_type_name(link),
			media_id(link->gobj0), media_id(link->gobj1));
		break;
	}
	case MEDIA_GRAPH_PAD:
	{
		struct media_pad *pad = gobj_to_pad(gobj);

		dev_dbg(gobj->mdev->dev,
			"%s id %u: %s%spad '%s':%d\n",
			event_name, media_id(gobj),
			pad->flags & MEDIA_PAD_FL_SINK ? "sink " : "",
			pad->flags & MEDIA_PAD_FL_SOURCE ?
"source " : "", pad->entity->name, pad->index); break; } case MEDIA_GRAPH_INTF_DEVNODE: { struct media_interface *intf = gobj_to_intf(gobj); struct media_intf_devnode *devnode = intf_to_devnode(intf); dev_dbg(gobj->mdev->dev, "%s id %u: intf_devnode %s - major: %d, minor: %d\n", event_name, media_id(gobj), intf_type(intf), devnode->major, devnode->minor); break; } } #endif } void media_gobj_create(struct media_device *mdev, enum media_gobj_type type, struct media_gobj *gobj) { BUG_ON(!mdev); gobj->mdev = mdev; /* Create a per-type unique object ID */ gobj->id = media_gobj_gen_id(type, ++mdev->id); switch (type) { case MEDIA_GRAPH_ENTITY: list_add_tail(&gobj->list, &mdev->entities); break; case MEDIA_GRAPH_PAD: list_add_tail(&gobj->list, &mdev->pads); break; case MEDIA_GRAPH_LINK: list_add_tail(&gobj->list, &mdev->links); break; case MEDIA_GRAPH_INTF_DEVNODE: list_add_tail(&gobj->list, &mdev->interfaces); break; } mdev->topology_version++; dev_dbg_obj(__func__, gobj); } void media_gobj_destroy(struct media_gobj *gobj) { /* Do nothing if the object is not linked. */ if (gobj->mdev == NULL) return; dev_dbg_obj(__func__, gobj); gobj->mdev->topology_version++; /* Remove the object from mdev list */ list_del(&gobj->list); gobj->mdev = NULL; } /* * TODO: Get rid of this. */ #define MEDIA_ENTITY_MAX_PADS 512 int media_entity_pads_init(struct media_entity *entity, u16 num_pads, struct media_pad *pads) { struct media_device *mdev = entity->graph_obj.mdev; struct media_pad *iter; unsigned int i = 0; int ret = 0; if (num_pads >= MEDIA_ENTITY_MAX_PADS) return -E2BIG; entity->num_pads = num_pads; entity->pads = pads; if (mdev) mutex_lock(&mdev->graph_mutex); media_entity_for_each_pad(entity, iter) { iter->entity = entity; iter->index = i++; if (hweight32(iter->flags & (MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_SOURCE)) != 1) { ret = -EINVAL; break; } if (mdev) media_gobj_create(mdev, MEDIA_GRAPH_PAD, &iter->graph_obj); } if (ret && mdev) { media_entity_for_each_pad(entity, iter) media_gobj_destroy(&iter->graph_obj); } if (mdev) mutex_unlock(&mdev->graph_mutex); return ret; } EXPORT_SYMBOL_GPL(media_entity_pads_init); /* ----------------------------------------------------------------------------- * Graph traversal */ /** * media_entity_has_pad_interdep - Check interdependency between two pads * * @entity: The entity * @pad0: The first pad index * @pad1: The second pad index * * This function checks the interdependency inside the entity between @pad0 * and @pad1. If two pads are interdependent they are part of the same pipeline * and enabling one of the pads means that the other pad will become "locked" * and doesn't allow configuration changes. * * This function uses the &media_entity_operations.has_pad_interdep() operation * to check the dependency inside the entity between @pad0 and @pad1. If the * has_pad_interdep operation is not implemented, all pads of the entity are * considered to be interdependent. * * One of @pad0 and @pad1 must be a sink pad and the other one a source pad. * The function returns false if both pads are sinks or sources. * * The caller must hold entity->graph_obj.mdev->mutex. * * Return: true if the pads are connected internally and false otherwise. 
*/ static bool media_entity_has_pad_interdep(struct media_entity *entity, unsigned int pad0, unsigned int pad1) { if (pad0 >= entity->num_pads || pad1 >= entity->num_pads) return false; if (entity->pads[pad0].flags & entity->pads[pad1].flags & (MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_SOURCE)) return false; if (!entity->ops || !entity->ops->has_pad_interdep) return true; return entity->ops->has_pad_interdep(entity, pad0, pad1); } static struct media_entity * media_entity_other(struct media_entity *entity, struct media_link *link) { if (link->source->entity == entity) return link->sink->entity; else return link->source->entity; } /* push an entity to traversal stack */ static void stack_push(struct media_graph *graph, struct media_entity *entity) { if (graph->top == MEDIA_ENTITY_ENUM_MAX_DEPTH - 1) { WARN_ON(1); return; } graph->top++; graph->stack[graph->top].link = entity->links.next; graph->stack[graph->top].entity = entity; } static struct media_entity *stack_pop(struct media_graph *graph) { struct media_entity *entity; entity = graph->stack[graph->top].entity; graph->top--; return entity; } #define link_top(en) ((en)->stack[(en)->top].link) #define stack_top(en) ((en)->stack[(en)->top].entity) /** * media_graph_walk_init - Allocate resources for graph walk * @graph: Media graph structure that will be used to walk the graph * @mdev: Media device * * Reserve resources for graph walk in media device's current * state. The memory must be released using * media_graph_walk_cleanup(). * * Returns error on failure, zero on success. */ __must_check int media_graph_walk_init( struct media_graph *graph, struct media_device *mdev) { return media_entity_enum_init(&graph->ent_enum, mdev); } EXPORT_SYMBOL_GPL(media_graph_walk_init); /** * media_graph_walk_cleanup - Release resources related to graph walking * @graph: Media graph structure that was used to walk the graph */ void media_graph_walk_cleanup(struct media_graph *graph) { media_entity_enum_cleanup(&graph->ent_enum); } EXPORT_SYMBOL_GPL(media_graph_walk_cleanup); void media_graph_walk_start(struct media_graph *graph, struct media_entity *entity) { media_entity_enum_zero(&graph->ent_enum); media_entity_enum_set(&graph->ent_enum, entity); graph->top = 0; graph->stack[graph->top].entity = NULL; stack_push(graph, entity); dev_dbg(entity->graph_obj.mdev->dev, "begin graph walk at '%s'\n", entity->name); } EXPORT_SYMBOL_GPL(media_graph_walk_start); static void media_graph_walk_iter(struct media_graph *graph) { struct media_entity *entity = stack_top(graph); struct media_link *link; struct media_entity *next; link = list_entry(link_top(graph), typeof(*link), list); /* If the link is not a data link, don't follow it */ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) != MEDIA_LNK_FL_DATA_LINK) { link_top(graph) = link_top(graph)->next; return; } /* The link is not enabled so we do not follow. */ if (!(link->flags & MEDIA_LNK_FL_ENABLED)) { link_top(graph) = link_top(graph)->next; dev_dbg(entity->graph_obj.mdev->dev, "walk: skipping disabled link '%s':%u -> '%s':%u\n", link->source->entity->name, link->source->index, link->sink->entity->name, link->sink->index); return; } /* Get the entity at the other end of the link. */ next = media_entity_other(entity, link); /* Has the entity already been visited? */ if (media_entity_enum_test_and_set(&graph->ent_enum, next)) { link_top(graph) = link_top(graph)->next; dev_dbg(entity->graph_obj.mdev->dev, "walk: skipping entity '%s' (already seen)\n", next->name); return; } /* Push the new entity to stack and start over. 
*/ link_top(graph) = link_top(graph)->next; stack_push(graph, next); dev_dbg(entity->graph_obj.mdev->dev, "walk: pushing '%s' on stack\n", next->name); lockdep_assert_held(&entity->graph_obj.mdev->graph_mutex); } struct media_entity *media_graph_walk_next(struct media_graph *graph) { struct media_entity *entity; if (stack_top(graph) == NULL) return NULL; /* * Depth first search. Push entity to stack and continue from * top of the stack until no more entities on the level can be * found. */ while (link_top(graph) != &stack_top(graph)->links) media_graph_walk_iter(graph); entity = stack_pop(graph); dev_dbg(entity->graph_obj.mdev->dev, "walk: returning entity '%s'\n", entity->name); return entity; } EXPORT_SYMBOL_GPL(media_graph_walk_next); /* ----------------------------------------------------------------------------- * Pipeline management */ /* * The pipeline traversal stack stores pads that are reached during graph * traversal, with a list of links to be visited to continue the traversal. * When a new pad is reached, an entry is pushed on the top of the stack and * points to the incoming pad and the first link of the entity. * * To find further pads in the pipeline, the traversal algorithm follows * internal pad dependencies in the entity, and then links in the graph. It * does so by iterating over all links of the entity, and following enabled * links that originate from a pad that is internally connected to the incoming * pad, as reported by the media_entity_has_pad_interdep() function. */ /** * struct media_pipeline_walk_entry - Entry in the pipeline traversal stack * * @pad: The media pad being visited * @links: Links left to be visited */ struct media_pipeline_walk_entry { struct media_pad *pad; struct list_head *links; }; /** * struct media_pipeline_walk - State used by the media pipeline traversal * algorithm * * @mdev: The media device * @stack: Depth-first search stack * @stack.size: Number of allocated entries in @stack.entries * @stack.top: Index of the top stack entry (-1 if the stack is empty) * @stack.entries: Stack entries */ struct media_pipeline_walk { struct media_device *mdev; struct { unsigned int size; int top; struct media_pipeline_walk_entry *entries; } stack; }; #define MEDIA_PIPELINE_STACK_GROW_STEP 16 static struct media_pipeline_walk_entry * media_pipeline_walk_top(struct media_pipeline_walk *walk) { return &walk->stack.entries[walk->stack.top]; } static bool media_pipeline_walk_empty(struct media_pipeline_walk *walk) { return walk->stack.top == -1; } /* Increase the stack size by MEDIA_PIPELINE_STACK_GROW_STEP elements. */ static int media_pipeline_walk_resize(struct media_pipeline_walk *walk) { struct media_pipeline_walk_entry *entries; unsigned int new_size; /* Safety check, to avoid stack overflows in case of bugs. */ if (walk->stack.size >= 256) return -E2BIG; new_size = walk->stack.size + MEDIA_PIPELINE_STACK_GROW_STEP; entries = krealloc(walk->stack.entries, new_size * sizeof(*walk->stack.entries), GFP_KERNEL); if (!entries) return -ENOMEM; walk->stack.entries = entries; walk->stack.size = new_size; return 0; } /* Push a new entry on the stack. 
*/ static int media_pipeline_walk_push(struct media_pipeline_walk *walk, struct media_pad *pad) { struct media_pipeline_walk_entry *entry; int ret; if (walk->stack.top + 1 >= walk->stack.size) { ret = media_pipeline_walk_resize(walk); if (ret) return ret; } walk->stack.top++; entry = media_pipeline_walk_top(walk); entry->pad = pad; entry->links = pad->entity->links.next; dev_dbg(walk->mdev->dev, "media pipeline: pushed entry %u: '%s':%u\n", walk->stack.top, pad->entity->name, pad->index); return 0; } /* * Move the top entry link cursor to the next link. If all links of the entry * have been visited, pop the entry itself. Return true if the entry has been * popped. */ static bool media_pipeline_walk_pop(struct media_pipeline_walk *walk) { struct media_pipeline_walk_entry *entry; if (WARN_ON(walk->stack.top < 0)) return false; entry = media_pipeline_walk_top(walk); if (entry->links->next == &entry->pad->entity->links) { dev_dbg(walk->mdev->dev, "media pipeline: entry %u has no more links, popping\n", walk->stack.top); walk->stack.top--; return true; } entry->links = entry->links->next; dev_dbg(walk->mdev->dev, "media pipeline: moved entry %u to next link\n", walk->stack.top); return false; } /* Free all memory allocated while walking the pipeline. */ static void media_pipeline_walk_destroy(struct media_pipeline_walk *walk) { kfree(walk->stack.entries); } /* Add a pad to the pipeline and push it to the stack. */ static int media_pipeline_add_pad(struct media_pipeline *pipe, struct media_pipeline_walk *walk, struct media_pad *pad) { struct media_pipeline_pad *ppad; list_for_each_entry(ppad, &pipe->pads, list) { if (ppad->pad == pad) { dev_dbg(pad->graph_obj.mdev->dev, "media pipeline: already contains pad '%s':%u\n", pad->entity->name, pad->index); return 0; } } ppad = kzalloc(sizeof(*ppad), GFP_KERNEL); if (!ppad) return -ENOMEM; ppad->pipe = pipe; ppad->pad = pad; list_add_tail(&ppad->list, &pipe->pads); dev_dbg(pad->graph_obj.mdev->dev, "media pipeline: added pad '%s':%u\n", pad->entity->name, pad->index); return media_pipeline_walk_push(walk, pad); } /* Explore the next link of the entity at the top of the stack. */ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, struct media_pipeline_walk *walk) { struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk); struct media_pad *origin; struct media_link *link; struct media_pad *local; struct media_pad *remote; bool last_link; int ret; origin = entry->pad; link = list_entry(entry->links, typeof(*link), list); last_link = media_pipeline_walk_pop(walk); if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) != MEDIA_LNK_FL_DATA_LINK) { dev_dbg(walk->mdev->dev, "media pipeline: skipping link (not data-link)\n"); return 0; } dev_dbg(walk->mdev->dev, "media pipeline: exploring link '%s':%u -> '%s':%u\n", link->source->entity->name, link->source->index, link->sink->entity->name, link->sink->index); /* Get the local pad and remote pad. */ if (link->source->entity == origin->entity) { local = link->source; remote = link->sink; } else { local = link->sink; remote = link->source; } /* * Skip links that originate from a different pad than the incoming pad * that is not connected internally in the entity to the incoming pad. */ if (origin != local && !media_entity_has_pad_interdep(origin->entity, origin->index, local->index)) { dev_dbg(walk->mdev->dev, "media pipeline: skipping link (no route)\n"); goto done; } /* * Add the local pad of the link to the pipeline and push it to the * stack, if not already present. 
*/ ret = media_pipeline_add_pad(pipe, walk, local); if (ret) return ret; /* Similarly, add the remote pad, but only if the link is enabled. */ if (!(link->flags & MEDIA_LNK_FL_ENABLED)) { dev_dbg(walk->mdev->dev, "media pipeline: skipping link (disabled)\n"); goto done; } ret = media_pipeline_add_pad(pipe, walk, remote); if (ret) return ret; done: /* * If we're done iterating over links, iterate over pads of the entity. * This is necessary to discover pads that are not connected with any * link. Those are dead ends from a pipeline exploration point of view, * but are still part of the pipeline and need to be added to enable * proper validation. */ if (!last_link) return 0; dev_dbg(walk->mdev->dev, "media pipeline: adding unconnected pads of '%s'\n", local->entity->name); media_entity_for_each_pad(origin->entity, local) { /* * Skip the origin pad (already handled), pad that have links * (already discovered through iterating over links) and pads * not internally connected. */ if (origin == local || !local->num_links || !media_entity_has_pad_interdep(origin->entity, origin->index, local->index)) continue; ret = media_pipeline_add_pad(pipe, walk, local); if (ret) return ret; } return 0; } static void media_pipeline_cleanup(struct media_pipeline *pipe) { while (!list_empty(&pipe->pads)) { struct media_pipeline_pad *ppad; ppad = list_first_entry(&pipe->pads, typeof(*ppad), list); list_del(&ppad->list); kfree(ppad); } } static int media_pipeline_populate(struct media_pipeline *pipe, struct media_pad *pad) { struct media_pipeline_walk walk = { }; struct media_pipeline_pad *ppad; int ret; /* * Populate the media pipeline by walking the media graph, starting * from @pad. */ INIT_LIST_HEAD(&pipe->pads); pipe->mdev = pad->graph_obj.mdev; walk.mdev = pipe->mdev; walk.stack.top = -1; ret = media_pipeline_add_pad(pipe, &walk, pad); if (ret) goto done; /* * Use a depth-first search algorithm: as long as the stack is not * empty, explore the next link of the top entry. The * media_pipeline_explore_next_link() function will either move to the * next link, pop the entry if fully visited, or add new entries on * top. */ while (!media_pipeline_walk_empty(&walk)) { ret = media_pipeline_explore_next_link(pipe, &walk); if (ret) goto done; } dev_dbg(pad->graph_obj.mdev->dev, "media pipeline populated, found pads:\n"); list_for_each_entry(ppad, &pipe->pads, list) dev_dbg(pad->graph_obj.mdev->dev, "- '%s':%u\n", ppad->pad->entity->name, ppad->pad->index); WARN_ON(walk.stack.top != -1); ret = 0; done: media_pipeline_walk_destroy(&walk); if (ret) media_pipeline_cleanup(pipe); return ret; } __must_check int __media_pipeline_start(struct media_pad *pad, struct media_pipeline *pipe) { struct media_device *mdev = pad->graph_obj.mdev; struct media_pipeline_pad *err_ppad; struct media_pipeline_pad *ppad; int ret; lockdep_assert_held(&mdev->graph_mutex); /* * If the pad is already part of a pipeline, that pipeline must be the * same as the pipe given to media_pipeline_start(). */ if (WARN_ON(pad->pipe && pad->pipe != pipe)) return -EINVAL; /* * If the pipeline has already been started, it is guaranteed to be * valid, so just increase the start count. */ if (pipe->start_count) { pipe->start_count++; return 0; } /* * Populate the pipeline. This populates the media_pipeline pads list * with media_pipeline_pad instances for each pad found during graph * walk. */ ret = media_pipeline_populate(pipe, pad); if (ret) return ret; /* * Now that all the pads in the pipeline have been gathered, perform * the validation steps. 
*/ list_for_each_entry(ppad, &pipe->pads, list) { struct media_pad *pad = ppad->pad; struct media_entity *entity = pad->entity; bool has_enabled_link = false; struct media_link *link; dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name, pad->index); /* * 1. Ensure that the pad doesn't already belong to a different * pipeline. */ if (pad->pipe) { dev_dbg(mdev->dev, "Failed to start pipeline: pad '%s':%u busy\n", pad->entity->name, pad->index); ret = -EBUSY; goto error; } /* * 2. Validate all active links whose sink is the current pad. * Validation of the source pads is performed in the context of * the connected sink pad to avoid duplicating checks. */ for_each_media_entity_data_link(entity, link) { /* Skip links unrelated to the current pad. */ if (link->sink != pad && link->source != pad) continue; /* Record if the pad has links and enabled links. */ if (link->flags & MEDIA_LNK_FL_ENABLED) has_enabled_link = true; /* * Validate the link if it's enabled and has the * current pad as its sink. */ if (!(link->flags & MEDIA_LNK_FL_ENABLED)) continue; if (link->sink != pad) continue; if (!entity->ops || !entity->ops->link_validate) continue; ret = entity->ops->link_validate(link); if (ret) { dev_dbg(mdev->dev, "Link '%s':%u -> '%s':%u failed validation: %d\n", link->source->entity->name, link->source->index, link->sink->entity->name, link->sink->index, ret); goto error; } dev_dbg(mdev->dev, "Link '%s':%u -> '%s':%u is valid\n", link->source->entity->name, link->source->index, link->sink->entity->name, link->sink->index); } /* * 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set, * ensure that it has either no link or an enabled link. */ if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && !has_enabled_link) { dev_dbg(mdev->dev, "Pad '%s':%u must be connected by an enabled link\n", pad->entity->name, pad->index); ret = -ENOLINK; goto error; } /* Validation passed, store the pipe pointer in the pad. */ pad->pipe = pipe; } pipe->start_count++; return 0; error: /* * Link validation on graph failed. We revert what we did and * return the error. 
*/ list_for_each_entry(err_ppad, &pipe->pads, list) { if (err_ppad == ppad) break; err_ppad->pad->pipe = NULL; } media_pipeline_cleanup(pipe); return ret; } EXPORT_SYMBOL_GPL(__media_pipeline_start); __must_check int media_pipeline_start(struct media_pad *pad, struct media_pipeline *pipe) { struct media_device *mdev = pad->graph_obj.mdev; int ret; mutex_lock(&mdev->graph_mutex); ret = __media_pipeline_start(pad, pipe); mutex_unlock(&mdev->graph_mutex); return ret; } EXPORT_SYMBOL_GPL(media_pipeline_start); void __media_pipeline_stop(struct media_pad *pad) { struct media_pipeline *pipe = pad->pipe; struct media_pipeline_pad *ppad; /* * If the following check fails, the driver has performed an * unbalanced call to media_pipeline_stop() */ if (WARN_ON(!pipe)) return; if (--pipe->start_count) return; list_for_each_entry(ppad, &pipe->pads, list) ppad->pad->pipe = NULL; media_pipeline_cleanup(pipe); if (pipe->allocated) kfree(pipe); } EXPORT_SYMBOL_GPL(__media_pipeline_stop); void media_pipeline_stop(struct media_pad *pad) { struct media_device *mdev = pad->graph_obj.mdev; mutex_lock(&mdev->graph_mutex); __media_pipeline_stop(pad); mutex_unlock(&mdev->graph_mutex); } EXPORT_SYMBOL_GPL(media_pipeline_stop); __must_check int media_pipeline_alloc_start(struct media_pad *pad) { struct media_device *mdev = pad->graph_obj.mdev; struct media_pipeline *new_pipe = NULL; struct media_pipeline *pipe; int ret; mutex_lock(&mdev->graph_mutex); /* * Is the pad already part of a pipeline? If not, we need to allocate * a pipe. */ pipe = media_pad_pipeline(pad); if (!pipe) { new_pipe = kzalloc(sizeof(*new_pipe), GFP_KERNEL); if (!new_pipe) { ret = -ENOMEM; goto out; } pipe = new_pipe; pipe->allocated = true; } ret = __media_pipeline_start(pad, pipe); if (ret) kfree(new_pipe); out: mutex_unlock(&mdev->graph_mutex); return ret; } EXPORT_SYMBOL_GPL(media_pipeline_alloc_start); struct media_pad * __media_pipeline_pad_iter_next(struct media_pipeline *pipe, struct media_pipeline_pad_iter *iter, struct media_pad *pad) { if (!pad) iter->cursor = pipe->pads.next; if (iter->cursor == &pipe->pads) return NULL; pad = list_entry(iter->cursor, struct media_pipeline_pad, list)->pad; iter->cursor = iter->cursor->next; return pad; } EXPORT_SYMBOL_GPL(__media_pipeline_pad_iter_next); int media_pipeline_entity_iter_init(struct media_pipeline *pipe, struct media_pipeline_entity_iter *iter) { return media_entity_enum_init(&iter->ent_enum, pipe->mdev); } EXPORT_SYMBOL_GPL(media_pipeline_entity_iter_init); void media_pipeline_entity_iter_cleanup(struct media_pipeline_entity_iter *iter) { media_entity_enum_cleanup(&iter->ent_enum); } EXPORT_SYMBOL_GPL(media_pipeline_entity_iter_cleanup); struct media_entity * __media_pipeline_entity_iter_next(struct media_pipeline *pipe, struct media_pipeline_entity_iter *iter, struct media_entity *entity) { if (!entity) iter->cursor = pipe->pads.next; while (iter->cursor != &pipe->pads) { struct media_pipeline_pad *ppad; struct media_entity *entity; ppad = list_entry(iter->cursor, struct media_pipeline_pad, list); entity = ppad->pad->entity; iter->cursor = iter->cursor->next; if (!media_entity_enum_test_and_set(&iter->ent_enum, entity)) return entity; } return NULL; } EXPORT_SYMBOL_GPL(__media_pipeline_entity_iter_next); /* ----------------------------------------------------------------------------- * Links management */ static struct media_link *media_add_link(struct list_head *head) { struct media_link *link; link = kzalloc(sizeof(*link), GFP_KERNEL); if (link == NULL) return NULL; 
list_add_tail(&link->list, head); return link; } static void __media_entity_remove_link(struct media_entity *entity, struct media_link *link) { struct media_link *rlink, *tmp; struct media_entity *remote; /* Remove the reverse links for a data link. */ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == MEDIA_LNK_FL_DATA_LINK) { link->source->num_links--; link->sink->num_links--; if (link->source->entity == entity) remote = link->sink->entity; else remote = link->source->entity; list_for_each_entry_safe(rlink, tmp, &remote->links, list) { if (rlink != link->reverse) continue; if (link->source->entity == entity) remote->num_backlinks--; /* Remove the remote link */ list_del(&rlink->list); media_gobj_destroy(&rlink->graph_obj); kfree(rlink); if (--remote->num_links == 0) break; } } list_del(&link->list); media_gobj_destroy(&link->graph_obj); kfree(link); } int media_get_pad_index(struct media_entity *entity, u32 pad_type, enum media_pad_signal_type sig_type) { unsigned int i; if (!entity) return -EINVAL; for (i = 0; i < entity->num_pads; i++) { if ((entity->pads[i].flags & (MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_SOURCE)) != pad_type) continue; if (entity->pads[i].sig_type == sig_type) return i; } return -EINVAL; } EXPORT_SYMBOL_GPL(media_get_pad_index); int media_create_pad_link(struct media_entity *source, u16 source_pad, struct media_entity *sink, u16 sink_pad, u32 flags) { struct media_link *link; struct media_link *backlink; if (flags & MEDIA_LNK_FL_LINK_TYPE) return -EINVAL; flags |= MEDIA_LNK_FL_DATA_LINK; if (WARN_ON(!source || !sink) || WARN_ON(source_pad >= source->num_pads) || WARN_ON(sink_pad >= sink->num_pads)) return -EINVAL; if (WARN_ON(!(source->pads[source_pad].flags & MEDIA_PAD_FL_SOURCE))) return -EINVAL; if (WARN_ON(!(sink->pads[sink_pad].flags & MEDIA_PAD_FL_SINK))) return -EINVAL; link = media_add_link(&source->links); if (link == NULL) return -ENOMEM; link->source = &source->pads[source_pad]; link->sink = &sink->pads[sink_pad]; link->flags = flags; /* Initialize graph object embedded at the new link */ media_gobj_create(source->graph_obj.mdev, MEDIA_GRAPH_LINK, &link->graph_obj); /* Create the backlink. Backlinks are used to help graph traversal and * are not reported to userspace. 
*/ backlink = media_add_link(&sink->links); if (backlink == NULL) { __media_entity_remove_link(source, link); return -ENOMEM; } backlink->source = &source->pads[source_pad]; backlink->sink = &sink->pads[sink_pad]; backlink->flags = flags; backlink->is_backlink = true; /* Initialize graph object embedded at the new link */ media_gobj_create(sink->graph_obj.mdev, MEDIA_GRAPH_LINK, &backlink->graph_obj); link->reverse = backlink; backlink->reverse = link; sink->num_backlinks++; sink->num_links++; source->num_links++; link->source->num_links++; link->sink->num_links++; return 0; } EXPORT_SYMBOL_GPL(media_create_pad_link); int media_create_pad_links(const struct media_device *mdev, const u32 source_function, struct media_entity *source, const u16 source_pad, const u32 sink_function, struct media_entity *sink, const u16 sink_pad, u32 flags, const bool allow_both_undefined) { struct media_entity *entity; unsigned function; int ret; /* Trivial case: 1:1 relation */ if (source && sink) return media_create_pad_link(source, source_pad, sink, sink_pad, flags); /* Worse case scenario: n:n relation */ if (!source && !sink) { if (!allow_both_undefined) return 0; media_device_for_each_entity(source, mdev) { if (source->function != source_function) continue; media_device_for_each_entity(sink, mdev) { if (sink->function != sink_function) continue; ret = media_create_pad_link(source, source_pad, sink, sink_pad, flags); if (ret) return ret; flags &= ~(MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); } } return 0; } /* Handle 1:n and n:1 cases */ if (source) function = sink_function; else function = source_function; media_device_for_each_entity(entity, mdev) { if (entity->function != function) continue; if (source) ret = media_create_pad_link(source, source_pad, entity, sink_pad, flags); else ret = media_create_pad_link(entity, source_pad, sink, sink_pad, flags); if (ret) return ret; flags &= ~(MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); } return 0; } EXPORT_SYMBOL_GPL(media_create_pad_links); void __media_entity_remove_links(struct media_entity *entity) { struct media_link *link, *tmp; list_for_each_entry_safe(link, tmp, &entity->links, list) __media_entity_remove_link(entity, link); entity->num_links = 0; entity->num_backlinks = 0; } EXPORT_SYMBOL_GPL(__media_entity_remove_links); void media_entity_remove_links(struct media_entity *entity) { struct media_device *mdev = entity->graph_obj.mdev; /* Do nothing if the entity is not registered. */ if (mdev == NULL) return; mutex_lock(&mdev->graph_mutex); __media_entity_remove_links(entity); mutex_unlock(&mdev->graph_mutex); } EXPORT_SYMBOL_GPL(media_entity_remove_links); static int __media_entity_setup_link_notify(struct media_link *link, u32 flags) { int ret; /* Notify both entities. */ ret = media_entity_call(link->source->entity, link_setup, link->source, link->sink, flags); if (ret < 0 && ret != -ENOIOCTLCMD) return ret; ret = media_entity_call(link->sink->entity, link_setup, link->sink, link->source, flags); if (ret < 0 && ret != -ENOIOCTLCMD) { media_entity_call(link->source->entity, link_setup, link->source, link->sink, link->flags); return ret; } link->flags = flags; link->reverse->flags = link->flags; return 0; } int __media_entity_setup_link(struct media_link *link, u32 flags) { const u32 mask = MEDIA_LNK_FL_ENABLED; struct media_device *mdev; struct media_pad *source, *sink; int ret = -EBUSY; if (link == NULL) return -EINVAL; /* The non-modifiable link flags must not be modified. 
*/ if ((link->flags & ~mask) != (flags & ~mask)) return -EINVAL; if (link->flags & MEDIA_LNK_FL_IMMUTABLE) return link->flags == flags ? 0 : -EINVAL; if (link->flags == flags) return 0; source = link->source; sink = link->sink; if (!(link->flags & MEDIA_LNK_FL_DYNAMIC) && (media_pad_is_streaming(source) || media_pad_is_streaming(sink))) return -EBUSY; mdev = source->graph_obj.mdev; if (mdev->ops && mdev->ops->link_notify) { ret = mdev->ops->link_notify(link, flags, MEDIA_DEV_NOTIFY_PRE_LINK_CH); if (ret < 0) return ret; } ret = __media_entity_setup_link_notify(link, flags); if (mdev->ops && mdev->ops->link_notify) mdev->ops->link_notify(link, flags, MEDIA_DEV_NOTIFY_POST_LINK_CH); return ret; } EXPORT_SYMBOL_GPL(__media_entity_setup_link); int media_entity_setup_link(struct media_link *link, u32 flags) { int ret; mutex_lock(&link->graph_obj.mdev->graph_mutex); ret = __media_entity_setup_link(link, flags); mutex_unlock(&link->graph_obj.mdev->graph_mutex); return ret; } EXPORT_SYMBOL_GPL(media_entity_setup_link); struct media_link * media_entity_find_link(struct media_pad *source, struct media_pad *sink) { struct media_link *link; for_each_media_entity_data_link(source->entity, link) { if (link->source->entity == source->entity && link->source->index == source->index && link->sink->entity == sink->entity && link->sink->index == sink->index) return link; } return NULL; } EXPORT_SYMBOL_GPL(media_entity_find_link); struct media_pad *media_pad_remote_pad_first(const struct media_pad *pad) { struct media_link *link; for_each_media_entity_data_link(pad->entity, link) { if (!(link->flags & MEDIA_LNK_FL_ENABLED)) continue; if (link->source == pad) return link->sink; if (link->sink == pad) return link->source; } return NULL; } EXPORT_SYMBOL_GPL(media_pad_remote_pad_first); struct media_pad * media_entity_remote_pad_unique(const struct media_entity *entity, unsigned int type) { struct media_pad *pad = NULL; struct media_link *link; list_for_each_entry(link, &entity->links, list) { struct media_pad *local_pad; struct media_pad *remote_pad; if (((link->flags & MEDIA_LNK_FL_LINK_TYPE) != MEDIA_LNK_FL_DATA_LINK) || !(link->flags & MEDIA_LNK_FL_ENABLED)) continue; if (type == MEDIA_PAD_FL_SOURCE) { local_pad = link->sink; remote_pad = link->source; } else { local_pad = link->source; remote_pad = link->sink; } if (local_pad->entity == entity) { if (pad) return ERR_PTR(-ENOTUNIQ); pad = remote_pad; } } if (!pad) return ERR_PTR(-ENOLINK); return pad; } EXPORT_SYMBOL_GPL(media_entity_remote_pad_unique); struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad) { struct media_pad *found_pad = NULL; struct media_link *link; list_for_each_entry(link, &pad->entity->links, list) { struct media_pad *remote_pad; if (!(link->flags & MEDIA_LNK_FL_ENABLED)) continue; if (link->sink == pad) remote_pad = link->source; else if (link->source == pad) remote_pad = link->sink; else continue; if (found_pad) return ERR_PTR(-ENOTUNIQ); found_pad = remote_pad; } if (!found_pad) return ERR_PTR(-ENOLINK); return found_pad; } EXPORT_SYMBOL_GPL(media_pad_remote_pad_unique); int media_entity_get_fwnode_pad(struct media_entity *entity, const struct fwnode_handle *fwnode, unsigned long direction_flags) { struct fwnode_endpoint endpoint; unsigned int i; int ret; if (!entity->ops || !entity->ops->get_fwnode_pad) { for (i = 0; i < entity->num_pads; i++) { if (entity->pads[i].flags & direction_flags) return i; } return -ENXIO; } ret = fwnode_graph_parse_endpoint(fwnode, &endpoint); if (ret) return ret; ret = 
entity->ops->get_fwnode_pad(entity, &endpoint); if (ret < 0) return ret; if (ret >= entity->num_pads) return -ENXIO; if (!(entity->pads[ret].flags & direction_flags)) return -ENXIO; return ret; } EXPORT_SYMBOL_GPL(media_entity_get_fwnode_pad); struct media_pipeline *media_entity_pipeline(struct media_entity *entity) { struct media_pad *pad; media_entity_for_each_pad(entity, pad) { if (pad->pipe) return pad->pipe; } return NULL; } EXPORT_SYMBOL_GPL(media_entity_pipeline); struct media_pipeline *media_pad_pipeline(struct media_pad *pad) { return pad->pipe; } EXPORT_SYMBOL_GPL(media_pad_pipeline); static void media_interface_init(struct media_device *mdev, struct media_interface *intf, u32 gobj_type, u32 intf_type, u32 flags) { intf->type = intf_type; intf->flags = flags; INIT_LIST_HEAD(&intf->links); media_gobj_create(mdev, gobj_type, &intf->graph_obj); } /* Functions related to the media interface via device nodes */ struct media_intf_devnode *media_devnode_create(struct media_device *mdev, u32 type, u32 flags, u32 major, u32 minor) { struct media_intf_devnode *devnode; devnode = kzalloc(sizeof(*devnode), GFP_KERNEL); if (!devnode) return NULL; devnode->major = major; devnode->minor = minor; media_interface_init(mdev, &devnode->intf, MEDIA_GRAPH_INTF_DEVNODE, type, flags); return devnode; } EXPORT_SYMBOL_GPL(media_devnode_create); void media_devnode_remove(struct media_intf_devnode *devnode) { media_remove_intf_links(&devnode->intf); media_gobj_destroy(&devnode->intf.graph_obj); kfree(devnode); } EXPORT_SYMBOL_GPL(media_devnode_remove); struct media_link *media_create_intf_link(struct media_entity *entity, struct media_interface *intf, u32 flags) { struct media_link *link; link = media_add_link(&intf->links); if (link == NULL) return NULL; link->intf = intf; link->entity = entity; link->flags = flags | MEDIA_LNK_FL_INTERFACE_LINK; /* Initialize graph object embedded at the new link */ media_gobj_create(intf->graph_obj.mdev, MEDIA_GRAPH_LINK, &link->graph_obj); return link; } EXPORT_SYMBOL_GPL(media_create_intf_link); void __media_remove_intf_link(struct media_link *link) { list_del(&link->list); media_gobj_destroy(&link->graph_obj); kfree(link); } EXPORT_SYMBOL_GPL(__media_remove_intf_link); void media_remove_intf_link(struct media_link *link) { struct media_device *mdev = link->graph_obj.mdev; /* Do nothing if the intf is not registered. */ if (mdev == NULL) return; mutex_lock(&mdev->graph_mutex); __media_remove_intf_link(link); mutex_unlock(&mdev->graph_mutex); } EXPORT_SYMBOL_GPL(media_remove_intf_link); void __media_remove_intf_links(struct media_interface *intf) { struct media_link *link, *tmp; list_for_each_entry_safe(link, tmp, &intf->links, list) __media_remove_intf_link(link); } EXPORT_SYMBOL_GPL(__media_remove_intf_links); void media_remove_intf_links(struct media_interface *intf) { struct media_device *mdev = intf->graph_obj.mdev; /* Do nothing if the intf is not registered. 
*/ if (mdev == NULL) return; mutex_lock(&mdev->graph_mutex); __media_remove_intf_links(intf); mutex_unlock(&mdev->graph_mutex); } EXPORT_SYMBOL_GPL(media_remove_intf_links); struct media_link *media_create_ancillary_link(struct media_entity *primary, struct media_entity *ancillary) { struct media_link *link; link = media_add_link(&primary->links); if (!link) return ERR_PTR(-ENOMEM); link->gobj0 = &primary->graph_obj; link->gobj1 = &ancillary->graph_obj; link->flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_ANCILLARY_LINK; /* Initialize graph object embedded in the new link */ media_gobj_create(primary->graph_obj.mdev, MEDIA_GRAPH_LINK, &link->graph_obj); return link; } EXPORT_SYMBOL_GPL(media_create_ancillary_link); struct media_link *__media_entity_next_link(struct media_entity *entity, struct media_link *link, unsigned long link_type) { link = link ? list_next_entry(link, list) : list_first_entry(&entity->links, typeof(*link), list); list_for_each_entry_from(link, &entity->links, list) if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == link_type) return link; return NULL; } EXPORT_SYMBOL_GPL(__media_entity_next_link);
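/*
 * Illustrative sketch, not part of the original file: how a driver might use
 * media_entity_pads_init() and media_create_pad_link() from the code above.
 * The "sensor" and "csi" entities, their pad layout and the example_ names
 * are hypothetical; both entities are assumed to have already been
 * registered with the same media_device (media_device_register_entity()).
 */
#include <linux/errno.h>
#include <media/media-entity.h>

static int example_wire_up(struct media_entity *sensor,
			   struct media_pad *sensor_pads,
			   struct media_entity *csi,
			   struct media_pad *csi_pads)
{
	int ret;

	/* The sensor exposes a single source pad. */
	sensor_pads[0].flags = MEDIA_PAD_FL_SOURCE;
	ret = media_entity_pads_init(sensor, 1, sensor_pads);
	if (ret)
		return ret;

	/* The CSI receiver has one sink pad and one source pad. */
	csi_pads[0].flags = MEDIA_PAD_FL_SINK;
	csi_pads[1].flags = MEDIA_PAD_FL_SOURCE;
	ret = media_entity_pads_init(csi, 2, csi_pads);
	if (ret)
		return ret;

	/* Immutable, always-enabled data link: sensor pad 0 -> CSI pad 0. */
	return media_create_pad_link(sensor, 0, csi, 0,
				     MEDIA_LNK_FL_ENABLED |
				     MEDIA_LNK_FL_IMMUTABLE);
}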
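/*
 * Illustrative sketch, not part of the original file: enumerating every
 * entity reachable from a starting entity with the media_graph_walk_*()
 * helpers defined above. The walk follows enabled data links depth-first
 * and visits each entity at most once; graph_mutex must be held while
 * walking. The example_ function name is hypothetical.
 */
#include <linux/mutex.h>
#include <media/media-device.h>
#include <media/media-entity.h>

static int example_count_connected(struct media_device *mdev,
				   struct media_entity *start)
{
	struct media_graph graph;
	struct media_entity *entity;
	int count = 0;
	int ret;

	ret = media_graph_walk_init(&graph, mdev);
	if (ret)
		return ret;

	mutex_lock(&mdev->graph_mutex);

	media_graph_walk_start(&graph, start);
	while ((entity = media_graph_walk_next(&graph)))
		count++;

	mutex_unlock(&mdev->graph_mutex);

	media_graph_walk_cleanup(&graph);

	return count;
}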
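/*
 * Illustrative sketch, not part of the original file: bracketing streaming
 * with media_pipeline_start()/media_pipeline_stop(). The example_video_dev
 * structure is hypothetical and assumed to be zero-initialised (e.g.
 * kzalloc'd) so that the embedded media_pipeline starts with a start_count
 * of zero. A driver that does not want to manage the pipe itself could call
 * media_pipeline_alloc_start() instead.
 */
#include <media/media-entity.h>

struct example_video_dev {
	struct media_pad pad;
	struct media_pipeline pipe;
};

static int example_stream_on(struct example_video_dev *vdev)
{
	/* Populates the pipeline reachable from this pad and validates it. */
	return media_pipeline_start(&vdev->pad, &vdev->pipe);
}

static void example_stream_off(struct example_video_dev *vdev)
{
	/* Balances the media_pipeline_start() call in example_stream_on(). */
	media_pipeline_stop(&vdev->pad);
}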
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HID driver for Keytouch devices not fully compliant with HID standard
 *
 * Copyright (c) 2011 Jiri Kosina
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

/* Replace the broken report descriptor of this device with a default one
 * (a standard HID boot-keyboard descriptor).
 */
static __u8 keytouch_fixed_rdesc[] = {
	/* Usage Page (Generic Desktop), Usage (Keyboard), Collection (Application) */
	0x05, 0x01, 0x09, 0x06, 0xa1, 0x01,
	/* 8 modifier-key bits (usages 0xe0-0xe7), 1 bit each */
	0x05, 0x07, 0x19, 0xe0, 0x29, 0xe7, 0x15, 0x00, 0x25, 0x01,
	0x75, 0x01, 0x95, 0x08, 0x81, 0x02,
	/* 1 constant (reserved) byte */
	0x95, 0x01, 0x75, 0x08, 0x81, 0x01,
	/* 3 LED output bits (NumLock, CapsLock, ScrollLock) */
	0x95, 0x03, 0x75, 0x01, 0x05, 0x08, 0x19, 0x01, 0x29, 0x03, 0x91, 0x02,
	/* 5 constant bits of LED padding */
	0x95, 0x05, 0x75, 0x01, 0x91, 0x01,
	/* 6-byte array of key codes (usages 0x00-0xff) */
	0x95, 0x06, 0x75, 0x08, 0x15, 0x00, 0x26, 0xff, 0x00,
	0x05, 0x07, 0x19, 0x00, 0x2a, 0xff, 0x00, 0x81, 0x00,
	/* End Collection */
	0xc0
};

static __u8 *keytouch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
		unsigned int *rsize)
{
	hid_info(hdev, "fixing up Keytouch IEC report descriptor\n");
	rdesc = keytouch_fixed_rdesc;
	*rsize = sizeof(keytouch_fixed_rdesc);

	return rdesc;
}

static const struct hid_device_id keytouch_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
	{ }
};
MODULE_DEVICE_TABLE(hid, keytouch_devices);

static struct hid_driver keytouch_driver = {
	.name = "keytouch",
	.id_table = keytouch_devices,
	.report_fixup = keytouch_report_fixup,
};
module_hid_driver(keytouch_driver);

MODULE_DESCRIPTION("HID driver for Keytouch devices not fully compliant with HID standard");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jiri Kosina");
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 */

#include "bat_iv_ogm.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/pkt_sched.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "bat_algo.h"
#include "bitarray.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
#include "translation-table.h"
#include "tvlv.h"

static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work);

/**
 * enum batadv_dup_status - duplicate status
 */
enum batadv_dup_status {
	/** @BATADV_NO_DUP: the packet is no duplicate */
	BATADV_NO_DUP = 0,

	/**
	 * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for
	 * the neighbor)
	 */
	BATADV_ORIG_DUP,

	/** @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor */
	BATADV_NEIGH_DUP,

	/**
	 * @BATADV_PROTECTED: originator is currently protected (after reboot)
	 */
	BATADV_PROTECTED,
};

/**
 * batadv_ring_buffer_set() - update the ring buffer with the given value
 * @lq_recv: pointer to the ring buffer
 * @lq_index: index to store the value at
 * @value: value to store in the ring buffer
 */
static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
{
	lq_recv[*lq_index] = value;
	*lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
}

/**
 * batadv_ring_buffer_avg() - compute the average of all non-zero values stored
 *  in the given ring buffer
 * @lq_recv: pointer to the
ring buffer * * Return: computed average value. */ static u8 batadv_ring_buffer_avg(const u8 lq_recv[]) { const u8 *ptr; u16 count = 0; u16 i = 0; u16 sum = 0; ptr = lq_recv; while (i < BATADV_TQ_GLOBAL_WINDOW_SIZE) { if (*ptr != 0) { count++; sum += *ptr; } i++; ptr++; } if (count == 0) return 0; return (u8)(sum / count); } /** * batadv_iv_ogm_orig_get() - retrieve or create (if does not exist) an * originator * @bat_priv: the bat priv with all the soft interface information * @addr: mac address of the originator * * Return: the originator object corresponding to the passed mac address or NULL * on failure. * If the object does not exist, it is created and initialised. */ static struct batadv_orig_node * batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) { struct batadv_orig_node *orig_node; int hash_added; orig_node = batadv_orig_hash_find(bat_priv, addr); if (orig_node) return orig_node; orig_node = batadv_orig_node_new(bat_priv, addr); if (!orig_node) return NULL; spin_lock_init(&orig_node->bat_iv.ogm_cnt_lock); kref_get(&orig_node->refcount); hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, batadv_choose_orig, orig_node, &orig_node->hash_entry); if (hash_added != 0) goto free_orig_node_hash; return orig_node; free_orig_node_hash: /* reference for batadv_hash_add */ batadv_orig_node_put(orig_node); /* reference from batadv_orig_node_new */ batadv_orig_node_put(orig_node); return NULL; } static struct batadv_neigh_node * batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, const u8 *neigh_addr, struct batadv_orig_node *orig_node, struct batadv_orig_node *orig_neigh) { struct batadv_neigh_node *neigh_node; neigh_node = batadv_neigh_node_get_or_create(orig_node, hard_iface, neigh_addr); if (!neigh_node) goto out; neigh_node->orig_node = orig_neigh; out: return neigh_node; } static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) { struct batadv_ogm_packet *batadv_ogm_packet; unsigned char *ogm_buff; u32 random_seqno; mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); /* randomize initial seqno to avoid collision */ get_random_bytes(&random_seqno, sizeof(random_seqno)); atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno); hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN; ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC); if (!ogm_buff) { mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); return -ENOMEM; } hard_iface->bat_iv.ogm_buff = ogm_buff; batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; batadv_ogm_packet->packet_type = BATADV_IV_OGM; batadv_ogm_packet->version = BATADV_COMPAT_VERSION; batadv_ogm_packet->ttl = 2; batadv_ogm_packet->flags = BATADV_NO_FLAGS; batadv_ogm_packet->reserved = 0; batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE; mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); return 0; } static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface) { mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); kfree(hard_iface->bat_iv.ogm_buff); hard_iface->bat_iv.ogm_buff = NULL; mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); } static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface) { struct batadv_ogm_packet *batadv_ogm_packet; void *ogm_buff; mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); ogm_buff = hard_iface->bat_iv.ogm_buff; if (!ogm_buff) goto unlock; batadv_ogm_packet = ogm_buff; ether_addr_copy(batadv_ogm_packet->orig, hard_iface->net_dev->dev_addr); ether_addr_copy(batadv_ogm_packet->prev_sender, hard_iface->net_dev->dev_addr); unlock: 
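	/* at this point the OGM buffer advertises the interface's current
	 * hardware address as both originator and previous sender; the
	 * buffer mutex taken above is dropped below
	 */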
mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); } static void batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface) { struct batadv_ogm_packet *batadv_ogm_packet; void *ogm_buff; mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); ogm_buff = hard_iface->bat_iv.ogm_buff; if (!ogm_buff) goto unlock; batadv_ogm_packet = ogm_buff; batadv_ogm_packet->ttl = BATADV_TTL; unlock: mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); } /* when do we schedule our own ogm to be sent */ static unsigned long batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv) { unsigned int msecs; msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER; msecs += get_random_u32_below(2 * BATADV_JITTER); return jiffies + msecs_to_jiffies(msecs); } /* when do we schedule a ogm packet to be sent */ static unsigned long batadv_iv_ogm_fwd_send_time(void) { return jiffies + msecs_to_jiffies(get_random_u32_below(BATADV_JITTER / 2)); } /* apply hop penalty for a normal link */ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv) { int hop_penalty = atomic_read(&bat_priv->hop_penalty); int new_tq; new_tq = tq * (BATADV_TQ_MAX_VALUE - hop_penalty); new_tq /= BATADV_TQ_MAX_VALUE; return new_tq; } /** * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached * @buff_pos: current position in the skb * @packet_len: total length of the skb * @ogm_packet: potential OGM in buffer * * Return: true if there is enough space for another OGM, false otherwise. */ static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, const struct batadv_ogm_packet *ogm_packet) { int next_buff_pos = 0; /* check if there is enough space for the header */ next_buff_pos += buff_pos + sizeof(*ogm_packet); if (next_buff_pos > packet_len) return false; /* check if there is enough space for the optional TVLV */ next_buff_pos += ntohs(ogm_packet->tvlv_len); return (next_buff_pos <= packet_len) && (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); } /* send a batman ogm to a given interface */ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet, struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); const char *fwd_str; u8 packet_num; s16 buff_pos; struct batadv_ogm_packet *batadv_ogm_packet; struct sk_buff *skb; u8 *packet_pos; if (hard_iface->if_status != BATADV_IF_ACTIVE) return; packet_num = 0; buff_pos = 0; packet_pos = forw_packet->skb->data; batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; /* adjust all flags and log packets */ while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, batadv_ogm_packet)) { /* we might have aggregated direct link packets with an * ordinary base packet */ if (forw_packet->direct_link_flags & BIT(packet_num) && forw_packet->if_incoming == hard_iface) batadv_ogm_packet->flags |= BATADV_DIRECTLINK; else batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK; if (packet_num > 0 || !forw_packet->own) fwd_str = "Forwarding"; else fwd_str = "Sending own"; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s) on interface %s [%pM]\n", fwd_str, (packet_num > 0 ? "aggregated " : ""), batadv_ogm_packet->orig, ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq, batadv_ogm_packet->ttl, ((batadv_ogm_packet->flags & BATADV_DIRECTLINK) ? 
"on" : "off"), hard_iface->net_dev->name, hard_iface->net_dev->dev_addr); buff_pos += BATADV_OGM_HLEN; buff_pos += ntohs(batadv_ogm_packet->tvlv_len); packet_num++; packet_pos = forw_packet->skb->data + buff_pos; batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; } /* create clone because function is called more than once */ skb = skb_clone(forw_packet->skb, GFP_ATOMIC); if (skb) { batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES, skb->len + ETH_HLEN); batadv_send_broadcast_skb(skb, hard_iface); } } /* send a batman ogm packet */ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet) { struct net_device *soft_iface; if (!forw_packet->if_incoming) { pr_err("Error - can't forward packet: incoming iface not specified\n"); return; } soft_iface = forw_packet->if_incoming->soft_iface; if (WARN_ON(!forw_packet->if_outgoing)) return; if (forw_packet->if_outgoing->soft_iface != soft_iface) { pr_warn("%s: soft interface switch for queued OGM\n", __func__); return; } if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE) return; /* only for one specific outgoing interface */ batadv_iv_ogm_send_to_if(forw_packet, forw_packet->if_outgoing); } /** * batadv_iv_ogm_can_aggregate() - find out if an OGM can be aggregated on an * existing forward packet * @new_bat_ogm_packet: OGM packet to be aggregated * @bat_priv: the bat priv with all the soft interface information * @packet_len: (total) length of the OGM * @send_time: timestamp (jiffies) when the packet is to be sent * @directlink: true if this is a direct link packet * @if_incoming: interface where the packet was received * @if_outgoing: interface for which the retransmission should be considered * @forw_packet: the forwarded packet which should be checked * * Return: true if new_packet can be aggregated with forw_packet */ static bool batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet, struct batadv_priv *bat_priv, int packet_len, unsigned long send_time, bool directlink, const struct batadv_hard_iface *if_incoming, const struct batadv_hard_iface *if_outgoing, const struct batadv_forw_packet *forw_packet) { struct batadv_ogm_packet *batadv_ogm_packet; int aggregated_bytes = forw_packet->packet_len + packet_len; struct batadv_hard_iface *primary_if = NULL; bool res = false; unsigned long aggregation_end_time; batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data; aggregation_end_time = send_time; aggregation_end_time += msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS); /* we can aggregate the current packet to this aggregated packet * if: * * - the send time is within our MAX_AGGREGATION_MS time * - the resulting packet won't be bigger than * MAX_AGGREGATION_BYTES * otherwise aggregation is not possible */ if (!time_before(send_time, forw_packet->send_time) || !time_after_eq(aggregation_end_time, forw_packet->send_time)) return false; if (aggregated_bytes > BATADV_MAX_AGGREGATION_BYTES) return false; /* packet is not leaving on the same interface. 
*/ if (forw_packet->if_outgoing != if_outgoing) return false; /* check aggregation compatibility * -> direct link packets are broadcasted on * their interface only * -> aggregate packet if the current packet is * a "global" packet as well as the base * packet */ primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) return false; /* packets without direct link flag and high TTL * are flooded through the net */ if (!directlink && !(batadv_ogm_packet->flags & BATADV_DIRECTLINK) && batadv_ogm_packet->ttl != 1 && /* own packets originating non-primary * interfaces leave only that interface */ (!forw_packet->own || forw_packet->if_incoming == primary_if)) { res = true; goto out; } /* if the incoming packet is sent via this one * interface only - we still can aggregate */ if (directlink && new_bat_ogm_packet->ttl == 1 && forw_packet->if_incoming == if_incoming && /* packets from direct neighbors or * own secondary interface packets * (= secondary interface packets in general) */ (batadv_ogm_packet->flags & BATADV_DIRECTLINK || (forw_packet->own && forw_packet->if_incoming != primary_if))) { res = true; goto out; } out: batadv_hardif_put(primary_if); return res; } /** * batadv_iv_ogm_aggregate_new() - create a new aggregated packet and add this * packet to it. * @packet_buff: pointer to the OGM * @packet_len: (total) length of the OGM * @send_time: timestamp (jiffies) when the packet is to be sent * @direct_link: whether this OGM has direct link status * @if_incoming: interface where the packet was received * @if_outgoing: interface for which the retransmission should be considered * @own_packet: true if it is a self-generated ogm */ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, int packet_len, unsigned long send_time, bool direct_link, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing, int own_packet) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_forw_packet *forw_packet_aggr; struct sk_buff *skb; unsigned char *skb_buff; unsigned int skb_size; atomic_t *queue_left = own_packet ? 
NULL : &bat_priv->batman_queue_left; if (atomic_read(&bat_priv->aggregated_ogms) && packet_len < BATADV_MAX_AGGREGATION_BYTES) skb_size = BATADV_MAX_AGGREGATION_BYTES; else skb_size = packet_len; skb_size += ETH_HLEN; skb = netdev_alloc_skb_ip_align(NULL, skb_size); if (!skb) return; forw_packet_aggr = batadv_forw_packet_alloc(if_incoming, if_outgoing, queue_left, bat_priv, skb); if (!forw_packet_aggr) { kfree_skb(skb); return; } forw_packet_aggr->skb->priority = TC_PRIO_CONTROL; skb_reserve(forw_packet_aggr->skb, ETH_HLEN); skb_buff = skb_put(forw_packet_aggr->skb, packet_len); forw_packet_aggr->packet_len = packet_len; memcpy(skb_buff, packet_buff, packet_len); forw_packet_aggr->own = own_packet; forw_packet_aggr->direct_link_flags = BATADV_NO_FLAGS; forw_packet_aggr->send_time = send_time; /* save packet direct link flag status */ if (direct_link) forw_packet_aggr->direct_link_flags |= 1; INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work, batadv_iv_send_outstanding_bat_ogm_packet); batadv_forw_packet_ogmv1_queue(bat_priv, forw_packet_aggr, send_time); } /* aggregate a new packet into the existing ogm packet */ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr, const unsigned char *packet_buff, int packet_len, bool direct_link) { unsigned long new_direct_link_flag; skb_put_data(forw_packet_aggr->skb, packet_buff, packet_len); forw_packet_aggr->packet_len += packet_len; forw_packet_aggr->num_packets++; /* save packet direct link flag status */ if (direct_link) { new_direct_link_flag = BIT(forw_packet_aggr->num_packets); forw_packet_aggr->direct_link_flags |= new_direct_link_flag; } } /** * batadv_iv_ogm_queue_add() - queue up an OGM for transmission * @bat_priv: the bat priv with all the soft interface information * @packet_buff: pointer to the OGM * @packet_len: (total) length of the OGM * @if_incoming: interface where the packet was received * @if_outgoing: interface for which the retransmission should be considered * @own_packet: true if it is a self-generated ogm * @send_time: timestamp (jiffies) when the packet is to be sent */ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv, unsigned char *packet_buff, int packet_len, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing, int own_packet, unsigned long send_time) { /* _aggr -> pointer to the packet we want to aggregate with * _pos -> pointer to the position in the queue */ struct batadv_forw_packet *forw_packet_aggr = NULL; struct batadv_forw_packet *forw_packet_pos = NULL; struct batadv_ogm_packet *batadv_ogm_packet; bool direct_link; unsigned long max_aggregation_jiffies; batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; direct_link = !!(batadv_ogm_packet->flags & BATADV_DIRECTLINK); max_aggregation_jiffies = msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS); /* find position for the packet in the forward queue */ spin_lock_bh(&bat_priv->forw_bat_list_lock); /* own packets are not to be aggregated */ if (atomic_read(&bat_priv->aggregated_ogms) && !own_packet) { hlist_for_each_entry(forw_packet_pos, &bat_priv->forw_bat_list, list) { if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet, bat_priv, packet_len, send_time, direct_link, if_incoming, if_outgoing, forw_packet_pos)) { forw_packet_aggr = forw_packet_pos; break; } } } /* nothing to aggregate with - either aggregation disabled or no * suitable aggregation packet found */ if (!forw_packet_aggr) { /* the following section can run without the lock */ spin_unlock_bh(&bat_priv->forw_bat_list_lock); /* if we 
could not aggregate this packet with one of the others * we hold it back for a while, so that it might be aggregated * later on */ if (!own_packet && atomic_read(&bat_priv->aggregated_ogms)) send_time += max_aggregation_jiffies; batadv_iv_ogm_aggregate_new(packet_buff, packet_len, send_time, direct_link, if_incoming, if_outgoing, own_packet); } else { batadv_iv_ogm_aggregate(forw_packet_aggr, packet_buff, packet_len, direct_link); spin_unlock_bh(&bat_priv->forw_bat_list_lock); } } static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node, const struct ethhdr *ethhdr, struct batadv_ogm_packet *batadv_ogm_packet, bool is_single_hop_neigh, bool is_from_best_next_hop, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); u16 tvlv_len; if (batadv_ogm_packet->ttl <= 1) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n"); return; } if (!is_from_best_next_hop) { /* Mark the forwarded packet when it is not coming from our * best next hop. We still need to forward the packet for our * neighbor link quality detection to work in case the packet * originated from a single hop neighbor. Otherwise we can * simply drop the ogm. */ if (is_single_hop_neigh) batadv_ogm_packet->flags |= BATADV_NOT_BEST_NEXT_HOP; else return; } tvlv_len = ntohs(batadv_ogm_packet->tvlv_len); batadv_ogm_packet->ttl--; ether_addr_copy(batadv_ogm_packet->prev_sender, ethhdr->h_source); /* apply hop penalty */ batadv_ogm_packet->tq = batadv_hop_penalty(batadv_ogm_packet->tq, bat_priv); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Forwarding packet: tq: %i, ttl: %i\n", batadv_ogm_packet->tq, batadv_ogm_packet->ttl); if (is_single_hop_neigh) batadv_ogm_packet->flags |= BATADV_DIRECTLINK; else batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK; batadv_iv_ogm_queue_add(bat_priv, (unsigned char *)batadv_ogm_packet, BATADV_OGM_HLEN + tvlv_len, if_incoming, if_outgoing, 0, batadv_iv_ogm_fwd_send_time()); } /** * batadv_iv_ogm_slide_own_bcast_window() - bitshift own OGM broadcast windows * for the given interface * @hard_iface: the interface for which the windows have to be shifted */ static void batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_head *head; struct batadv_orig_node *orig_node; struct batadv_orig_ifinfo *orig_ifinfo; unsigned long *word; u32 i; u8 *w; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, head, hash_entry) { hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) { if (orig_ifinfo->if_outgoing != hard_iface) continue; spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); word = orig_ifinfo->bat_iv.bcast_own; batadv_bit_get_packet(bat_priv, word, 1, 0); w = &orig_ifinfo->bat_iv.bcast_own_sum; *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE); spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); } } rcu_read_unlock(); } } /** * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer * @hard_iface: interface whose ogm buffer should be transmitted */ static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff; struct batadv_ogm_packet *batadv_ogm_packet; struct batadv_hard_iface *primary_if, *tmp_hard_iface; int *ogm_buff_len = 
&hard_iface->bat_iv.ogm_buff_len; u32 seqno; u16 tvlv_len = 0; unsigned long send_time; lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex); /* interface already disabled by batadv_iv_ogm_iface_disable */ if (!*ogm_buff) return; /* the interface gets activated here to avoid race conditions between * the moment of activating the interface in * hardif_activate_interface() where the originator mac is set and * outdated packets (especially uninitialized mac addresses) in the * packet queue */ if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED) hard_iface->if_status = BATADV_IF_ACTIVE; primary_if = batadv_primary_if_get_selected(bat_priv); if (hard_iface == primary_if) { /* tt changes have to be committed before the tvlv data is * appended as it may alter the tt tvlv container */ batadv_tt_local_commit_changes(bat_priv); tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, ogm_buff, ogm_buff_len, BATADV_OGM_HLEN); } batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff); batadv_ogm_packet->tvlv_len = htons(tvlv_len); /* change sequence number to network order */ seqno = (u32)atomic_read(&hard_iface->bat_iv.ogm_seqno); batadv_ogm_packet->seqno = htonl(seqno); atomic_inc(&hard_iface->bat_iv.ogm_seqno); batadv_iv_ogm_slide_own_bcast_window(hard_iface); send_time = batadv_iv_ogm_emit_send_time(bat_priv); if (hard_iface != primary_if) { /* OGMs from secondary interfaces are only scheduled on their * respective interfaces. */ batadv_iv_ogm_queue_add(bat_priv, *ogm_buff, *ogm_buff_len, hard_iface, hard_iface, 1, send_time); goto out; } /* OGMs from primary interfaces are scheduled on all * interfaces. */ rcu_read_lock(); list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) { if (tmp_hard_iface->soft_iface != hard_iface->soft_iface) continue; if (!kref_get_unless_zero(&tmp_hard_iface->refcount)) continue; batadv_iv_ogm_queue_add(bat_priv, *ogm_buff, *ogm_buff_len, hard_iface, tmp_hard_iface, 1, send_time); batadv_hardif_put(tmp_hard_iface); } rcu_read_unlock(); out: batadv_hardif_put(primary_if); } static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) { if (hard_iface->if_status == BATADV_IF_NOT_IN_USE || hard_iface->if_status == BATADV_IF_TO_BE_REMOVED) return; mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); batadv_iv_ogm_schedule_buff(hard_iface); mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); } /** * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over interface * @orig_node: originator which reproadcasted the OGMs directly * @if_outgoing: interface which transmitted the original OGM and received the * direct rebroadcast * * Return: Number of replied (rebroadcasted) OGMs which were transmitted by * an originator and directly (without intermediate hop) received by a specific * interface */ static u8 batadv_iv_orig_ifinfo_sum(struct batadv_orig_node *orig_node, struct batadv_hard_iface *if_outgoing) { struct batadv_orig_ifinfo *orig_ifinfo; u8 sum; orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing); if (!orig_ifinfo) return 0; spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); sum = orig_ifinfo->bat_iv.bcast_own_sum; spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); batadv_orig_ifinfo_put(orig_ifinfo); return sum; } /** * batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an * originator * @bat_priv: the bat priv with all the soft interface information * @orig_node: the orig node who originally emitted the ogm packet * @orig_ifinfo: ifinfo for the outgoing interface of the orig_node * @ethhdr: Ethernet header of 
the OGM * @batadv_ogm_packet: the ogm packet * @if_incoming: interface where the packet was received * @if_outgoing: interface for which the retransmission should be considered * @dup_status: the duplicate status of this ogm packet. */ static void batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, struct batadv_orig_ifinfo *orig_ifinfo, const struct ethhdr *ethhdr, const struct batadv_ogm_packet *batadv_ogm_packet, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing, enum batadv_dup_status dup_status) { struct batadv_neigh_ifinfo *neigh_ifinfo = NULL; struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_neigh_node *neigh_node = NULL; struct batadv_neigh_node *tmp_neigh_node = NULL; struct batadv_neigh_node *router = NULL; u8 sum_orig, sum_neigh; u8 *neigh_addr; u8 tq_avg; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s(): Searching and updating originator entry of received packet\n", __func__); rcu_read_lock(); hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) { neigh_addr = tmp_neigh_node->addr; if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && tmp_neigh_node->if_incoming == if_incoming && kref_get_unless_zero(&tmp_neigh_node->refcount)) { if (WARN(neigh_node, "too many matching neigh_nodes")) batadv_neigh_node_put(neigh_node); neigh_node = tmp_neigh_node; continue; } if (dup_status != BATADV_NO_DUP) continue; /* only update the entry for this outgoing interface */ neigh_ifinfo = batadv_neigh_ifinfo_get(tmp_neigh_node, if_outgoing); if (!neigh_ifinfo) continue; spin_lock_bh(&tmp_neigh_node->ifinfo_lock); batadv_ring_buffer_set(neigh_ifinfo->bat_iv.tq_recv, &neigh_ifinfo->bat_iv.tq_index, 0); tq_avg = batadv_ring_buffer_avg(neigh_ifinfo->bat_iv.tq_recv); neigh_ifinfo->bat_iv.tq_avg = tq_avg; spin_unlock_bh(&tmp_neigh_node->ifinfo_lock); batadv_neigh_ifinfo_put(neigh_ifinfo); neigh_ifinfo = NULL; } if (!neigh_node) { struct batadv_orig_node *orig_tmp; orig_tmp = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source); if (!orig_tmp) goto unlock; neigh_node = batadv_iv_ogm_neigh_new(if_incoming, ethhdr->h_source, orig_node, orig_tmp); batadv_orig_node_put(orig_tmp); if (!neigh_node) goto unlock; } else { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Updating existing last-hop neighbor of originator\n"); } rcu_read_unlock(); neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); if (!neigh_ifinfo) goto out; neigh_node->last_seen = jiffies; spin_lock_bh(&neigh_node->ifinfo_lock); batadv_ring_buffer_set(neigh_ifinfo->bat_iv.tq_recv, &neigh_ifinfo->bat_iv.tq_index, batadv_ogm_packet->tq); tq_avg = batadv_ring_buffer_avg(neigh_ifinfo->bat_iv.tq_recv); neigh_ifinfo->bat_iv.tq_avg = tq_avg; spin_unlock_bh(&neigh_node->ifinfo_lock); if (dup_status == BATADV_NO_DUP) { orig_ifinfo->last_ttl = batadv_ogm_packet->ttl; neigh_ifinfo->last_ttl = batadv_ogm_packet->ttl; } /* if this neighbor already is our next hop there is nothing * to change */ router = batadv_orig_router_get(orig_node, if_outgoing); if (router == neigh_node) goto out; if (router) { router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing); if (!router_ifinfo) goto out; /* if this neighbor does not offer a better TQ we won't * consider it */ if (router_ifinfo->bat_iv.tq_avg > neigh_ifinfo->bat_iv.tq_avg) goto out; } /* if the TQ is the same and the link not more symmetric we * won't consider it either */ if (router_ifinfo && neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg) { sum_orig = 
batadv_iv_orig_ifinfo_sum(router->orig_node, router->if_incoming); sum_neigh = batadv_iv_orig_ifinfo_sum(neigh_node->orig_node, neigh_node->if_incoming); if (sum_orig >= sum_neigh) goto out; } batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node); goto out; unlock: rcu_read_unlock(); out: batadv_neigh_node_put(neigh_node); batadv_neigh_node_put(router); batadv_neigh_ifinfo_put(neigh_ifinfo); batadv_neigh_ifinfo_put(router_ifinfo); } /** * batadv_iv_ogm_calc_tq() - calculate tq for current received ogm packet * @orig_node: the orig node who originally emitted the ogm packet * @orig_neigh_node: the orig node struct of the neighbor who sent the packet * @batadv_ogm_packet: the ogm packet * @if_incoming: interface where the packet was received * @if_outgoing: interface for which the retransmission should be considered * * Return: true if the link can be considered bidirectional, false otherwise */ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, struct batadv_orig_node *orig_neigh_node, struct batadv_ogm_packet *batadv_ogm_packet, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node; struct batadv_neigh_ifinfo *neigh_ifinfo; u8 total_count; u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; unsigned int tq_iface_hop_penalty = BATADV_TQ_MAX_VALUE; unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; unsigned int tq_asym_penalty, inv_asym_penalty; unsigned int combined_tq; bool ret = false; /* find corresponding one hop neighbor */ rcu_read_lock(); hlist_for_each_entry_rcu(tmp_neigh_node, &orig_neigh_node->neigh_list, list) { if (!batadv_compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig)) continue; if (tmp_neigh_node->if_incoming != if_incoming) continue; if (!kref_get_unless_zero(&tmp_neigh_node->refcount)) continue; neigh_node = tmp_neigh_node; break; } rcu_read_unlock(); if (!neigh_node) neigh_node = batadv_iv_ogm_neigh_new(if_incoming, orig_neigh_node->orig, orig_neigh_node, orig_neigh_node); if (!neigh_node) goto out; /* if orig_node is direct neighbor update neigh_node last_seen */ if (orig_node == orig_neigh_node) neigh_node->last_seen = jiffies; orig_node->last_seen = jiffies; /* find packet count of corresponding one hop neighbor */ orig_eq_count = batadv_iv_orig_ifinfo_sum(orig_neigh_node, if_incoming); neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); if (neigh_ifinfo) { neigh_rq_count = neigh_ifinfo->bat_iv.real_packet_count; batadv_neigh_ifinfo_put(neigh_ifinfo); } else { neigh_rq_count = 0; } /* pay attention to not get a value bigger than 100 % */ if (orig_eq_count > neigh_rq_count) total_count = neigh_rq_count; else total_count = orig_eq_count; /* if we have too few packets (too less data) we set tq_own to zero * if we receive too few packets it is not considered bidirectional */ if (total_count < BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM || neigh_rq_count < BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM) tq_own = 0; else /* neigh_node->real_packet_count is never zero as we * only purge old information when getting new * information */ tq_own = (BATADV_TQ_MAX_VALUE * total_count) / neigh_rq_count; /* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does * affect the nearly-symmetric links only a little, but * punishes asymmetric links more. 
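 * For example, assuming the usual constants BATADV_TQ_LOCAL_WINDOW_SIZE = 64
 * and BATADV_TQ_MAX_VALUE = 255: with neigh_rq_count = 48 the inverse is 16,
 * inv_asym_penalty = 255 * 16^3 / 64^3 = 3 and tq_asym_penalty = 252, so the
 * link is barely penalized; with neigh_rq_count = 16 the inverse is 48,
 * inv_asym_penalty = 107 and tq_asym_penalty drops to 148.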
This will give a value * between 0 and TQ_MAX_VALUE */ neigh_rq_inv = BATADV_TQ_LOCAL_WINDOW_SIZE - neigh_rq_count; neigh_rq_inv_cube = neigh_rq_inv * neigh_rq_inv * neigh_rq_inv; neigh_rq_max_cube = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE; inv_asym_penalty = BATADV_TQ_MAX_VALUE * neigh_rq_inv_cube; inv_asym_penalty /= neigh_rq_max_cube; tq_asym_penalty = BATADV_TQ_MAX_VALUE - inv_asym_penalty; tq_iface_hop_penalty -= atomic_read(&if_incoming->hop_penalty); /* penalize if the OGM is forwarded on the same interface. WiFi * interfaces and other half duplex devices suffer from throughput * drops as they can't send and receive at the same time. */ if (if_outgoing && if_incoming == if_outgoing && batadv_is_wifi_hardif(if_outgoing)) tq_iface_hop_penalty = batadv_hop_penalty(tq_iface_hop_penalty, bat_priv); combined_tq = batadv_ogm_packet->tq * tq_own * tq_asym_penalty * tq_iface_hop_penalty; combined_tq /= BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE; batadv_ogm_packet->tq = combined_tq; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_hop_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n", orig_node->orig, orig_neigh_node->orig, total_count, neigh_rq_count, tq_own, tq_asym_penalty, tq_iface_hop_penalty, batadv_ogm_packet->tq, if_incoming->net_dev->name, if_outgoing ? if_outgoing->net_dev->name : "DEFAULT"); /* if link has the minimum required transmission quality * consider it bidirectional */ if (batadv_ogm_packet->tq >= BATADV_TQ_TOTAL_BIDRECT_LIMIT) ret = true; out: batadv_neigh_node_put(neigh_node); return ret; } /** * batadv_iv_ogm_update_seqnos() - process a batman packet for all interfaces, * adjust the sequence number and find out whether it is a duplicate * @ethhdr: ethernet header of the packet * @batadv_ogm_packet: OGM packet to be considered * @if_incoming: interface on which the OGM packet was received * @if_outgoing: interface for which the retransmission should be considered * * Return: duplicate status as enum batadv_dup_status */ static enum batadv_dup_status batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, const struct batadv_ogm_packet *batadv_ogm_packet, const struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_orig_node *orig_node; struct batadv_orig_ifinfo *orig_ifinfo = NULL; struct batadv_neigh_node *neigh_node; struct batadv_neigh_ifinfo *neigh_ifinfo; bool is_dup; s32 seq_diff; bool need_update = false; int set_mark; enum batadv_dup_status ret = BATADV_NO_DUP; u32 seqno = ntohl(batadv_ogm_packet->seqno); u8 *neigh_addr; u8 packet_count; unsigned long *bitmap; orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig); if (!orig_node) return BATADV_NO_DUP; orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing); if (WARN_ON(!orig_ifinfo)) { batadv_orig_node_put(orig_node); return 0; } spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); seq_diff = seqno - orig_ifinfo->last_real_seqno; /* signalize caller that the packet is to be dropped. 
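 * A sequence number far outside the expected window is treated as a likely
 * restart of the originator, so its OGMs are ignored for the duration of
 * the protection period (see batadv_window_protected()).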
*/ if (!hlist_empty(&orig_node->neigh_list) && batadv_window_protected(bat_priv, seq_diff, BATADV_TQ_LOCAL_WINDOW_SIZE, &orig_ifinfo->batman_seqno_reset, NULL)) { ret = BATADV_PROTECTED; goto out; } rcu_read_lock(); hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) { neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); if (!neigh_ifinfo) continue; neigh_addr = neigh_node->addr; is_dup = batadv_test_bit(neigh_ifinfo->bat_iv.real_bits, orig_ifinfo->last_real_seqno, seqno); if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && neigh_node->if_incoming == if_incoming) { set_mark = 1; if (is_dup) ret = BATADV_NEIGH_DUP; } else { set_mark = 0; if (is_dup && ret != BATADV_NEIGH_DUP) ret = BATADV_ORIG_DUP; } /* if the window moved, set the update flag. */ bitmap = neigh_ifinfo->bat_iv.real_bits; need_update |= batadv_bit_get_packet(bat_priv, bitmap, seq_diff, set_mark); packet_count = bitmap_weight(bitmap, BATADV_TQ_LOCAL_WINDOW_SIZE); neigh_ifinfo->bat_iv.real_packet_count = packet_count; batadv_neigh_ifinfo_put(neigh_ifinfo); } rcu_read_unlock(); if (need_update) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s updating last_seqno: old %u, new %u\n", if_outgoing ? if_outgoing->net_dev->name : "DEFAULT", orig_ifinfo->last_real_seqno, seqno); orig_ifinfo->last_real_seqno = seqno; } out: spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); batadv_orig_node_put(orig_node); batadv_orig_ifinfo_put(orig_ifinfo); return ret; } /** * batadv_iv_ogm_process_per_outif() - process a batman iv OGM for an outgoing * interface * @skb: the skb containing the OGM * @ogm_offset: offset from skb->data to start of ogm header * @orig_node: the (cached) orig node for the originator of this OGM * @if_incoming: the interface where this packet was received * @if_outgoing: the interface for which the packet should be considered */ static void batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset, struct batadv_orig_node *orig_node, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_hardif_neigh_node *hardif_neigh = NULL; struct batadv_neigh_node *router = NULL; struct batadv_neigh_node *router_router = NULL; struct batadv_orig_node *orig_neigh_node; struct batadv_orig_ifinfo *orig_ifinfo; struct batadv_neigh_node *orig_neigh_router = NULL; struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_ogm_packet *ogm_packet; enum batadv_dup_status dup_status; bool is_from_best_next_hop = false; bool is_single_hop_neigh = false; bool sameseq, similar_ttl; struct sk_buff *skb_priv; struct ethhdr *ethhdr; u8 *prev_sender; bool is_bidirect; /* create a private copy of the skb, as some functions change tq value * and/or flags. 
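 * The same received OGM is evaluated once per outgoing interface, so each
 * pass needs to work on its own modifiable copy of the data.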
*/ skb_priv = skb_copy(skb, GFP_ATOMIC); if (!skb_priv) return; ethhdr = eth_hdr(skb_priv); ogm_packet = (struct batadv_ogm_packet *)(skb_priv->data + ogm_offset); dup_status = batadv_iv_ogm_update_seqnos(ethhdr, ogm_packet, if_incoming, if_outgoing); if (batadv_compare_eth(ethhdr->h_source, ogm_packet->orig)) is_single_hop_neigh = true; if (dup_status == BATADV_PROTECTED) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: packet within seqno protection time (sender: %pM)\n", ethhdr->h_source); goto out; } if (ogm_packet->tq == 0) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: originator packet with tq equal 0\n"); goto out; } if (is_single_hop_neigh) { hardif_neigh = batadv_hardif_neigh_get(if_incoming, ethhdr->h_source); if (hardif_neigh) hardif_neigh->last_seen = jiffies; } router = batadv_orig_router_get(orig_node, if_outgoing); if (router) { router_router = batadv_orig_router_get(router->orig_node, if_outgoing); router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing); } if ((router_ifinfo && router_ifinfo->bat_iv.tq_avg != 0) && (batadv_compare_eth(router->addr, ethhdr->h_source))) is_from_best_next_hop = true; prev_sender = ogm_packet->prev_sender; /* avoid temporary routing loops */ if (router && router_router && (batadv_compare_eth(router->addr, prev_sender)) && !(batadv_compare_eth(ogm_packet->orig, prev_sender)) && (batadv_compare_eth(router->addr, router_router->addr))) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n", ethhdr->h_source); goto out; } if (if_outgoing == BATADV_IF_DEFAULT) batadv_tvlv_ogm_receive(bat_priv, ogm_packet, orig_node); /* if sender is a direct neighbor the sender mac equals * originator mac */ if (is_single_hop_neigh) orig_neigh_node = orig_node; else orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source); if (!orig_neigh_node) goto out; /* Update nc_nodes of the originator */ batadv_nc_update_nc_node(bat_priv, orig_node, orig_neigh_node, ogm_packet, is_single_hop_neigh); orig_neigh_router = batadv_orig_router_get(orig_neigh_node, if_outgoing); /* drop packet if sender is not a direct neighbor and if we * don't route towards it */ if (!is_single_hop_neigh && !orig_neigh_router) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: OGM via unknown neighbor!\n"); goto out_neigh; } is_bidirect = batadv_iv_ogm_calc_tq(orig_node, orig_neigh_node, ogm_packet, if_incoming, if_outgoing); /* update ranking if it is not a duplicate or has the same * seqno and similar ttl as the non-duplicate */ orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing); if (!orig_ifinfo) goto out_neigh; sameseq = orig_ifinfo->last_real_seqno == ntohl(ogm_packet->seqno); similar_ttl = (orig_ifinfo->last_ttl - 3) <= ogm_packet->ttl; if (is_bidirect && (dup_status == BATADV_NO_DUP || (sameseq && similar_ttl))) { batadv_iv_ogm_orig_update(bat_priv, orig_node, orig_ifinfo, ethhdr, ogm_packet, if_incoming, if_outgoing, dup_status); } batadv_orig_ifinfo_put(orig_ifinfo); /* only forward for specific interface, not for the default one. 
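 * BATADV_IF_DEFAULT only tracks the interface-independent routing state;
 * actual rebroadcasts are queued per real outgoing hard interface.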
*/ if (if_outgoing == BATADV_IF_DEFAULT) goto out_neigh; /* is single hop (direct) neighbor */ if (is_single_hop_neigh) { /* OGMs from secondary interfaces should only scheduled once * per interface where it has been received, not multiple times */ if (ogm_packet->ttl <= 2 && if_incoming != if_outgoing) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: OGM from secondary interface and wrong outgoing interface\n"); goto out_neigh; } /* mark direct link on incoming interface */ batadv_iv_ogm_forward(orig_node, ethhdr, ogm_packet, is_single_hop_neigh, is_from_best_next_hop, if_incoming, if_outgoing); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Forwarding packet: rebroadcast neighbor packet with direct link flag\n"); goto out_neigh; } /* multihop originator */ if (!is_bidirect) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: not received via bidirectional link\n"); goto out_neigh; } if (dup_status == BATADV_NEIGH_DUP) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: duplicate packet received\n"); goto out_neigh; } batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Forwarding packet: rebroadcast originator packet\n"); batadv_iv_ogm_forward(orig_node, ethhdr, ogm_packet, is_single_hop_neigh, is_from_best_next_hop, if_incoming, if_outgoing); out_neigh: if (orig_neigh_node && !is_single_hop_neigh) batadv_orig_node_put(orig_neigh_node); out: batadv_neigh_ifinfo_put(router_ifinfo); batadv_neigh_node_put(router); batadv_neigh_node_put(router_router); batadv_neigh_node_put(orig_neigh_router); batadv_hardif_neigh_put(hardif_neigh); consume_skb(skb_priv); } /** * batadv_iv_ogm_process_reply() - Check OGM for direct reply and process it * @ogm_packet: rebroadcast OGM packet to process * @if_incoming: the interface where this packet was received * @orig_node: originator which reproadcasted the OGMs * @if_incoming_seqno: OGM sequence number when rebroadcast was received */ static void batadv_iv_ogm_process_reply(struct batadv_ogm_packet *ogm_packet, struct batadv_hard_iface *if_incoming, struct batadv_orig_node *orig_node, u32 if_incoming_seqno) { struct batadv_orig_ifinfo *orig_ifinfo; s32 bit_pos; u8 *weight; /* neighbor has to indicate direct link and it has to * come via the corresponding interface */ if (!(ogm_packet->flags & BATADV_DIRECTLINK)) return; if (!batadv_compare_eth(if_incoming->net_dev->dev_addr, ogm_packet->orig)) return; orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_incoming); if (!orig_ifinfo) return; /* save packet seqno for bidirectional check */ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); bit_pos = if_incoming_seqno - 2; bit_pos -= ntohl(ogm_packet->seqno); batadv_set_bit(orig_ifinfo->bat_iv.bcast_own, bit_pos); weight = &orig_ifinfo->bat_iv.bcast_own_sum; *weight = bitmap_weight(orig_ifinfo->bat_iv.bcast_own, BATADV_TQ_LOCAL_WINDOW_SIZE); spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); batadv_orig_ifinfo_put(orig_ifinfo); } /** * batadv_iv_ogm_process() - process an incoming batman iv OGM * @skb: the skb containing the OGM * @ogm_offset: offset to the OGM which should be processed (for aggregates) * @if_incoming: the interface where this packet was received */ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset, struct batadv_hard_iface *if_incoming) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_orig_node *orig_neigh_node, *orig_node; struct batadv_hard_iface *hard_iface; struct batadv_ogm_packet *ogm_packet; u32 if_incoming_seqno; bool has_directlink_flag; struct ethhdr *ethhdr; bool is_my_oldorig = 
false; bool is_my_addr = false; bool is_my_orig = false; ogm_packet = (struct batadv_ogm_packet *)(skb->data + ogm_offset); ethhdr = eth_hdr(skb); /* Silently drop when the batman packet is actually not a * correct packet. * * This might happen if a packet is padded (e.g. Ethernet has a * minimum frame length of 64 byte) and the aggregation interprets * it as an additional length. * * TODO: A more sane solution would be to have a bit in the * batadv_ogm_packet to detect whether the packet is the last * packet in an aggregation. Here we expect that the padding * is always zero (or not 0x01) */ if (ogm_packet->packet_type != BATADV_IV_OGM) return; /* could be changed by schedule_own_packet() */ if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno); if (ogm_packet->flags & BATADV_DIRECTLINK) has_directlink_flag = true; else has_directlink_flag = false; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, tq %d, TTL %d, V %d, IDF %d)\n", ethhdr->h_source, if_incoming->net_dev->name, if_incoming->net_dev->dev_addr, ogm_packet->orig, ogm_packet->prev_sender, ntohl(ogm_packet->seqno), ogm_packet->tq, ogm_packet->ttl, ogm_packet->version, has_directlink_flag); rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { if (hard_iface->if_status != BATADV_IF_ACTIVE) continue; if (hard_iface->soft_iface != if_incoming->soft_iface) continue; if (batadv_compare_eth(ethhdr->h_source, hard_iface->net_dev->dev_addr)) is_my_addr = true; if (batadv_compare_eth(ogm_packet->orig, hard_iface->net_dev->dev_addr)) is_my_orig = true; if (batadv_compare_eth(ogm_packet->prev_sender, hard_iface->net_dev->dev_addr)) is_my_oldorig = true; } rcu_read_unlock(); if (is_my_addr) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: received my own broadcast (sender: %pM)\n", ethhdr->h_source); return; } if (is_my_orig) { orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source); if (!orig_neigh_node) return; batadv_iv_ogm_process_reply(ogm_packet, if_incoming, orig_neigh_node, if_incoming_seqno); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: originator packet from myself (via neighbor)\n"); batadv_orig_node_put(orig_neigh_node); return; } if (is_my_oldorig) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n", ethhdr->h_source); return; } if (ogm_packet->flags & BATADV_NOT_BEST_NEXT_HOP) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n", ethhdr->h_source); return; } orig_node = batadv_iv_ogm_orig_get(bat_priv, ogm_packet->orig); if (!orig_node) return; batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node, if_incoming, BATADV_IF_DEFAULT); rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { if (hard_iface->if_status != BATADV_IF_ACTIVE) continue; if (hard_iface->soft_iface != bat_priv->soft_iface) continue; if (!kref_get_unless_zero(&hard_iface->refcount)) continue; batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node, if_incoming, hard_iface); batadv_hardif_put(hard_iface); } rcu_read_unlock(); batadv_orig_node_put(orig_node); } static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work) { struct delayed_work *delayed_work; struct batadv_forw_packet *forw_packet; struct batadv_priv *bat_priv; bool dropped = false; delayed_work = to_delayed_work(work); forw_packet = container_of(delayed_work, struct 
batadv_forw_packet, delayed_work); bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface); if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) { dropped = true; goto out; } batadv_iv_ogm_emit(forw_packet); /* we have to have at least one packet in the queue to determine the * queues wake up time unless we are shutting down. * * only re-schedule if this is the "original" copy, e.g. the OGM of the * primary interface should only be rescheduled once per period, but * this function will be called for the forw_packet instances of the * other secondary interfaces as well. */ if (forw_packet->own && forw_packet->if_incoming == forw_packet->if_outgoing) batadv_iv_ogm_schedule(forw_packet->if_incoming); out: /* do we get something for free()? */ if (batadv_forw_packet_steal(forw_packet, &bat_priv->forw_bat_list_lock)) batadv_forw_packet_free(forw_packet, dropped); } static int batadv_iv_ogm_receive(struct sk_buff *skb, struct batadv_hard_iface *if_incoming) { struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_ogm_packet *ogm_packet; u8 *packet_pos; int ogm_offset; bool res; int ret = NET_RX_DROP; res = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN); if (!res) goto free_skb; /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface * that does not have B.A.T.M.A.N. IV enabled ? */ if (bat_priv->algo_ops->iface.enable != batadv_iv_ogm_iface_enable) goto free_skb; batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES, skb->len + ETH_HLEN); ogm_offset = 0; ogm_packet = (struct batadv_ogm_packet *)skb->data; /* unpack the aggregated packets and process them one by one */ while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb), ogm_packet)) { batadv_iv_ogm_process(skb, ogm_offset, if_incoming); ogm_offset += BATADV_OGM_HLEN; ogm_offset += ntohs(ogm_packet->tvlv_len); packet_pos = skb->data + ogm_offset; ogm_packet = (struct batadv_ogm_packet *)packet_pos; } ret = NET_RX_SUCCESS; free_skb: if (ret == NET_RX_SUCCESS) consume_skb(skb); else kfree_skb(skb); return ret; } /** * batadv_iv_ogm_neigh_get_tq_avg() - Get the TQ average for a neighbour on a * given outgoing interface. * @neigh_node: Neighbour of interest * @if_outgoing: Outgoing interface of interest * @tq_avg: Pointer of where to store the TQ average * * Return: False if no average TQ available, otherwise true. 
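 * (the reported value is the cached ring-buffer average of the TQ values
 * stored for this neighbour and outgoing interface)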
*/ static bool batadv_iv_ogm_neigh_get_tq_avg(struct batadv_neigh_node *neigh_node, struct batadv_hard_iface *if_outgoing, u8 *tq_avg) { struct batadv_neigh_ifinfo *n_ifinfo; n_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing); if (!n_ifinfo) return false; *tq_avg = n_ifinfo->bat_iv.tq_avg; batadv_neigh_ifinfo_put(n_ifinfo); return true; } /** * batadv_iv_ogm_orig_dump_subentry() - Dump an originator subentry into a * message * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message * @bat_priv: The bat priv with all the soft interface information * @if_outgoing: Limit dump to entries with this outgoing interface * @orig_node: Originator to dump * @neigh_node: Single hops neighbour * @best: Is the best originator * * Return: Error code, or 0 on success */ static int batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_priv *bat_priv, struct batadv_hard_iface *if_outgoing, struct batadv_orig_node *orig_node, struct batadv_neigh_node *neigh_node, bool best) { void *hdr; u8 tq_avg; unsigned int last_seen_msecs; last_seen_msecs = jiffies_to_msecs(jiffies - orig_node->last_seen); if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node, if_outgoing, &tq_avg)) return 0; if (if_outgoing != BATADV_IF_DEFAULT && if_outgoing != neigh_node->if_incoming) return 0; hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI, BATADV_CMD_GET_ORIGINATORS); if (!hdr) return -ENOBUFS; if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, orig_node->orig) || nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN, neigh_node->addr) || nla_put_string(msg, BATADV_ATTR_HARD_IFNAME, neigh_node->if_incoming->net_dev->name) || nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, neigh_node->if_incoming->net_dev->ifindex) || nla_put_u8(msg, BATADV_ATTR_TQ, tq_avg) || nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, last_seen_msecs)) goto nla_put_failure; if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } /** * batadv_iv_ogm_orig_dump_entry() - Dump an originator entry into a message * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message * @bat_priv: The bat priv with all the soft interface information * @if_outgoing: Limit dump to entries with this outgoing interface * @orig_node: Originator to dump * @sub_s: Number of sub entries to skip * * This function assumes the caller holds rcu_read_lock(). 
* * Return: Error code, or 0 on success */ static int batadv_iv_ogm_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_priv *bat_priv, struct batadv_hard_iface *if_outgoing, struct batadv_orig_node *orig_node, int *sub_s) { struct batadv_neigh_node *neigh_node_best; struct batadv_neigh_node *neigh_node; int sub = 0; bool best; u8 tq_avg_best; neigh_node_best = batadv_orig_router_get(orig_node, if_outgoing); if (!neigh_node_best) goto out; if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node_best, if_outgoing, &tq_avg_best)) goto out; if (tq_avg_best == 0) goto out; hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) { if (sub++ < *sub_s) continue; best = (neigh_node == neigh_node_best); if (batadv_iv_ogm_orig_dump_subentry(msg, portid, seq, bat_priv, if_outgoing, orig_node, neigh_node, best)) { batadv_neigh_node_put(neigh_node_best); *sub_s = sub - 1; return -EMSGSIZE; } } out: batadv_neigh_node_put(neigh_node_best); *sub_s = 0; return 0; } /** * batadv_iv_ogm_orig_dump_bucket() - Dump an originator bucket into a * message * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message * @bat_priv: The bat priv with all the soft interface information * @if_outgoing: Limit dump to entries with this outgoing interface * @head: Bucket to be dumped * @idx_s: Number of entries to be skipped * @sub: Number of sub entries to be skipped * * Return: Error code, or 0 on success */ static int batadv_iv_ogm_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_priv *bat_priv, struct batadv_hard_iface *if_outgoing, struct hlist_head *head, int *idx_s, int *sub) { struct batadv_orig_node *orig_node; int idx = 0; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, head, hash_entry) { if (idx++ < *idx_s) continue; if (batadv_iv_ogm_orig_dump_entry(msg, portid, seq, bat_priv, if_outgoing, orig_node, sub)) { rcu_read_unlock(); *idx_s = idx - 1; return -EMSGSIZE; } } rcu_read_unlock(); *idx_s = 0; *sub = 0; return 0; } /** * batadv_iv_ogm_orig_dump() - Dump the originators into a message * @msg: Netlink message to dump into * @cb: Control block containing additional options * @bat_priv: The bat priv with all the soft interface information * @if_outgoing: Limit dump to entries with this outgoing interface */ static void batadv_iv_ogm_orig_dump(struct sk_buff *msg, struct netlink_callback *cb, struct batadv_priv *bat_priv, struct batadv_hard_iface *if_outgoing) { struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_head *head; int bucket = cb->args[0]; int idx = cb->args[1]; int sub = cb->args[2]; int portid = NETLINK_CB(cb->skb).portid; while (bucket < hash->size) { head = &hash->table[bucket]; if (batadv_iv_ogm_orig_dump_bucket(msg, portid, cb->nlh->nlmsg_seq, bat_priv, if_outgoing, head, &idx, &sub)) break; bucket++; } cb->args[0] = bucket; cb->args[1] = idx; cb->args[2] = sub; } /** * batadv_iv_ogm_neigh_diff() - calculate tq difference of two neighbors * @neigh1: the first neighbor object of the comparison * @if_outgoing1: outgoing interface for the first neighbor * @neigh2: the second neighbor object of the comparison * @if_outgoing2: outgoing interface for the second neighbor * @diff: pointer to integer receiving the calculated difference * * The content of *@diff is only valid when this function returns true. 
* It is less, equal to or greater than 0 if the metric via neigh1 is lower, * the same as or higher than the metric via neigh2 * * Return: true when the difference could be calculated, false otherwise */ static bool batadv_iv_ogm_neigh_diff(struct batadv_neigh_node *neigh1, struct batadv_hard_iface *if_outgoing1, struct batadv_neigh_node *neigh2, struct batadv_hard_iface *if_outgoing2, int *diff) { struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo; u8 tq1, tq2; bool ret = true; neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1); neigh2_ifinfo = batadv_neigh_ifinfo_get(neigh2, if_outgoing2); if (!neigh1_ifinfo || !neigh2_ifinfo) { ret = false; goto out; } tq1 = neigh1_ifinfo->bat_iv.tq_avg; tq2 = neigh2_ifinfo->bat_iv.tq_avg; *diff = (int)tq1 - (int)tq2; out: batadv_neigh_ifinfo_put(neigh1_ifinfo); batadv_neigh_ifinfo_put(neigh2_ifinfo); return ret; } /** * batadv_iv_ogm_neigh_dump_neigh() - Dump a neighbour into a netlink message * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message * @hardif_neigh: Neighbour to be dumped * * Return: Error code, or 0 on success */ static int batadv_iv_ogm_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_hardif_neigh_node *hardif_neigh) { void *hdr; unsigned int last_seen_msecs; last_seen_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen); hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI, BATADV_CMD_GET_NEIGHBORS); if (!hdr) return -ENOBUFS; if (nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN, hardif_neigh->addr) || nla_put_string(msg, BATADV_ATTR_HARD_IFNAME, hardif_neigh->if_incoming->net_dev->name) || nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, hardif_neigh->if_incoming->net_dev->ifindex) || nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, last_seen_msecs)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } /** * batadv_iv_ogm_neigh_dump_hardif() - Dump the neighbours of a hard interface * into a message * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message * @bat_priv: The bat priv with all the soft interface information * @hard_iface: Hard interface to dump the neighbours for * @idx_s: Number of entries to skip * * This function assumes the caller holds rcu_read_lock(). 
* * Return: Error code, or 0 on success */ static int batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_priv *bat_priv, struct batadv_hard_iface *hard_iface, int *idx_s) { struct batadv_hardif_neigh_node *hardif_neigh; int idx = 0; hlist_for_each_entry_rcu(hardif_neigh, &hard_iface->neigh_list, list) { if (idx++ < *idx_s) continue; if (batadv_iv_ogm_neigh_dump_neigh(msg, portid, seq, hardif_neigh)) { *idx_s = idx - 1; return -EMSGSIZE; } } *idx_s = 0; return 0; } /** * batadv_iv_ogm_neigh_dump() - Dump the neighbours into a message * @msg: Netlink message to dump into * @cb: Control block containing additional options * @bat_priv: The bat priv with all the soft interface information * @single_hardif: Limit dump to this hard interface */ static void batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb, struct batadv_priv *bat_priv, struct batadv_hard_iface *single_hardif) { struct batadv_hard_iface *hard_iface; int i_hardif = 0; int i_hardif_s = cb->args[0]; int idx = cb->args[1]; int portid = NETLINK_CB(cb->skb).portid; rcu_read_lock(); if (single_hardif) { if (i_hardif_s == 0) { if (batadv_iv_ogm_neigh_dump_hardif(msg, portid, cb->nlh->nlmsg_seq, bat_priv, single_hardif, &idx) == 0) i_hardif++; } } else { list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { if (hard_iface->soft_iface != bat_priv->soft_iface) continue; if (i_hardif++ < i_hardif_s) continue; if (batadv_iv_ogm_neigh_dump_hardif(msg, portid, cb->nlh->nlmsg_seq, bat_priv, hard_iface, &idx)) { i_hardif--; break; } } } rcu_read_unlock(); cb->args[0] = i_hardif; cb->args[1] = idx; } /** * batadv_iv_ogm_neigh_cmp() - compare the metrics of two neighbors * @neigh1: the first neighbor object of the comparison * @if_outgoing1: outgoing interface for the first neighbor * @neigh2: the second neighbor object of the comparison * @if_outgoing2: outgoing interface for the second neighbor * * Return: a value less, equal to or greater than 0 if the metric via neigh1 is * lower, the same as or higher than the metric via neigh2 */ static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1, struct batadv_hard_iface *if_outgoing1, struct batadv_neigh_node *neigh2, struct batadv_hard_iface *if_outgoing2) { bool ret; int diff; ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2, if_outgoing2, &diff); if (!ret) return 0; return diff; } /** * batadv_iv_ogm_neigh_is_sob() - check if neigh1 is similarly good or better * than neigh2 from the metric prospective * @neigh1: the first neighbor object of the comparison * @if_outgoing1: outgoing interface for the first neighbor * @neigh2: the second neighbor object of the comparison * @if_outgoing2: outgoing interface for the second neighbor * * Return: true if the metric via neigh1 is equally good or better than * the metric via neigh2, false otherwise. 
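 * Neighbours whose TQ averages are within BATADV_TQ_SIMILARITY_THRESHOLD of
 * each other are considered equally usable next hops.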
*/ static bool batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1, struct batadv_hard_iface *if_outgoing1, struct batadv_neigh_node *neigh2, struct batadv_hard_iface *if_outgoing2) { bool ret; int diff; ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2, if_outgoing2, &diff); if (!ret) return false; ret = diff > -BATADV_TQ_SIMILARITY_THRESHOLD; return ret; } static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface) { /* begin scheduling originator messages on that interface */ batadv_iv_ogm_schedule(hard_iface); } /** * batadv_iv_init_sel_class() - initialize GW selection class * @bat_priv: the bat priv with all the soft interface information */ static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv) { /* set default TQ difference threshold to 20 */ atomic_set(&bat_priv->gw.sel_class, 20); } static struct batadv_gw_node * batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv) { struct batadv_neigh_node *router; struct batadv_neigh_ifinfo *router_ifinfo; struct batadv_gw_node *gw_node, *curr_gw = NULL; u64 max_gw_factor = 0; u64 tmp_gw_factor = 0; u8 max_tq = 0; u8 tq_avg; struct batadv_orig_node *orig_node; rcu_read_lock(); hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.gateway_list, list) { orig_node = gw_node->orig_node; router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); if (!router) continue; router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); if (!router_ifinfo) goto next; if (!kref_get_unless_zero(&gw_node->refcount)) goto next; tq_avg = router_ifinfo->bat_iv.tq_avg; switch (atomic_read(&bat_priv->gw.sel_class)) { case 1: /* fast connection */ tmp_gw_factor = tq_avg * tq_avg; tmp_gw_factor *= gw_node->bandwidth_down; tmp_gw_factor *= 100 * 100; tmp_gw_factor >>= 18; if (tmp_gw_factor > max_gw_factor || (tmp_gw_factor == max_gw_factor && tq_avg > max_tq)) { batadv_gw_node_put(curr_gw); curr_gw = gw_node; kref_get(&curr_gw->refcount); } break; default: /* 2: stable connection (use best statistic) * 3: fast-switch (use best statistic but change as * soon as a better gateway appears) * XX: late-switch (use best statistic but change as * soon as a better gateway appears which has * $routing_class more tq points) */ if (tq_avg > max_tq) { batadv_gw_node_put(curr_gw); curr_gw = gw_node; kref_get(&curr_gw->refcount); } break; } if (tq_avg > max_tq) max_tq = tq_avg; if (tmp_gw_factor > max_gw_factor) max_gw_factor = tmp_gw_factor; batadv_gw_node_put(gw_node); next: batadv_neigh_node_put(router); batadv_neigh_ifinfo_put(router_ifinfo); } rcu_read_unlock(); return curr_gw; } static bool batadv_iv_gw_is_eligible(struct batadv_priv *bat_priv, struct batadv_orig_node *curr_gw_orig, struct batadv_orig_node *orig_node) { struct batadv_neigh_ifinfo *router_orig_ifinfo = NULL; struct batadv_neigh_ifinfo *router_gw_ifinfo = NULL; struct batadv_neigh_node *router_gw = NULL; struct batadv_neigh_node *router_orig = NULL; u8 gw_tq_avg, orig_tq_avg; bool ret = false; /* dynamic re-election is performed only on fast or late switch */ if (atomic_read(&bat_priv->gw.sel_class) <= 2) return false; router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT); if (!router_gw) { ret = true; goto out; } router_gw_ifinfo = batadv_neigh_ifinfo_get(router_gw, BATADV_IF_DEFAULT); if (!router_gw_ifinfo) { ret = true; goto out; } router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); if (!router_orig) goto out; router_orig_ifinfo = batadv_neigh_ifinfo_get(router_orig, BATADV_IF_DEFAULT); if (!router_orig_ifinfo) goto out; 
gw_tq_avg = router_gw_ifinfo->bat_iv.tq_avg; orig_tq_avg = router_orig_ifinfo->bat_iv.tq_avg; /* the TQ value has to be better */ if (orig_tq_avg < gw_tq_avg) goto out; /* if the routing class is greater than 3 the value tells us how much * greater the TQ value of the new gateway must be */ if ((atomic_read(&bat_priv->gw.sel_class) > 3) && (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw.sel_class))) goto out; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n", gw_tq_avg, orig_tq_avg); ret = true; out: batadv_neigh_ifinfo_put(router_gw_ifinfo); batadv_neigh_ifinfo_put(router_orig_ifinfo); batadv_neigh_node_put(router_gw); batadv_neigh_node_put(router_orig); return ret; } /** * batadv_iv_gw_dump_entry() - Dump a gateway into a message * @msg: Netlink message to dump into * @portid: Port making netlink request * @cb: Control block containing additional options * @bat_priv: The bat priv with all the soft interface information * @gw_node: Gateway to be dumped * * Return: Error code, or 0 on success */ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, struct netlink_callback *cb, struct batadv_priv *bat_priv, struct batadv_gw_node *gw_node) { struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_neigh_node *router; struct batadv_gw_node *curr_gw = NULL; int ret = 0; void *hdr; router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); if (!router) goto out; router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); if (!router_ifinfo) goto out; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq, &batadv_netlink_family, NLM_F_MULTI, BATADV_CMD_GET_GATEWAYS); if (!hdr) { ret = -ENOBUFS; goto out; } genl_dump_check_consistent(cb, hdr); ret = -EMSGSIZE; if (curr_gw == gw_node) if (nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) { genlmsg_cancel(msg, hdr); goto out; } if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, gw_node->orig_node->orig) || nla_put_u8(msg, BATADV_ATTR_TQ, router_ifinfo->bat_iv.tq_avg) || nla_put(msg, BATADV_ATTR_ROUTER, ETH_ALEN, router->addr) || nla_put_string(msg, BATADV_ATTR_HARD_IFNAME, router->if_incoming->net_dev->name) || nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, router->if_incoming->net_dev->ifindex) || nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_DOWN, gw_node->bandwidth_down) || nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_UP, gw_node->bandwidth_up)) { genlmsg_cancel(msg, hdr); goto out; } genlmsg_end(msg, hdr); ret = 0; out: batadv_gw_node_put(curr_gw); batadv_neigh_ifinfo_put(router_ifinfo); batadv_neigh_node_put(router); return ret; } /** * batadv_iv_gw_dump() - Dump gateways into a message * @msg: Netlink message to dump into * @cb: Control block containing additional options * @bat_priv: The bat priv with all the soft interface information */ static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb, struct batadv_priv *bat_priv) { int portid = NETLINK_CB(cb->skb).portid; struct batadv_gw_node *gw_node; int idx_skip = cb->args[0]; int idx = 0; spin_lock_bh(&bat_priv->gw.list_lock); cb->seq = bat_priv->gw.generation << 1 | 1; hlist_for_each_entry(gw_node, &bat_priv->gw.gateway_list, list) { if (idx++ < idx_skip) continue; if (batadv_iv_gw_dump_entry(msg, portid, cb, bat_priv, gw_node)) { idx_skip = idx - 1; goto unlock; } } idx_skip = idx; unlock: spin_unlock_bh(&bat_priv->gw.list_lock); cb->args[0] = idx_skip; } static struct batadv_algo_ops batadv_batman_iv __read_mostly = { .name = 
"BATMAN_IV", .iface = { .enable = batadv_iv_ogm_iface_enable, .enabled = batadv_iv_iface_enabled, .disable = batadv_iv_ogm_iface_disable, .update_mac = batadv_iv_ogm_iface_update_mac, .primary_set = batadv_iv_ogm_primary_iface_set, }, .neigh = { .cmp = batadv_iv_ogm_neigh_cmp, .is_similar_or_better = batadv_iv_ogm_neigh_is_sob, .dump = batadv_iv_ogm_neigh_dump, }, .orig = { .dump = batadv_iv_ogm_orig_dump, }, .gw = { .init_sel_class = batadv_iv_init_sel_class, .sel_class_max = BATADV_TQ_MAX_VALUE, .get_best_gw_node = batadv_iv_gw_get_best_gw_node, .is_eligible = batadv_iv_gw_is_eligible, .dump = batadv_iv_gw_dump, }, }; /** * batadv_iv_init() - B.A.T.M.A.N. IV initialization function * * Return: 0 on success or negative error number in case of failure */ int __init batadv_iv_init(void) { int ret; /* batman originator packet */ ret = batadv_recv_handler_register(BATADV_IV_OGM, batadv_iv_ogm_receive); if (ret < 0) goto out; ret = batadv_algo_register(&batadv_batman_iv); if (ret < 0) goto handler_unregister; goto out; handler_unregister: batadv_recv_handler_unregister(BATADV_IV_OGM); out: return ret; }
// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/quotaops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/genetlink.h>

static const struct genl_multicast_group quota_mcgrps[] = {
	{ .name = "events", },
};

/* Netlink family structure for quota */
static struct genl_family quota_genl_family __ro_after_init = {
	.module = THIS_MODULE,
	.hdrsize = 0,
	.name = "VFS_DQUOT",
	.version = 1,
	.maxattr = QUOTA_NL_A_MAX,
	.mcgrps = quota_mcgrps,
	.n_mcgrps = ARRAY_SIZE(quota_mcgrps),
};

/**
 * quota_send_warning - Send warning to userspace about exceeded quota
 * @qid: The kernel internal quota identifier.
 * @dev: The device on which the fs is mounted (sb->s_dev)
 * @warntype: The type of the warning: QUOTA_NL_...
 *
 * This can be used by filesystems (including those which don't use
 * dquot) to send a message to userspace relating to quota limits.
 *
 */
void quota_send_warning(struct kqid qid, dev_t dev, const char warntype)
{
	static atomic_t seq;
	struct sk_buff *skb;
	void *msg_head;
	int ret;
	int msg_size = 4 * nla_total_size(sizeof(u32)) +
		       2 * nla_total_size_64bit(sizeof(u64));

	/* We have to allocate using GFP_NOFS as we are called from a
	 * filesystem performing write and thus further recursion into
	 * the fs to free some data could cause deadlocks. */
	skb = genlmsg_new(msg_size, GFP_NOFS);
	if (!skb) {
		printk(KERN_ERR
		       "VFS: Not enough memory to send quota warning.\n");
		return;
	}
	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
			       &quota_genl_family, 0, QUOTA_NL_C_WARNING);
	if (!msg_head) {
		printk(KERN_ERR
		       "VFS: Cannot store netlink header in quota warning.\n");
		goto err_out;
	}
	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64_64bit(skb, QUOTA_NL_A_EXCESS_ID,
				from_kqid_munged(&init_user_ns, qid),
				QUOTA_NL_A_PAD);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64_64bit(skb, QUOTA_NL_A_CAUSED_ID,
				from_kuid_munged(&init_user_ns, current_uid()),
				QUOTA_NL_A_PAD);
	if (ret)
		goto attr_err_out;
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&quota_genl_family, skb, 0, 0, GFP_NOFS);
	return;
attr_err_out:
	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
	kfree_skb(skb);
}
EXPORT_SYMBOL(quota_send_warning);

static int __init quota_init(void)
{
	if (genl_register_family(&quota_genl_family) != 0)
		printk(KERN_ERR
		       "VFS: Failed to create quota netlink interface.\n");
	return 0;
};
fs_initcall(quota_init);
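/*
 * Illustrative sketch only (not part of the file above): how a filesystem
 * that enforces its own limits might report an exceeded block hard limit to
 * userspace through quota_send_warning(). The helper name and the decision
 * of when to call it are assumptions for this sketch; only the
 * quota_send_warning() call itself mirrors the API defined above.
 */
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>

static void example_report_block_hardlimit(struct inode *inode)
{
	/* Identify the owner whose quota was exceeded ... */
	struct kqid qid = make_kqid_uid(i_uid_read(inode));

	/* ... and multicast a QUOTA_NL_BHARDWARN event for this filesystem's
	 * device so userspace listeners can warn the affected user.
	 */
	quota_send_warning(qid, inode->i_sb->s_dev, QUOTA_NL_BHARDWARN);
}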
// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/wakeup.c - System wakeup events framework
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/power.h>

#include "power.h"

#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
		srcu_read_lock_held(&wakeup_srcu))

/*
 * If set, the suspend/hibernate code will abort transitions to a sleep state
 * if wakeup events are registered during or immediately before the transition.
 */
bool events_check_enabled __read_mostly;

/* First wakeup IRQ seen by the kernel in the last cycle. */
static unsigned int wakeup_irq[2] __read_mostly;
static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);

/* If greater than 0 and the system is suspending, terminate the suspend. */
static atomic_t pm_abort_suspend __read_mostly;

/*
 * Combined counters of registered wakeup events and wakeup events in progress.
 * They need to be modified together atomically, so it's better to use one
 * atomic variable to hold them both.
 */
static atomic_t combined_event_count = ATOMIC_INIT(0);

#define IN_PROGRESS_BITS	(sizeof(int) * 4)
#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)

static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
	unsigned int comb = atomic_read(&combined_event_count);

	*cnt = (comb >> IN_PROGRESS_BITS);
	*inpr = comb & MAX_IN_PROGRESS;
}

/* A preserved old value of the events counter.
*/ static unsigned int saved_count; static DEFINE_RAW_SPINLOCK(events_lock); static void pm_wakeup_timer_fn(struct timer_list *t); static LIST_HEAD(wakeup_sources); static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); DEFINE_STATIC_SRCU(wakeup_srcu); static struct wakeup_source deleted_ws = { .name = "deleted", .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock), }; static DEFINE_IDA(wakeup_ida); /** * wakeup_source_create - Create a struct wakeup_source object. * @name: Name of the new wakeup source. */ struct wakeup_source *wakeup_source_create(const char *name) { struct wakeup_source *ws; const char *ws_name; int id; ws = kzalloc(sizeof(*ws), GFP_KERNEL); if (!ws) goto err_ws; ws_name = kstrdup_const(name, GFP_KERNEL); if (!ws_name) goto err_name; ws->name = ws_name; id = ida_alloc(&wakeup_ida, GFP_KERNEL); if (id < 0) goto err_id; ws->id = id; return ws; err_id: kfree_const(ws->name); err_name: kfree(ws); err_ws: return NULL; } EXPORT_SYMBOL_GPL(wakeup_source_create); /* * Record wakeup_source statistics being deleted into a dummy wakeup_source. */ static void wakeup_source_record(struct wakeup_source *ws) { unsigned long flags; spin_lock_irqsave(&deleted_ws.lock, flags); if (ws->event_count) { deleted_ws.total_time = ktime_add(deleted_ws.total_time, ws->total_time); deleted_ws.prevent_sleep_time = ktime_add(deleted_ws.prevent_sleep_time, ws->prevent_sleep_time); deleted_ws.max_time = ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ? deleted_ws.max_time : ws->max_time; deleted_ws.event_count += ws->event_count; deleted_ws.active_count += ws->active_count; deleted_ws.relax_count += ws->relax_count; deleted_ws.expire_count += ws->expire_count; deleted_ws.wakeup_count += ws->wakeup_count; } spin_unlock_irqrestore(&deleted_ws.lock, flags); } static void wakeup_source_free(struct wakeup_source *ws) { ida_free(&wakeup_ida, ws->id); kfree_const(ws->name); kfree(ws); } /** * wakeup_source_destroy - Destroy a struct wakeup_source object. * @ws: Wakeup source to destroy. * * Use only for wakeup source objects created with wakeup_source_create(). */ void wakeup_source_destroy(struct wakeup_source *ws) { if (!ws) return; __pm_relax(ws); wakeup_source_record(ws); wakeup_source_free(ws); } EXPORT_SYMBOL_GPL(wakeup_source_destroy); /** * wakeup_source_add - Add given object to the list of wakeup sources. * @ws: Wakeup source object to add to the list. */ void wakeup_source_add(struct wakeup_source *ws) { unsigned long flags; if (WARN_ON(!ws)) return; spin_lock_init(&ws->lock); timer_setup(&ws->timer, pm_wakeup_timer_fn, 0); ws->active = false; raw_spin_lock_irqsave(&events_lock, flags); list_add_rcu(&ws->entry, &wakeup_sources); raw_spin_unlock_irqrestore(&events_lock, flags); } EXPORT_SYMBOL_GPL(wakeup_source_add); /** * wakeup_source_remove - Remove given object from the wakeup sources list. * @ws: Wakeup source object to remove from the list. */ void wakeup_source_remove(struct wakeup_source *ws) { unsigned long flags; if (WARN_ON(!ws)) return; raw_spin_lock_irqsave(&events_lock, flags); list_del_rcu(&ws->entry); raw_spin_unlock_irqrestore(&events_lock, flags); synchronize_srcu(&wakeup_srcu); del_timer_sync(&ws->timer); /* * Clear timer.function to make wakeup_source_not_registered() treat * this wakeup source as not registered. */ ws->timer.function = NULL; } EXPORT_SYMBOL_GPL(wakeup_source_remove); /** * wakeup_source_register - Create wakeup source and add it to the list. * @dev: Device this wakeup source is associated with (or NULL if virtual). 
* @name: Name of the wakeup source to register. */ struct wakeup_source *wakeup_source_register(struct device *dev, const char *name) { struct wakeup_source *ws; int ret; ws = wakeup_source_create(name); if (ws) { if (!dev || device_is_registered(dev)) { ret = wakeup_source_sysfs_add(dev, ws); if (ret) { wakeup_source_free(ws); return NULL; } } wakeup_source_add(ws); } return ws; } EXPORT_SYMBOL_GPL(wakeup_source_register); /** * wakeup_source_unregister - Remove wakeup source from the list and remove it. * @ws: Wakeup source object to unregister. */ void wakeup_source_unregister(struct wakeup_source *ws) { if (ws) { wakeup_source_remove(ws); if (ws->dev) wakeup_source_sysfs_remove(ws); wakeup_source_destroy(ws); } } EXPORT_SYMBOL_GPL(wakeup_source_unregister); /** * wakeup_sources_read_lock - Lock wakeup source list for read. * * Returns an index of srcu lock for struct wakeup_srcu. * This index must be passed to the matching wakeup_sources_read_unlock(). */ int wakeup_sources_read_lock(void) { return srcu_read_lock(&wakeup_srcu); } EXPORT_SYMBOL_GPL(wakeup_sources_read_lock); /** * wakeup_sources_read_unlock - Unlock wakeup source list. * @idx: return value from corresponding wakeup_sources_read_lock() */ void wakeup_sources_read_unlock(int idx) { srcu_read_unlock(&wakeup_srcu, idx); } EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock); /** * wakeup_sources_walk_start - Begin a walk on wakeup source list * * Returns first object of the list of wakeup sources. * * Note that to be safe, wakeup sources list needs to be locked by calling * wakeup_source_read_lock() for this. */ struct wakeup_source *wakeup_sources_walk_start(void) { struct list_head *ws_head = &wakeup_sources; return list_entry_rcu(ws_head->next, struct wakeup_source, entry); } EXPORT_SYMBOL_GPL(wakeup_sources_walk_start); /** * wakeup_sources_walk_next - Get next wakeup source from the list * @ws: Previous wakeup source object * * Note that to be safe, wakeup sources list needs to be locked by calling * wakeup_source_read_lock() for this. */ struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws) { struct list_head *ws_head = &wakeup_sources; return list_next_or_null_rcu(ws_head, &ws->entry, struct wakeup_source, entry); } EXPORT_SYMBOL_GPL(wakeup_sources_walk_next); /** * device_wakeup_attach - Attach a wakeup source object to a device object. * @dev: Device to handle. * @ws: Wakeup source object to attach to @dev. * * This causes @dev to be treated as a wakeup device. */ static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws) { spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { spin_unlock_irq(&dev->power.lock); return -EEXIST; } dev->power.wakeup = ws; if (dev->power.wakeirq) device_wakeup_attach_irq(dev, dev->power.wakeirq); spin_unlock_irq(&dev->power.lock); return 0; } /** * device_wakeup_enable - Enable given device to be a wakeup source. * @dev: Device to handle. * * Create a wakeup source object, register it and attach it to @dev. 
*/ int device_wakeup_enable(struct device *dev) { struct wakeup_source *ws; int ret; if (!dev || !dev->power.can_wakeup) return -EINVAL; if (pm_suspend_target_state != PM_SUSPEND_ON) dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__); ws = wakeup_source_register(dev, dev_name(dev)); if (!ws) return -ENOMEM; ret = device_wakeup_attach(dev, ws); if (ret) wakeup_source_unregister(ws); return ret; } EXPORT_SYMBOL_GPL(device_wakeup_enable); /** * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source * @dev: Device to handle * @wakeirq: Device specific wakeirq entry * * Attach a device wakeirq to the wakeup source so the device * wake IRQ can be configured automatically for suspend and * resume. * * Call under the device's power.lock lock. */ void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq) { struct wakeup_source *ws; ws = dev->power.wakeup; if (!ws) return; if (ws->wakeirq) dev_err(dev, "Leftover wakeup IRQ found, overriding\n"); ws->wakeirq = wakeirq; } /** * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source * @dev: Device to handle * * Removes a device wakeirq from the wakeup source. * * Call under the device's power.lock lock. */ void device_wakeup_detach_irq(struct device *dev) { struct wakeup_source *ws; ws = dev->power.wakeup; if (ws) ws->wakeirq = NULL; } /** * device_wakeup_arm_wake_irqs - * * Iterates over the list of device wakeirqs to arm them. */ void device_wakeup_arm_wake_irqs(void) { struct wakeup_source *ws; int srcuidx; srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) dev_pm_arm_wake_irq(ws->wakeirq); srcu_read_unlock(&wakeup_srcu, srcuidx); } /** * device_wakeup_disarm_wake_irqs - * * Iterates over the list of device wakeirqs to disarm them. */ void device_wakeup_disarm_wake_irqs(void) { struct wakeup_source *ws; int srcuidx; srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) dev_pm_disarm_wake_irq(ws->wakeirq); srcu_read_unlock(&wakeup_srcu, srcuidx); } /** * device_wakeup_detach - Detach a device's wakeup source object from it. * @dev: Device to detach the wakeup source object from. * * After it returns, @dev will not be treated as a wakeup device any more. */ static struct wakeup_source *device_wakeup_detach(struct device *dev) { struct wakeup_source *ws; spin_lock_irq(&dev->power.lock); ws = dev->power.wakeup; dev->power.wakeup = NULL; spin_unlock_irq(&dev->power.lock); return ws; } /** * device_wakeup_disable - Do not regard a device as a wakeup source any more. * @dev: Device to handle. * * Detach the @dev's wakeup source object from it, unregister this wakeup source * object and destroy it. */ void device_wakeup_disable(struct device *dev) { struct wakeup_source *ws; if (!dev || !dev->power.can_wakeup) return; ws = device_wakeup_detach(dev); wakeup_source_unregister(ws); } EXPORT_SYMBOL_GPL(device_wakeup_disable); /** * device_set_wakeup_capable - Set/reset device wakeup capability flag. * @dev: Device to handle. * @capable: Whether or not @dev is capable of waking up the system from sleep. * * If @capable is set, set the @dev's power.can_wakeup flag and add its * wakeup-related attributes to sysfs. Otherwise, unset the @dev's * power.can_wakeup flag and remove its wakeup-related attributes from sysfs. * * This function may sleep and it can't be called from any context where * sleeping is not allowed. 
*/ void device_set_wakeup_capable(struct device *dev, bool capable) { if (!!dev->power.can_wakeup == !!capable) return; dev->power.can_wakeup = capable; if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { if (capable) { int ret = wakeup_sysfs_add(dev); if (ret) dev_info(dev, "Wakeup sysfs attributes not added\n"); } else { wakeup_sysfs_remove(dev); } } } EXPORT_SYMBOL_GPL(device_set_wakeup_capable); /** * device_set_wakeup_enable - Enable or disable a device to wake up the system. * @dev: Device to handle. * @enable: enable/disable flag */ int device_set_wakeup_enable(struct device *dev, bool enable) { if (enable) return device_wakeup_enable(dev); device_wakeup_disable(dev); return 0; } EXPORT_SYMBOL_GPL(device_set_wakeup_enable); /** * wakeup_source_not_registered - validate the given wakeup source. * @ws: Wakeup source to be validated. */ static bool wakeup_source_not_registered(struct wakeup_source *ws) { /* * Use timer struct to check if the given source is initialized * by wakeup_source_add. */ return ws->timer.function != pm_wakeup_timer_fn; } /* * The functions below use the observation that each wakeup event starts a * period in which the system should not be suspended. The moment this period * will end depends on how the wakeup event is going to be processed after being * detected and all of the possible cases can be divided into two distinct * groups. * * First, a wakeup event may be detected by the same functional unit that will * carry out the entire processing of it and possibly will pass it to user space * for further processing. In that case the functional unit that has detected * the event may later "close" the "no suspend" period associated with it * directly as soon as it has been dealt with. The pair of pm_stay_awake() and * pm_relax(), balanced with each other, is supposed to be used in such * situations. * * Second, a wakeup event may be detected by one functional unit and processed * by another one. In that case the unit that has detected it cannot really * "close" the "no suspend" period associated with it, unless it knows in * advance what's going to happen to the event during processing. This * knowledge, however, may not be available to it, so it can simply specify time * to wait before the system can be suspended and pass it as the second * argument of pm_wakeup_event(). * * It is valid to call pm_relax() after pm_wakeup_event(), in which case the * "no suspend" period will be ended either by the pm_relax(), or by the timer * function executed when the timer expires, whichever comes first. */ /** * wakeup_source_activate - Mark given wakeup source as active. * @ws: Wakeup source to handle. * * Update the @ws' statistics and, if @ws has just been activated, notify the PM * core of the event by incrementing the counter of the wakeup events being * processed. */ static void wakeup_source_activate(struct wakeup_source *ws) { unsigned int cec; if (WARN_ONCE(wakeup_source_not_registered(ws), "unregistered wakeup source\n")) return; ws->active = true; ws->active_count++; ws->last_time = ktime_get(); if (ws->autosleep_enabled) ws->start_prevent_time = ws->last_time; /* Increment the counter of events in progress. */ cec = atomic_inc_return(&combined_event_count); trace_wakeup_source_activate(ws->name, cec); } /** * wakeup_source_report_event - Report wakeup event using the given source. * @ws: Wakeup source to report the event for. * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. 
*/ static void wakeup_source_report_event(struct wakeup_source *ws, bool hard) { ws->event_count++; /* This is racy, but the counter is approximate anyway. */ if (events_check_enabled) ws->wakeup_count++; if (!ws->active) wakeup_source_activate(ws); if (hard) pm_system_wakeup(); } /** * __pm_stay_awake - Notify the PM core of a wakeup event. * @ws: Wakeup source object associated with the source of the event. * * It is safe to call this function from interrupt context. */ void __pm_stay_awake(struct wakeup_source *ws) { unsigned long flags; if (!ws) return; spin_lock_irqsave(&ws->lock, flags); wakeup_source_report_event(ws, false); del_timer(&ws->timer); ws->timer_expires = 0; spin_unlock_irqrestore(&ws->lock, flags); } EXPORT_SYMBOL_GPL(__pm_stay_awake); /** * pm_stay_awake - Notify the PM core that a wakeup event is being processed. * @dev: Device the wakeup event is related to. * * Notify the PM core of a wakeup event (signaled by @dev) by calling * __pm_stay_awake for the @dev's wakeup source object. * * Call this function after detecting of a wakeup event if pm_relax() is going * to be called directly after processing the event (and possibly passing it to * user space for further processing). */ void pm_stay_awake(struct device *dev) { unsigned long flags; if (!dev) return; spin_lock_irqsave(&dev->power.lock, flags); __pm_stay_awake(dev->power.wakeup); spin_unlock_irqrestore(&dev->power.lock, flags); } EXPORT_SYMBOL_GPL(pm_stay_awake); #ifdef CONFIG_PM_AUTOSLEEP static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now) { ktime_t delta = ktime_sub(now, ws->start_prevent_time); ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta); } #else static inline void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now) {} #endif /** * wakeup_source_deactivate - Mark given wakeup source as inactive. * @ws: Wakeup source to handle. * * Update the @ws' statistics and notify the PM core that the wakeup source has * become inactive by decrementing the counter of wakeup events being processed * and incrementing the counter of registered wakeup events. */ static void wakeup_source_deactivate(struct wakeup_source *ws) { unsigned int cnt, inpr, cec; ktime_t duration; ktime_t now; ws->relax_count++; /* * __pm_relax() may be called directly or from a timer function. * If it is called directly right after the timer function has been * started, but before the timer function calls __pm_relax(), it is * possible that __pm_stay_awake() will be called in the meantime and * will set ws->active. Then, ws->active may be cleared immediately * by the __pm_relax() called from the timer function, but in such a * case ws->relax_count will be different from ws->active_count. */ if (ws->relax_count != ws->active_count) { ws->relax_count--; return; } ws->active = false; now = ktime_get(); duration = ktime_sub(now, ws->last_time); ws->total_time = ktime_add(ws->total_time, duration); if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time)) ws->max_time = duration; ws->last_time = now; del_timer(&ws->timer); ws->timer_expires = 0; if (ws->autosleep_enabled) update_prevent_sleep_time(ws, now); /* * Increment the counter of registered wakeup events and decrement the * counter of wakeup events in progress simultaneously. 
*/ cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count); trace_wakeup_source_deactivate(ws->name, cec); split_counters(&cnt, &inpr); if (!inpr && waitqueue_active(&wakeup_count_wait_queue)) wake_up(&wakeup_count_wait_queue); } /** * __pm_relax - Notify the PM core that processing of a wakeup event has ended. * @ws: Wakeup source object associated with the source of the event. * * Call this function for wakeup events whose processing started with calling * __pm_stay_awake(). * * It is safe to call it from interrupt context. */ void __pm_relax(struct wakeup_source *ws) { unsigned long flags; if (!ws) return; spin_lock_irqsave(&ws->lock, flags); if (ws->active) wakeup_source_deactivate(ws); spin_unlock_irqrestore(&ws->lock, flags); } EXPORT_SYMBOL_GPL(__pm_relax); /** * pm_relax - Notify the PM core that processing of a wakeup event has ended. * @dev: Device that signaled the event. * * Execute __pm_relax() for the @dev's wakeup source object. */ void pm_relax(struct device *dev) { unsigned long flags; if (!dev) return; spin_lock_irqsave(&dev->power.lock, flags); __pm_relax(dev->power.wakeup); spin_unlock_irqrestore(&dev->power.lock, flags); } EXPORT_SYMBOL_GPL(pm_relax); /** * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. * @t: timer list * * Call wakeup_source_deactivate() for the wakeup source whose address is stored * in @data if it is currently active and its timer has not been canceled and * the expiration time of the timer is not in future. */ static void pm_wakeup_timer_fn(struct timer_list *t) { struct wakeup_source *ws = from_timer(ws, t, timer); unsigned long flags; spin_lock_irqsave(&ws->lock, flags); if (ws->active && ws->timer_expires && time_after_eq(jiffies, ws->timer_expires)) { wakeup_source_deactivate(ws); ws->expire_count++; } spin_unlock_irqrestore(&ws->lock, flags); } /** * pm_wakeup_ws_event - Notify the PM core of a wakeup event. * @ws: Wakeup source object associated with the event source. * @msec: Anticipated event processing time (in milliseconds). * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. * * Notify the PM core of a wakeup event whose source is @ws that will take * approximately @msec milliseconds to be processed by the kernel. If @ws is * not active, activate it. If @msec is nonzero, set up the @ws' timer to * execute pm_wakeup_timer_fn() in future. * * It is safe to call this function from interrupt context. */ void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard) { unsigned long flags; unsigned long expires; if (!ws) return; spin_lock_irqsave(&ws->lock, flags); wakeup_source_report_event(ws, hard); if (!msec) { wakeup_source_deactivate(ws); goto unlock; } expires = jiffies + msecs_to_jiffies(msec); if (!expires) expires = 1; if (!ws->timer_expires || time_after(expires, ws->timer_expires)) { mod_timer(&ws->timer, expires); ws->timer_expires = expires; } unlock: spin_unlock_irqrestore(&ws->lock, flags); } EXPORT_SYMBOL_GPL(pm_wakeup_ws_event); /** * pm_wakeup_dev_event - Notify the PM core of a wakeup event. * @dev: Device the wakeup event is related to. * @msec: Anticipated event processing time (in milliseconds). * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. * * Call pm_wakeup_ws_event() for the @dev's wakeup source object. 
*/ void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard) { unsigned long flags; if (!dev) return; spin_lock_irqsave(&dev->power.lock, flags); pm_wakeup_ws_event(dev->power.wakeup, msec, hard); spin_unlock_irqrestore(&dev->power.lock, flags); } EXPORT_SYMBOL_GPL(pm_wakeup_dev_event); void pm_print_active_wakeup_sources(void) { struct wakeup_source *ws; int srcuidx, active = 0; struct wakeup_source *last_activity_ws = NULL; srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) { if (ws->active) { pm_pr_dbg("active wakeup source: %s\n", ws->name); active = 1; } else if (!active && (!last_activity_ws || ktime_to_ns(ws->last_time) > ktime_to_ns(last_activity_ws->last_time))) { last_activity_ws = ws; } } if (!active && last_activity_ws) pm_pr_dbg("last active wakeup source: %s\n", last_activity_ws->name); srcu_read_unlock(&wakeup_srcu, srcuidx); } EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources); /** * pm_wakeup_pending - Check if power transition in progress should be aborted. * * Compare the current number of registered wakeup events with its preserved * value from the past and return true if new wakeup events have been registered * since the old value was stored. Also return true if the current number of * wakeup events being processed is different from zero. */ bool pm_wakeup_pending(void) { unsigned long flags; bool ret = false; raw_spin_lock_irqsave(&events_lock, flags); if (events_check_enabled) { unsigned int cnt, inpr; split_counters(&cnt, &inpr); ret = (cnt != saved_count || inpr > 0); events_check_enabled = !ret; } raw_spin_unlock_irqrestore(&events_lock, flags); if (ret) { pm_pr_dbg("Wakeup pending, aborting suspend\n"); pm_print_active_wakeup_sources(); } return ret || atomic_read(&pm_abort_suspend) > 0; } EXPORT_SYMBOL_GPL(pm_wakeup_pending); void pm_system_wakeup(void) { atomic_inc(&pm_abort_suspend); s2idle_wake(); } EXPORT_SYMBOL_GPL(pm_system_wakeup); void pm_system_cancel_wakeup(void) { atomic_dec_if_positive(&pm_abort_suspend); } void pm_wakeup_clear(unsigned int irq_number) { raw_spin_lock_irq(&wakeup_irq_lock); if (irq_number && wakeup_irq[0] == irq_number) wakeup_irq[0] = wakeup_irq[1]; else wakeup_irq[0] = 0; wakeup_irq[1] = 0; raw_spin_unlock_irq(&wakeup_irq_lock); if (!irq_number) atomic_set(&pm_abort_suspend, 0); } void pm_system_irq_wakeup(unsigned int irq_number) { unsigned long flags; raw_spin_lock_irqsave(&wakeup_irq_lock, flags); if (wakeup_irq[0] == 0) wakeup_irq[0] = irq_number; else if (wakeup_irq[1] == 0) wakeup_irq[1] = irq_number; else irq_number = 0; pm_pr_dbg("Triggering wakeup from IRQ %d\n", irq_number); raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags); if (irq_number) pm_system_wakeup(); } unsigned int pm_wakeup_irq(void) { return wakeup_irq[0]; } /** * pm_get_wakeup_count - Read the number of registered wakeup events. * @count: Address to store the value at. * @block: Whether or not to block. * * Store the number of registered wakeup events at the address in @count. If * @block is set, block until the current number of wakeup events being * processed is zero. * * Return 'false' if the current number of wakeup events being processed is * nonzero. Otherwise return 'true'. 
*/ bool pm_get_wakeup_count(unsigned int *count, bool block) { unsigned int cnt, inpr; if (block) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(&wakeup_count_wait_queue, &wait, TASK_INTERRUPTIBLE); split_counters(&cnt, &inpr); if (inpr == 0 || signal_pending(current)) break; pm_print_active_wakeup_sources(); schedule(); } finish_wait(&wakeup_count_wait_queue, &wait); } split_counters(&cnt, &inpr); *count = cnt; return !inpr; } /** * pm_save_wakeup_count - Save the current number of registered wakeup events. * @count: Value to compare with the current number of registered wakeup events. * * If @count is equal to the current number of registered wakeup events and the * current number of wakeup events being processed is zero, store @count as the * old number of registered wakeup events for pm_check_wakeup_events(), enable * wakeup events detection and return 'true'. Otherwise disable wakeup events * detection and return 'false'. */ bool pm_save_wakeup_count(unsigned int count) { unsigned int cnt, inpr; unsigned long flags; events_check_enabled = false; raw_spin_lock_irqsave(&events_lock, flags); split_counters(&cnt, &inpr); if (cnt == count && inpr == 0) { saved_count = count; events_check_enabled = true; } raw_spin_unlock_irqrestore(&events_lock, flags); return events_check_enabled; } #ifdef CONFIG_PM_AUTOSLEEP /** * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources. * @set: Whether to set or to clear the autosleep_enabled flags. */ void pm_wakep_autosleep_enabled(bool set) { struct wakeup_source *ws; ktime_t now = ktime_get(); int srcuidx; srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) { spin_lock_irq(&ws->lock); if (ws->autosleep_enabled != set) { ws->autosleep_enabled = set; if (ws->active) { if (set) ws->start_prevent_time = now; else update_prevent_sleep_time(ws, now); } } spin_unlock_irq(&ws->lock); } srcu_read_unlock(&wakeup_srcu, srcuidx); } #endif /* CONFIG_PM_AUTOSLEEP */ /** * print_wakeup_source_stats - Print wakeup source statistics information. * @m: seq_file to print the statistics into. * @ws: Wakeup source object to print the statistics for. 
*/ static int print_wakeup_source_stats(struct seq_file *m, struct wakeup_source *ws) { unsigned long flags; ktime_t total_time; ktime_t max_time; unsigned long active_count; ktime_t active_time; ktime_t prevent_sleep_time; spin_lock_irqsave(&ws->lock, flags); total_time = ws->total_time; max_time = ws->max_time; prevent_sleep_time = ws->prevent_sleep_time; active_count = ws->active_count; if (ws->active) { ktime_t now = ktime_get(); active_time = ktime_sub(now, ws->last_time); total_time = ktime_add(total_time, active_time); if (active_time > max_time) max_time = active_time; if (ws->autosleep_enabled) prevent_sleep_time = ktime_add(prevent_sleep_time, ktime_sub(now, ws->start_prevent_time)); } else { active_time = 0; } seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n", ws->name, active_count, ws->event_count, ws->wakeup_count, ws->expire_count, ktime_to_ms(active_time), ktime_to_ms(total_time), ktime_to_ms(max_time), ktime_to_ms(ws->last_time), ktime_to_ms(prevent_sleep_time)); spin_unlock_irqrestore(&ws->lock, flags); return 0; } static void *wakeup_sources_stats_seq_start(struct seq_file *m, loff_t *pos) { struct wakeup_source *ws; loff_t n = *pos; int *srcuidx = m->private; if (n == 0) { seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t" "expire_count\tactive_since\ttotal_time\tmax_time\t" "last_change\tprevent_suspend_time\n"); } *srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) { if (n-- <= 0) return ws; } return NULL; } static void *wakeup_sources_stats_seq_next(struct seq_file *m, void *v, loff_t *pos) { struct wakeup_source *ws = v; struct wakeup_source *next_ws = NULL; ++(*pos); list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) { next_ws = ws; break; } if (!next_ws) print_wakeup_source_stats(m, &deleted_ws); return next_ws; } static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v) { int *srcuidx = m->private; srcu_read_unlock(&wakeup_srcu, *srcuidx); } /** * wakeup_sources_stats_seq_show - Print wakeup sources statistics information. * @m: seq_file to print the statistics into. * @v: wakeup_source of each iteration */ static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v) { struct wakeup_source *ws = v; print_wakeup_source_stats(m, ws); return 0; } static const struct seq_operations wakeup_sources_stats_seq_ops = { .start = wakeup_sources_stats_seq_start, .next = wakeup_sources_stats_seq_next, .stop = wakeup_sources_stats_seq_stop, .show = wakeup_sources_stats_seq_show, }; static int wakeup_sources_stats_open(struct inode *inode, struct file *file) { return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int)); } static const struct file_operations wakeup_sources_stats_fops = { .owner = THIS_MODULE, .open = wakeup_sources_stats_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static int __init wakeup_sources_debugfs_init(void) { debugfs_create_file("wakeup_sources", 0444, NULL, NULL, &wakeup_sources_stats_fops); return 0; } postcore_initcall(wakeup_sources_debugfs_init);
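/*
 * Illustrative sketch only (not part of the file above): the two usage
 * patterns described in the "no suspend period" comment block earlier in
 * this file, as a hypothetical driver might apply them in its interrupt
 * handlers. The handler names are assumptions for this sketch; only the
 * pm_* calls mirror the API defined above, and the device is assumed to have
 * had wakeup enabled at probe time (e.g. via device_wakeup_enable()).
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

/* Pattern 1: the same unit detects and fully processes the event, so it
 * brackets the "no suspend" period itself with pm_stay_awake()/pm_relax().
 */
static irqreturn_t example_irq_handler_self_contained(int irq, void *data)
{
	struct device *dev = data;

	pm_stay_awake(dev);
	/* ... handle the wakeup event completely here ... */
	pm_relax(dev);
	return IRQ_HANDLED;
}

/* Pattern 2: processing continues elsewhere (e.g. in userspace), so the
 * detector only grants a grace period and lets the timer close it.
 */
static irqreturn_t example_irq_handler_deferred(int irq, void *data)
{
	struct device *dev = data;

	/* Keep the system awake for roughly 200 ms (an assumed budget) while
	 * another component picks the event up; this is the same call the
	 * pm_wakeup_event() helper makes with hard == false.
	 */
	pm_wakeup_dev_event(dev, 200, false);
	return IRQ_HANDLED;
}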
/* Key type used to cache DNS lookups made by the kernel
 *
 * See Documentation/networking/dns_resolver.rst
 *
 * Copyright (c) 2007 Igor Mammedov
 * Author(s): Igor Mammedov (niallain@gmail.com)
 *            Steve French (sfrench@us.ibm.com)
 *            Wang Lei (wang840925@gmail.com)
 *            David Howells (dhowells@redhat.com)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/keyctl.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/dns_resolver.h>
#include <keys/dns_resolver-type.h>
#include <keys/user-type.h>
#include "internal.h"

MODULE_DESCRIPTION("DNS Resolver");
MODULE_AUTHOR("Wang Lei");
MODULE_LICENSE("GPL");

unsigned int dns_resolver_debug;
module_param_named(debug, dns_resolver_debug, uint, 0644);
MODULE_PARM_DE